python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""providing absolute costs of resources in GCloud Dataproc"""
from dataclasses import dataclass
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.pricing.price_provider import PriceProvider
@dataclass
class DataprocCatalogContainer(JSONPropertiesContainer):
def _init_fields(self) -> None:
# the prices of the products are defined under 'gcp_price_list'
self.props = self.props['gcp_price_list']
@dataclass
class DataprocPriceProvider(PriceProvider):
"""
Provide costs of Dataproc instances
"""
name = 'Dataproc'
def _process_resource_configs(self):
online_entries = self.pricing_configs['gcloud'].get_value('catalog', 'onlineResources')
for online_entry in online_entries:
if online_entry.get('resourceKey') == 'gcloud-catalog':
file_name = online_entry.get('localFile')
self.cache_files = {'gcloud': FSUtil.build_path(self.cache_directory, file_name)}
self.resource_urls = {'gcloud': online_entry.get('onlineURL')}
break
def _create_catalogs(self):
self.catalogs = {'gcloud': DataprocCatalogContainer(prop_arg=self.cache_files['gcloud'])}
def get_ssd_price(self, machine_type: str) -> float:
lookup_key = 'CP-COMPUTEENGINE-LOCAL-SSD'
ssd_unit_size_factor = float(self.pricing_configs['gcloud'].get_value('catalog', 'ssd', 'unitSizeFactor'))
return self.catalogs['gcloud'].get_value(lookup_key, self.region) * ssd_unit_size_factor
def get_ram_price(self, machine_type: str) -> float:
lookup_key = self._key_for_cpe_machine_ram(machine_type)
return self.catalogs['gcloud'].get_value(lookup_key, self.region)
def get_gpu_price(self, gpu_device: str) -> float:
lookup_key = self._key_for_gpu_device(gpu_device)
return self.catalogs['gcloud'].get_value(lookup_key, self.region)
def get_cpu_price(self, machine_type: str) -> float:
lookup_key = self._key_for_cpe_machine_cores(machine_type)
return self.catalogs['gcloud'].get_value(lookup_key, self.region)
def get_container_cost(self) -> float:
return self.__get_dataproc_cluster_price()
def __get_dataproc_cluster_price(self) -> float:
lookup_key = 'CP-DATAPROC'
return self.catalogs['gcloud'].get_value(lookup_key, 'us')
def get_cores_count_for_vm(self, machine_type: str) -> str:
lookup_key = self._key_for_cpe_vm(machine_type)
cores = self.catalogs['gcloud'].get_value_silent(lookup_key, 'cores')
return cores
def get_ram_size_for_vm(self, machine_type: str) -> str:
lookup_key = self._key_for_cpe_vm(machine_type)
memory = self.catalogs['gcloud'].get_value_silent(lookup_key, 'memory')
return memory
@classmethod
def _key_for_cpe_machine_cores(cls, machine_type: str) -> str:
return f'CP-COMPUTEENGINE-{cls._get_machine_prefix(machine_type).upper()}-PREDEFINED-VM-CORE'
@classmethod
def _key_for_cpe_machine_ram(cls, machine_type: str) -> str:
return f'CP-COMPUTEENGINE-{cls._get_machine_prefix(machine_type).upper()}-PREDEFINED-VM-RAM'
@classmethod
def _key_for_gpu_device(cls, gpu_device: str) -> str:
return f'GPU_NVIDIA_TESLA_{gpu_device.upper()}'
@classmethod
def _get_machine_prefix(cls, machine_type: str) -> str:
return machine_type.split('-')[0]
@classmethod
def _key_for_cpe_vm(cls, machine_type: str):
return f'CP-COMPUTEENGINE-VMIMAGE-{machine_type.upper()}'
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/pricing/dataproc_pricing.py |
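The provider above resolves every price through string keys that encode a Compute Engine SKU family. A minimal standalone sketch of that key scheme follows; the machine type is only an example and the helper names are not part of the module:

# Standalone re-statement of the lookup-key scheme used by DataprocPriceProvider;
# 'n1-standard-8' is just a sample machine type.
def cpe_core_key(machine_type: str) -> str:
    prefix = machine_type.split('-')[0].upper()
    return f'CP-COMPUTEENGINE-{prefix}-PREDEFINED-VM-CORE'

def cpe_vm_key(machine_type: str) -> str:
    return f'CP-COMPUTEENGINE-VMIMAGE-{machine_type.upper()}'

assert cpe_core_key('n1-standard-8') == 'CP-COMPUTEENGINE-N1-PREDEFINED-VM-CORE'
assert cpe_vm_key('n1-standard-8') == 'CP-COMPUTEENGINE-VMIMAGE-N1-STANDARD-8'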
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""providing absolute costs of resources in Databricks Azure platform"""
from dataclasses import dataclass, field
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.common.utilities import Utils
from spark_rapids_pytools.pricing.price_provider import PriceProvider
@dataclass
class DatabricksAzurePriceProvider(PriceProvider):
"""
Provide costs of Databricks Azure instances
"""
name = 'Databricks-Azure'
plan: str = field(default='premium-databricks-azure', init=False) # standard, premium (default), or enterprise
# TODO: current default to 'premium' plan and 'Jobs Compute' compute type,
# need to figure out how to find these values from cluster properties
def _generate_cache_files(self):
src_file_path = Utils.resource_path(f'{self.plan}-catalog.json')
FSUtil.cache_resource(src_file_path, self.cache_files[self.plan])
super()._generate_cache_files()
def _process_resource_configs(self):
online_entries = self.pricing_configs['databricks-azure'].get_value('catalog', 'onlineResources')
for online_entry in online_entries:
file_name = online_entry.get('localFile')
file_key = online_entry.get('resourceKey').split('-catalog')[0]
if file_key == self.plan:
self.cache_files[file_key] = FSUtil.build_path(self.cache_directory, file_name)
def _create_catalogs(self):
super()._create_catalogs()
for file_key, cache_file in self.cache_files.items():
self.catalogs[file_key] = JSONPropertiesContainer(prop_arg=cache_file)
def get_ssd_price(self, machine_type: str) -> float:
pass
def get_ram_price(self, machine_type: str) -> float:
pass
def get_gpu_price(self, gpu_device: str) -> float:
pass
def get_cpu_price(self, machine_type: str) -> float:
pass
def get_container_cost(self) -> float:
pass
def get_cores_count_for_vm(self, machine_type: str) -> str:
pass
def get_ram_size_for_vm(self, machine_type: str) -> str:
pass
def get_instance_price(self, instance, compute_type: str = 'Jobs Compute') -> float:
try:
job_type_conf = self.catalogs[self.plan].get_value(compute_type)
instance_name = instance.split('Standard_')[1] if instance.startswith('Standard_') else instance
instance_conf = job_type_conf.get('Instances').get(instance_name)
rate_per_hour = instance_conf.get('TotalPricePerHour')
return rate_per_hour
except Exception as ex: # pylint: disable=broad-except
self.logger.error('Could not find price for instance type \'%s\': %s', instance, ex)
raise ex
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/pricing/databricks_azure_pricing.py |
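get_instance_price() above walks a nested catalog: compute type, then 'Instances', then the instance name (with any 'Standard_' prefix stripped), then 'TotalPricePerHour'. A minimal sketch of that lookup against an invented catalog slice (instance name and price are made up):

# Hypothetical catalog slice shaped the way get_instance_price() reads it;
# all values are invented for illustration.
catalog = {'Jobs Compute': {'Instances': {'DS3_v2': {'TotalPricePerHour': 0.572}}}}
instance = 'Standard_DS3_v2'
name = instance.split('Standard_')[1] if instance.startswith('Standard_') else instance
print(catalog['Jobs Compute']['Instances'][name]['TotalPricePerHour'])  # -> 0.572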
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""providing absolute costs of resources in Databricks"""
from dataclasses import dataclass, field
from spark_rapids_pytools.common.prop_manager import JSONPropertiesContainer
from spark_rapids_pytools.common.sys_storage import FSUtil
from spark_rapids_pytools.common.utilities import Utils
from spark_rapids_pytools.pricing.emr_pricing import EMREc2PriceProvider
@dataclass
class DatabricksCatalogContainer(JSONPropertiesContainer):
def _init_fields(self) -> None:
pass
@dataclass
class DatabricksPriceProvider(EMREc2PriceProvider):
"""
Provide costs of Databricks instances
"""
name = 'Databricks'
plan: str = field(default='databricks-premium', init=False) # standard, premium (default), or enterprise
# TODO: current default to 'premium' plan and 'Jobs Compute' compute type,
# need to figure out how to find these values from cluster properties
def _generate_cache_files(self):
src_file_path = Utils.resource_path(f'{self.plan}-catalog.json')
FSUtil.cache_resource(src_file_path, self.cache_files[self.plan])
super()._generate_cache_files()
def _process_resource_configs(self):
online_entries = self.pricing_configs['databricks'].get_value('catalog', 'onlineResources')
for online_entry in online_entries:
file_name = online_entry.get('localFile')
file_key = online_entry.get('resourceKey').split('-catalog')[0]
if file_key == self.plan or 'databricks' not in file_key:
self.cache_files[file_key] = FSUtil.build_path(self.cache_directory, file_name)
if 'databricks' not in file_key:
self.resource_urls[file_key] = online_entry.get('onlineURL')
def _create_catalogs(self):
super()._create_catalogs()
for file_key, cache_file in self.cache_files.items():
if 'ec2' not in file_key:
self.catalogs[file_key] = DatabricksCatalogContainer(prop_arg=cache_file)
def get_ssd_price(self, machine_type: str) -> float:
pass
def get_ram_price(self, machine_type: str) -> float:
pass
def get_gpu_price(self, gpu_device: str) -> float:
pass
def get_cpu_price(self, machine_type: str) -> float:
pass
def get_container_cost(self) -> float:
pass
def get_cores_count_for_vm(self, machine_type: str) -> str:
pass
def get_ram_size_for_vm(self, machine_type: str) -> str:
pass
def get_instance_price(self, instance, compute_type: str = 'Jobs Compute') -> float:
# the cost of an instance is amount of DBU * JOB_type-rate-per-hour
job_type_conf = self.catalogs[self.plan].get_value(compute_type)
rate_per_hour = job_type_conf.get('RatePerHour')
instance_conf = job_type_conf.get('Instances').get(instance)
instance_dbu = instance_conf.get('DBU')
return instance_dbu * rate_per_hour
| spark-rapids-tools-dev | user_tools/src/spark_rapids_pytools/pricing/databricks_pricing.py |
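As the comment in get_instance_price() notes, the Databricks (AWS) cost of an instance is the number of DBUs it consumes per hour multiplied by the plan/compute-type rate per hour. A back-of-the-envelope sketch with invented figures:

# Illustrative numbers only; 'RatePerHour' and 'DBU' come from the plan catalog.
rate_per_hour = 0.15   # $ per DBU-hour for the plan/compute type
instance_dbu = 2.86    # DBUs per hour consumed by the instance type
print(f'{instance_dbu * rate_per_hour:.4f} $/hour')  # -> 0.4290 $/hour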
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enumeration types commonly used through the AS python implementations."""
from enum import Enum
from typing import Union, cast, Optional
class EnumeratedType(str, Enum):
"""Abstract representation of enumerated values"""
# Make enum case-insensitive by overriding the Enum's missing method
@classmethod
def _missing_(cls, value):
value = value.lower()
for member in cls:
if member.lower() == value:
return member
return None
@classmethod
def tostring(cls, value: Union[Enum, str]) -> str:
"""Return the string representation of the state object attribute
:param str value: the state object to turn into string
:return: the uppercase string that represents the state object
:rtype: str
"""
value = cast(Enum, value)
return str(value._value_).upper() # pylint: disable=protected-access
@classmethod
def fromstring(cls, value: str) -> Optional[str]:
"""Return the state object attribute that matches the given value
:param str value: string to look up
:return: the state object attribute that matches the string
:rtype: str
"""
return getattr(cls, value.upper(), None)
@classmethod
def pretty_print(cls, value):
# type: (Union[Enum, str]) -> str
"""Return the string representation of the state object attribute
:param str value: the state object to turn into string
:return: the string that represents the state object
:rtype: str
"""
value = cast(Enum, value)
return str(value._value_) # pylint: disable=protected-access
###########
# CSP Enums
###########
class CspEnv(EnumeratedType):
"""Represents the supported types of runtime CSP"""
DATABRICKS_AWS = 'databricks_aws'
DATABRICKS_AZURE = 'databricks_azure'
DATAPROC = 'dataproc'
EMR = 'emr'
ONPREM = 'onprem'
NONE = 'NONE'
@classmethod
def get_default(cls):
return cls.ONPREM
@classmethod
def _missing_(cls, value):
value = value.lower()
# convert hyphens to underscores
value = value.replace('-', '_')
for member in cls:
if member.lower() == value:
return member
return None
@classmethod
def requires_pricing_map(cls, value) -> bool:
return value in [cls.ONPREM]
def get_equivalent_pricing_platform(self) -> list:
platforms_map = {
self.ONPREM: [CspEnv.DATAPROC]
}
return platforms_map.get(self)
def map_to_java_arg(self) -> str:
str_value = self.__class__.pretty_print(self)
# convert underscores to hyphens
return str_value.replace('_', '-')
#############
# Tools Enums
#############
class QualFilterApp(EnumeratedType):
"""Values used to filter out the applications in the qualification report"""
SAVINGS = 'savings'
SPEEDUPS = 'speedups'
ALL = 'all'
@classmethod
def get_default(cls):
return cls.SAVINGS
class QualGpuClusterReshapeType(EnumeratedType):
"""Values used to filter out the applications in the qualification report"""
MATCH = 'match'
CLUSTER = 'cluster'
JOB = 'job'
@classmethod
def get_default(cls):
return cls.MATCH
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/enums.py |
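EnumeratedType and CspEnv accept case-insensitive (and, for CspEnv, hyphenated) spellings by overriding _missing_. A standalone reproduction of that pattern, assuming nothing beyond the standard library; 'Databricks-AWS' is just a sample input:

# Minimal standalone reproduction of the case-insensitive enum pattern above.
from enum import Enum

class DemoEnv(str, Enum):
    DATABRICKS_AWS = 'databricks_aws'
    ONPREM = 'onprem'

    @classmethod
    def _missing_(cls, value):
        # normalize case and hyphens before retrying the lookup
        value = value.lower().replace('-', '_')
        for member in cls:
            if member.value == value:
                return member
        return None

assert DemoEnv('Databricks-AWS') is DemoEnv.DATABRICKS_AWS
# mirrors map_to_java_arg(): underscores back to hyphens for the java cmd
assert DemoEnv.DATABRICKS_AWS.value.replace('_', '-') == 'databricks-aws'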
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the Accelerated Spark python implementations"""
from .enums import (
EnumeratedType, CspEnv
)
from .utils import (
get_elem_from_dict, get_elem_non_safe
)
from .storagelib.csppath import (
CspPath, path_impl_registry, CspPathT
)
__all__ = [
'EnumeratedType',
'CspEnv',
'get_elem_from_dict',
'get_elem_non_safe',
'CspPathT',
'path_impl_registry',
'CspPath'
]
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Custom exceptions used throughout the implementation.
These help to catch specific behaviors when necessary.
"""
from typing import Optional
from pydantic import ValidationError
class CspPathException(Exception):
"""Base exception for all custom exceptions."""
class InvalidProtocolPrefixError(CspPathException, ValueError):
pass
class FSMismatchError(CspPathException, ValueError):
pass
class CspFileExistsError(CspPathException, ValueError):
pass
class CspPathNotFoundException(CspPathException, ValueError):
pass
class JsonLoadException(CspPathException, ValueError):
pass
class YamlLoadException(CspPathException, ValueError):
pass
class CspPathAttributeError(CspPathException, ValueError):
pass
class InvalidPropertiesSchema(CspPathException, ValueError):
"""
Defines a class to represent errors caused by invalid properties schema
"""
def __init__(self, msg: str, pydantic_err: Optional[ValidationError] = None):
if pydantic_err is None:
self.message = msg
else:
content = [msg]
for err_obj in pydantic_err.errors():
field_loc = err_obj.get('loc')
field_title = field_loc[0] if field_loc else ''
single_err = [field_title, err_obj.get('type', ''), err_obj.get('msg', '')]
content.append(str.join('. ', single_err))
self.message = str.join('\n', content)
super().__init__(self.message)
class IllegalArgumentError(CspPathException, ValueError):
pass
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/exceptions.py |
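InvalidPropertiesSchema flattens a pydantic ValidationError into one readable message by joining each error's location, type, and message. A minimal sketch of the raw material it works with, assuming pydantic v2 is available; the schema and payload below are made up:

# Hypothetical schema and payload, only to show the shape of ValidationError.errors().
from pydantic import BaseModel, ValidationError

class DemoSchema(BaseModel):
    system: dict

try:
    DemoSchema(**{'system': 'not-a-dict'})
except ValidationError as exc:
    for err in exc.errors():
        # each entry carries 'loc', 'type', and 'msg', which the exception
        # class above joins into a multi-line message
        print(err.get('loc'), err.get('type'), err.get('msg'))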
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CLI to run tools associated with RAPIDS Accelerator for Apache Spark plugin."""
import fire
from spark_rapids_tools.enums import QualGpuClusterReshapeType
from spark_rapids_tools.utils.util import gen_app_banner
from spark_rapids_pytools.rapids.bootstrap import Bootstrap
from spark_rapids_pytools.rapids.profiling import ProfilingAsLocal
from spark_rapids_pytools.rapids.qualification import QualificationAsLocal
from .argprocessor import AbsToolUserArgModel
class ToolsCLI(object): # pylint: disable=too-few-public-methods
"""CLI that provides a runtime environment that simplifies running cost and performance analysis
using the RAPIDS Accelerator for Apache Spark.
A wrapper script to run RAPIDS Accelerator tools (Qualification, Profiling, and Bootstrap)
locally on the dev machine.
"""
def qualification(self,
eventlogs: str = None,
cluster: str = None,
platform: str = None,
target_platform: str = None,
output_folder: str = None,
filter_apps: str = None,
gpu_cluster_recommendation: str = QualGpuClusterReshapeType.tostring(
QualGpuClusterReshapeType.get_default())):
"""The Qualification cmd provides estimated running costs and speedups by migrating Apache
Spark applications to GPU accelerated clusters.
The Qualification cmd analyzes Spark eventlogs generated from CPU based Spark applications to
help quantify the expected acceleration and cost savings of migrating a Spark application or
query to GPU.
The cmd will process each app individually, but will group apps with the same name into the
same output row after averaging duration metrics accordingly.
:param eventlogs: Event log filenames or CSP storage directories containing event logs
(comma separated).
Skipping this argument requires that the cluster argument points to a valid
cluster name on the CSP.
:param cluster: Name of cluster or path to cluster-properties.
:param platform: defines one of the following "onprem", "emr", "dataproc", "databricks-aws",
and "databricks-azure".
:param target_platform: Cost savings and speedup recommendation for comparable cluster in
target_platform based on on-premises cluster configuration.
Currently only `dataproc` is supported for target_platform.
If not provided, the final report will be limited to GPU speedups only without
cost-savings.
:param output_folder: path to store the output
:param filter_apps: filtering criteria of the applications listed in the final STDOUT table
is one of the following (ALL, SPEEDUPS, SAVINGS).
Requires "Cluster".
Note that this filter does not affect the CSV report.
"ALL" means no filter applied. "SPEEDUPS" lists all the apps that are either
'Recommended', or 'Strongly Recommended' based on speedups. "SAVINGS"
lists all the apps that have positive estimated GPU savings except for the apps that
are "Not Applicable"
:param gpu_cluster_recommendation: The type of GPU cluster recommendation to generate.
Requires "Cluster".
It accepts one of the following:
"MATCH": keep GPU cluster same number of nodes as CPU cluster;
"CLUSTER": recommend optimal GPU cluster by cost for entire cluster;
"JOB": recommend optimal GPU cluster by cost per job
"""
qual_args = AbsToolUserArgModel.create_tool_args('qualification',
eventlogs=eventlogs,
cluster=cluster,
platform=platform,
target_platform=target_platform,
output_folder=output_folder,
filter_apps=filter_apps,
gpu_cluster_recommendation=gpu_cluster_recommendation)
if qual_args:
tool_obj = QualificationAsLocal(platform_type=qual_args['runtimePlatform'],
output_folder=qual_args['outputFolder'],
wrapper_options=qual_args)
tool_obj.launch()
def profiling(self,
eventlogs: str = None,
cluster: str = None,
platform: str = None,
output_folder: str = None):
"""The Profiling cmd provides information which can be used for debugging and profiling
Apache Spark applications running on accelerated GPU cluster.
The Profiling tool analyzes both CPU and GPU generated eventlogs and generates information
including the Spark version, executor details, properties, etc.
The tool will also recommend settings for the application, assuming that the job will be able
to use all the cluster resources (CPU and GPU) when it is running.
:param eventlogs: Event log filenames or cloud storage directories
containing event logs (comma separated). If missing, the wrapper reads the Spark
property `spark.eventLog.dir` defined in the `cluster`.
:param cluster: The cluster on which the Spark applications were executed. The argument
can be a cluster name or a valid path to the cluster's properties file (json format)
generated by the CSP SDK.
:param platform: defines one of the following "onprem", "emr", "dataproc", "databricks-aws",
and "databricks-azure".
:param output_folder: path to store the output.
"""
prof_args = AbsToolUserArgModel.create_tool_args('profiling',
eventlogs=eventlogs,
cluster=cluster,
platform=platform,
output_folder=output_folder)
if prof_args:
tool_obj = ProfilingAsLocal(platform_type=prof_args['runtimePlatform'],
output_folder=prof_args['outputFolder'],
wrapper_options=prof_args)
tool_obj.launch()
def bootstrap(self,
cluster: str,
platform: str,
output_folder: str = None,
dry_run: bool = True):
"""Provides optimized RAPIDS Accelerator for Apache Spark configs based on GPU cluster shape.
This tool is supposed to be used once a cluster has been created to set the recommended
configurations.
The tool will apply settings for the cluster assuming that jobs will run serially so that
each job can use up all the cluster resources (CPU and GPU) when it is running.
:param cluster: Name of the cluster running an accelerated computing instance class
:param platform: defines one of the following "onprem", "emr", "dataproc", "databricks-aws",
and "databricks-azure".
:param output_folder: path where the final recommendations will be saved.
:param dry_run: when False, the recommended Spark config settings are applied to the Dataproc
driver node; when True (default), the recommendations are only reported.
"""
boot_args = AbsToolUserArgModel.create_tool_args('bootstrap',
cluster=cluster,
platform=platform,
output_folder=output_folder,
dry_run=dry_run)
if boot_args:
tool_obj = Bootstrap(platform_type=boot_args['runtimePlatform'],
cluster=cluster,
output_folder=boot_args['outputFolder'],
wrapper_options=boot_args)
tool_obj.launch()
def main():
# Make Python Fire not use a pager when it prints a help text
fire.core.Display = lambda lines, out: out.write('\n'.join(lines) + '\n')
print(gen_app_banner())
fire.Fire(ToolsCLI())
if __name__ == '__main__':
main()
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cmdli/tools_cli.py |
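For context, a hypothetical way the wrapper above gets invoked; the event-log path and cluster name are placeholders, and the programmatic call really launches the tool if the package is installed:

# Shell forms (hypothetical arguments), dispatched by python-fire:
#   ascli qualification --platform onprem --eventlogs /tmp/eventlogs
#   ascli profiling --platform dataproc --cluster my-cluster
# Equivalent programmatic call (this actually runs the qualification tool):
from spark_rapids_tools.cmdli import ToolsCLI

ToolsCLI().qualification(platform='onprem', eventlogs='/tmp/eventlogs')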
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the user CLI used to run the tools"""
from .tools_cli import ToolsCLI
__all__ = [
'ToolsCLI'
]
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cmdli/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of argument processors for Tools"""
import dataclasses
from collections import defaultdict
from enum import IntEnum
from functools import partial
from logging import Logger
from typing import Optional, Any, ClassVar, Callable, Type, Dict
from pydantic import model_validator, ValidationError
from pydantic.dataclasses import dataclass
from pydantic_core import PydanticCustomError
from spark_rapids_tools.cloud import ClientCluster
from spark_rapids_tools.exceptions import IllegalArgumentError
from spark_rapids_tools.utils import AbstractPropContainer, is_http_file
from spark_rapids_pytools.cloud_api.sp_types import DeployMode
from spark_rapids_pytools.common.utilities import ToolLogging
from spark_rapids_pytools.rapids.qualification import QualGpuClusterReshapeType
from ..enums import QualFilterApp, CspEnv
from ..storagelib.csppath import CspPath
from ..tools.autotuner import AutoTunerPropMgr
from ..utils.util import dump_tool_usage
class ArgValueCase(IntEnum):
"""
Enum cases representing the status of each argument. Used to classify each argument's
input so the resulting combination can be checked for validity.
"""
UNDEFINED = 1
VALUE_A = 2
VALUE_B = 4
VALUE_C = 8
IGNORE = 15
@classmethod
def are_equal(cls, value1: 'ArgValueCase', value2: 'ArgValueCase') -> bool:
return (value1 & value2) != 0
@classmethod
def array_equal(cls, arr1: list, arr2: list) -> bool:
if len(arr1) != len(arr2):
return False
return all(cls.are_equal(arr1[i], arr2[i]) for i in range(len(arr1)))
class UserArgValidatorImpl: # pylint: disable=too-few-public-methods
"""
A metaclass holding information about the validator responsible for processing the user's input.
"""
name: str
_validator_class: Type['AbsToolUserArgModel']
@property
def validator_class(self) -> Type['AbsToolUserArgModel']:
return self._validator_class
@validator_class.setter
def validator_class(self, clazz):
self._validator_class = clazz
user_arg_validation_registry: Dict[str, UserArgValidatorImpl] = defaultdict(UserArgValidatorImpl)
def register_tool_arg_validator(tool_name: str) -> Callable:
def decorator(cls: type) -> type:
cls.tool_name = tool_name
user_arg_validation_registry[tool_name].name = tool_name
user_arg_validation_registry[tool_name].validator_class = cls
return cls
return decorator
@dataclass
class AbsToolUserArgModel:
"""
Abstract class that represents the arguments collected by the user to run the tools.
It is used to do preliminary validation against some of the common patterns.
"""
cluster: Optional[str] = None
platform: Optional[CspEnv] = None
output_folder: Optional[str] = None
rejected: dict = dataclasses.field(init=False, default_factory=dict)
detected: dict = dataclasses.field(init=False, default_factory=dict)
extra: dict = dataclasses.field(init=False, default_factory=dict)
argv_cases: list = dataclasses.field(init=False,
default_factory=lambda: [])
p_args: dict = dataclasses.field(init=False, default_factory=lambda: {
'meta': {},
'toolArgs': {}
})
logger: ClassVar[Logger] = ToolLogging.get_and_setup_logger('spark_rapids_tools.argparser')
tool_name: ClassVar[str] = None
@classmethod
def create_tool_args(cls, tool_name: str, *args: Any, **kwargs: Any) -> Optional[dict]:
try:
impl_entry = user_arg_validation_registry.get(tool_name)
impl_class = impl_entry.validator_class
new_obj = impl_class(*args, **kwargs)
return new_obj.build_tools_args()
except (ValidationError, IllegalArgumentError) as e:
impl_class.logger.error('Validation err: %s\n', e)
dump_tool_usage(impl_class.tool_name)
return None
def get_eventlogs(self) -> Optional[str]:
if hasattr(self, 'eventlogs'):
return self.eventlogs
return None
def raise_validation_exception(self, validation_err: str):
raise IllegalArgumentError(
f'Invalid arguments: {validation_err}')
def determine_cluster_arg_type(self) -> ArgValueCase:
# self.cluster is provided, so we need to verify that the expected files are there
if CspPath.is_file_path(self.cluster, raise_on_error=False):
# check it is valid prop file
if AbstractPropContainer.is_valid_prop_path(self.cluster, raise_on_error=False):
# the file cannot be a http_url
if is_http_file(self.cluster):
# we do not accept http://urls
raise IllegalArgumentError(
f'Cluster properties cannot be a web URL path: {self.cluster}')
cluster_case = ArgValueCase.VALUE_B
else:
raise PydanticCustomError(
'file_path',
'Cluster property file is not in valid format {.json, .yaml, or .yml}')
else:
cluster_case = ArgValueCase.VALUE_A
return cluster_case
def detect_platform_from_cluster_prop(self):
client_cluster = ClientCluster(CspPath(self.cluster))
self.p_args['toolArgs']['platform'] = CspEnv.fromstring(client_cluster.platform_name)
def detect_platform_from_eventlogs_prefix(self):
map_storage_to_platform = {
'gcs': CspEnv.DATAPROC,
's3': CspEnv.EMR,
'local': CspEnv.ONPREM,
'hdfs': CspEnv.ONPREM,
'adls': CspEnv.DATABRICKS_AZURE
}
# in case we have a list of eventlogs, we need to split them and take the first one
ev_logs_path = CspPath(self.get_eventlogs().split(',')[0])
storage_type = ev_logs_path.get_storage_name()
self.p_args['toolArgs']['platform'] = map_storage_to_platform[storage_type]
def validate_onprem_with_cluster_name(self):
if self.platform == CspEnv.ONPREM:
raise IllegalArgumentError(
f'Invalid arguments: Cannot run cluster by name with platform [{CspEnv.ONPREM}]')
def init_extra_arg_cases(self) -> list:
return []
def init_tool_args(self):
pass
def init_arg_cases(self):
if self.platform is None:
self.argv_cases.append(ArgValueCase.UNDEFINED)
else:
self.argv_cases.append(ArgValueCase.VALUE_A)
if self.cluster is None:
self.argv_cases.append(ArgValueCase.UNDEFINED)
else:
self.argv_cases.append(self.determine_cluster_arg_type())
self.argv_cases.extend(self.init_extra_arg_cases())
def define_invalid_arg_cases(self):
pass
def define_detection_cases(self):
pass
def define_extra_arg_cases(self):
pass
def build_tools_args(self) -> dict:
pass
def apply_arg_cases(self):
for curr_cases in [self.rejected, self.detected, self.extra]:
for case_key, case_value in curr_cases.items():
if any(ArgValueCase.array_equal(self.argv_cases, case_i) for case_i in case_value['cases']):
# debug the case key
self.logger.info('...applying argument case: %s', case_key)
case_value['callable']()
def validate_arguments(self):
self.init_tool_args()
self.init_arg_cases()
self.define_invalid_arg_cases()
self.define_detection_cases()
self.define_extra_arg_cases()
self.apply_arg_cases()
def get_or_set_platform(self) -> CspEnv:
if self.p_args['toolArgs']['platform'] is None:
# set the platform to default onPrem
runtime_platform = CspEnv.get_default()
else:
runtime_platform = self.p_args['toolArgs']['platform']
self.post_platform_assignment_validation(runtime_platform)
return runtime_platform
def post_platform_assignment_validation(self, assigned_platform):
# do some validation after we decide the cluster type
if self.argv_cases[1] == ArgValueCase.VALUE_A:
if assigned_platform == CspEnv.ONPREM:
# it is not allowed to run cluster_by_name on an OnPrem platform
raise IllegalArgumentError(
f'Invalid arguments: Cannot run cluster by name with platform [{CspEnv.ONPREM}]')
@dataclass
class ToolUserArgModel(AbsToolUserArgModel):
"""
Abstract class that represents the arguments collected by the user to run the tool.
It is used to do preliminary validation against some of the common patterns.
"""
eventlogs: Optional[str] = None
def init_extra_arg_cases(self) -> list:
if self.eventlogs is None:
return [ArgValueCase.UNDEFINED]
return [ArgValueCase.VALUE_A]
def define_invalid_arg_cases(self):
super().define_invalid_arg_cases()
self.rejected['Missing Eventlogs'] = {
'valid': False,
'callable': partial(self.raise_validation_exception,
'Cannot run tool cmd. Cannot define eventlogs from input '
'(platform, cluster, and eventlogs)'),
'cases': [
[ArgValueCase.IGNORE, ArgValueCase.UNDEFINED, ArgValueCase.UNDEFINED]
]
}
self.rejected['Cluster By Name Without Platform Hints'] = {
'valid': False,
'callable': partial(self.raise_validation_exception,
'Cannot run tool cmd on a named cluster without hints about the target '
'platform. Re-run the command providing at least one of the '
'eventlogs/platform arguments.'),
'cases': [
[ArgValueCase.UNDEFINED, ArgValueCase.VALUE_A, ArgValueCase.UNDEFINED]
]
}
self.rejected['Cluster By Name Cannot go with OnPrem'] = {
'valid': False,
'callable': partial(self.validate_onprem_with_cluster_name),
'cases': [
[ArgValueCase.VALUE_A, ArgValueCase.VALUE_A, ArgValueCase.IGNORE]
]
}
def define_detection_cases(self):
self.detected['Define Platform from Cluster Properties file'] = {
'valid': True,
'callable': partial(self.detect_platform_from_cluster_prop),
'cases': [
[ArgValueCase.UNDEFINED, ArgValueCase.VALUE_B, ArgValueCase.IGNORE]
]
}
self.detected['Define Platform based on Eventlogs prefix'] = {
'valid': True,
'callable': partial(self.detect_platform_from_eventlogs_prefix),
'cases': [
[ArgValueCase.UNDEFINED, ArgValueCase.UNDEFINED, ArgValueCase.VALUE_A],
[ArgValueCase.UNDEFINED, ArgValueCase.VALUE_A, ArgValueCase.VALUE_A]
]
}
@dataclass
@register_tool_arg_validator('qualification')
class QualifyUserArgModel(ToolUserArgModel):
"""
Represents the arguments collected by the user to run the qualification tool.
It is used to do preliminary validation against some of the common patterns.
"""
target_platform: Optional[CspEnv] = None
filter_apps: Optional[QualFilterApp] = None
gpu_cluster_recommendation: Optional[QualGpuClusterReshapeType] = None
def init_tool_args(self):
self.p_args['toolArgs']['platform'] = self.platform
self.p_args['toolArgs']['savingsCalculations'] = True
self.p_args['toolArgs']['filterApps'] = self.filter_apps
self.p_args['toolArgs']['targetPlatform'] = self.target_platform
# check the reshapeType argument
if self.gpu_cluster_recommendation is None:
self.p_args['toolArgs']['gpuClusterRecommendation'] = QualGpuClusterReshapeType.get_default()
else:
self.p_args['toolArgs']['gpuClusterRecommendation'] = self.gpu_cluster_recommendation
def define_extra_arg_cases(self):
self.extra['Disable CostSavings'] = {
'valid': True,
'callable': partial(self.disable_savings_calculations),
'cases': [
[ArgValueCase.IGNORE, ArgValueCase.UNDEFINED, ArgValueCase.VALUE_A]
]
}
def _reset_savings_flags(self, reason_msg: Optional[str] = None):
self.p_args['toolArgs']['savingsCalculations'] = False
if self.p_args['toolArgs']['filterApps'] == QualFilterApp.SAVINGS:
# we cannot use QualFilterApp.SAVINGS if savingsCalculations is disabled.
self.p_args['toolArgs']['filterApps'] = QualFilterApp.SPEEDUPS
if reason_msg:
self.logger.info('Cost saving is disabled: %s', reason_msg)
def disable_savings_calculations(self):
self._reset_savings_flags(reason_msg='Cluster\'s information is missing.')
self.p_args['toolArgs']['targetPlatform'] = None
@model_validator(mode='after')
def validate_arg_cases(self) -> 'QualifyUserArgModel':
# short-circuit to fail early
self.validate_arguments()
return self
def build_tools_args(self) -> dict:
# At this point, if the platform is still none, then we can set it to the default value
# which is the onPrem platform.
runtime_platform = self.get_or_set_platform()
# check the targetPlatform argument
if self.p_args['toolArgs']['targetPlatform']:
equivalent_pricing_list = runtime_platform.get_equivalent_pricing_platform()
if not equivalent_pricing_list:
# no target_platform for that runtime environment
self.logger.info(
'Argument target_platform does not support the current cluster [%s]', runtime_platform)
self.p_args['toolArgs']['targetPlatform'] = None
else:
if not self.p_args['toolArgs']['targetPlatform'] in equivalent_pricing_list:
raise IllegalArgumentError(
'Invalid arguments: '
f'The platform [{self.p_args["toolArgs"]["targetPlatform"]}] is currently '
f'not supported to calculate savings from [{runtime_platform}] cluster')
else:
# target platform is not set; disable cost savings if the runtime platform is onprem
if CspEnv.requires_pricing_map(runtime_platform):
self._reset_savings_flags(reason_msg=f'Platform [{runtime_platform}] requires '
'"target_platform" argument to generate cost savings')
# check the filter_apps argument
if self.p_args['toolArgs']['filterApps'] is None:
# set a default filterApps argument to be savings if the cost savings is enabled
if self.p_args['toolArgs']['savingsCalculations']:
self.p_args['toolArgs']['filterApps'] = QualFilterApp.SAVINGS
else:
self.p_args['toolArgs']['filterApps'] = QualFilterApp.SPEEDUPS
# finally generate the final values
wrapped_args = {
'runtimePlatform': runtime_platform,
'outputFolder': self.output_folder,
'platformOpts': {
'credentialFile': None,
'deployMode': DeployMode.LOCAL,
# used to be sent to the scala core java cmd
'targetPlatform': self.p_args['toolArgs']['targetPlatform']
},
'migrationClustersProps': {
'cpuCluster': self.cluster,
'gpuCluster': None
},
'jobSubmissionProps': {
'remoteFolder': None,
'platformArgs': {
'jvmMaxHeapSize': 24
}
},
'savingsCalculations': self.p_args['toolArgs']['savingsCalculations'],
'eventlogs': self.eventlogs,
'filterApps': QualFilterApp.fromstring(self.p_args['toolArgs']['filterApps']),
'toolsJar': None,
'gpuClusterRecommendation': self.p_args['toolArgs']['gpuClusterRecommendation'],
# used to initialize the pricing information
'targetPlatform': self.p_args['toolArgs']['targetPlatform']
}
return wrapped_args
@dataclass
@register_tool_arg_validator('profiling')
class ProfileUserArgModel(ToolUserArgModel):
"""
Represents the arguments collected by the user to run the profiling tool.
It is used to do preliminary validation against some of the common patterns.
"""
def determine_cluster_arg_type(self) -> ArgValueCase:
cluster_case = super().determine_cluster_arg_type()
if cluster_case == ArgValueCase.VALUE_B:
# determine whether this is an autotuner file or not
auto_tuner_prop_obj = AutoTunerPropMgr.load_from_file(self.cluster, raise_on_error=False)
if auto_tuner_prop_obj:
cluster_case = ArgValueCase.VALUE_C
self.p_args['toolArgs']['autotuner'] = self.cluster
return cluster_case
def init_tool_args(self):
self.p_args['toolArgs']['platform'] = self.platform
self.p_args['toolArgs']['autotuner'] = None
def define_invalid_arg_cases(self):
super().define_invalid_arg_cases()
self.rejected['Autotuner requires eventlogs'] = {
'valid': False,
'callable': partial(self.raise_validation_exception,
'Cannot run tool cmd. AutoTuner requires eventlogs argument'),
'cases': [
[ArgValueCase.IGNORE, ArgValueCase.VALUE_C, ArgValueCase.UNDEFINED]
]
}
def define_detection_cases(self):
super().define_detection_cases()
# append the case in which the cluster argument is an autotuner input
self.detected['Define Platform based on Eventlogs prefix']['cases'].append(
[ArgValueCase.UNDEFINED, ArgValueCase.VALUE_C, ArgValueCase.VALUE_A]
)
@model_validator(mode='after')
def validate_arg_cases(self) -> 'ProfileUserArgModel':
# short-circuit to fail early
self.validate_arguments()
return self
def build_tools_args(self) -> dict:
runtime_platform = self.get_or_set_platform()
# check if the cluster info was an autotuner input
if self.p_args['toolArgs']['autotuner']:
# this is an autotuner input
self.p_args['toolArgs']['cluster'] = None
else:
# this is an actual cluster argument
self.p_args['toolArgs']['cluster'] = self.cluster
# finally generate the final values
wrapped_args = {
'runtimePlatform': runtime_platform,
'outputFolder': self.output_folder,
'platformOpts': {
'credentialFile': None,
'deployMode': DeployMode.LOCAL,
},
'migrationClustersProps': {
'gpuCluster': self.p_args['toolArgs']['cluster']
},
'jobSubmissionProps': {
'remoteFolder': None,
'platformArgs': {
'jvmMaxHeapSize': 24
}
},
'eventlogs': self.eventlogs,
'toolsJar': None,
'autoTunerFileInput': self.p_args['toolArgs']['autotuner']
}
return wrapped_args
@dataclass
@register_tool_arg_validator('bootstrap')
class BootstrapUserArgModel(AbsToolUserArgModel):
"""
Represents the arguments collected by the user to run the bootstrap tool.
It is used to do preliminary validation against some of the common patterns.
"""
dry_run: Optional[bool] = True
def build_tools_args(self) -> dict:
return {
'runtimePlatform': self.platform,
'outputFolder': self.output_folder,
'platformOpts': {},
'dryRun': self.dry_run
}
@model_validator(mode='after')
def validate_non_empty_args(self) -> 'BootstrapUserArgModel':
error_flag = 0
components = []
if self.cluster is None:
error_flag = 1
components.append('cluster')
if self.platform is None:
error_flag |= 2
components.append('platform')
if error_flag > 0:
missing = str.join(' and ', components)
raise ValueError(f'Cmd requires [{missing}] to be specified')
return self
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cmdli/argprocessor.py |
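The ArgValueCase members are bit flags (IGNORE = 0b1111 overlaps every concrete case), so a rule matches when the bitwise AND of each observed/expected pair is non-zero. A standalone sketch of that matching, using the same [platform, cluster, eventlogs] ordering as the validator above:

# Standalone reproduction of the ArgValueCase matching logic; the triples
# below mimic (platform, cluster, eventlogs) states from the validator.
from enum import IntEnum

class Case(IntEnum):
    UNDEFINED = 1
    VALUE_A = 2
    VALUE_B = 4
    VALUE_C = 8
    IGNORE = 15

def are_equal(a, b):
    return (a & b) != 0

def array_equal(arr1, arr2):
    return len(arr1) == len(arr2) and all(are_equal(x, y) for x, y in zip(arr1, arr2))

observed = [Case.UNDEFINED, Case.VALUE_B, Case.VALUE_A]  # no platform, cluster props file, eventlogs given
rule = [Case.UNDEFINED, Case.VALUE_B, Case.IGNORE]       # 'Define Platform from Cluster Properties file'
assert array_equal(observed, rule)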
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the library that interface with RAPIDS tools"""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/tools/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Includes classes and wrappers related to autotuner feature"""
from typing import Optional, ClassVar, Type
from spark_rapids_tools.utils.propmanager import PropValidatorSchemaCamel, PropValidatorSchema, AbstractPropContainer
class AutoTunerInputSchema(PropValidatorSchemaCamel):
system: dict
gpu: Optional[dict] = None
software_properties: Optional[dict] = None
class AutoTunerPropMgr(AbstractPropContainer):
schema_clzz: ClassVar[Type['PropValidatorSchema']] = AutoTunerInputSchema
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/tools/autotuner.py |
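AutoTunerInputSchema relies on the camel-case alias generator from PropValidatorSchemaCamel, so snake_case fields validate camelCase keys in the properties payload. A standalone sketch of that pattern with an invented payload; the helper below mirrors to_camel_case rather than importing it:

# Hypothetical payload; only the alias-generation pattern is being demonstrated.
from typing import Optional
from pydantic import BaseModel, ConfigDict

def to_camel_case(word: str) -> str:
    parts = word.split('_')
    return parts[0] + ''.join(p.capitalize() for p in parts[1:])

class DemoAutoTunerSchema(BaseModel):
    model_config = ConfigDict(alias_generator=to_camel_case, extra='allow')
    system: dict
    software_properties: Optional[dict] = None

# field names are snake_case in Python but populated from camelCase keys
obj = DemoAutoTunerSchema(**{'system': {'numCores': 8}, 'softwareProperties': {}})
print(obj.software_properties)  # -> {}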
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of helpers and utilities related to manage the properties and dictionaries."""
import json
from functools import partial
from json import JSONDecodeError
from pathlib import Path as PathlibPath
from typing import Union, Any, TypeVar, ClassVar, Type, Tuple, Optional
import yaml
from pydantic import BaseModel, ConfigDict, model_validator, ValidationError
from spark_rapids_tools.exceptions import JsonLoadException, YamlLoadException, InvalidPropertiesSchema
from spark_rapids_tools.storagelib.csppath import CspPath, CspPathT
from spark_rapids_tools.utils.util import to_camel_case, to_camel_capital_case, get_elem_from_dict, get_elem_non_safe
def load_json(file_path: Union[str, CspPathT]) -> Any:
if isinstance(file_path, str):
file_path = CspPath(file_path)
with file_path.open_input_stream() as fis:
try:
return json.load(fis)
except JSONDecodeError as e:
raise JsonLoadException('Incorrect format of JSON File') from e
except TypeError as e:
raise JsonLoadException('Incorrect Type of JSON content') from e
def load_yaml(file_path: Union[str, CspPathT]) -> Any:
if isinstance(file_path, str):
file_path = CspPath(file_path)
with file_path.open_input_stream() as fis:
try:
return yaml.safe_load(fis)
except yaml.YAMLError as e:
raise YamlLoadException('Incorrect format of Yaml File') from e
PropContainerT = TypeVar('PropContainerT', bound='AbstractPropContainer')
PropValidatorSchemaT = TypeVar('PropValidatorSchemaT', bound='PropValidatorSchema')
class PropValidatorSchema(BaseModel):
"""
Base class that uses Pydantic to validate a given schema
"""
model_config = ConfigDict(extra='allow')
@classmethod
def is_valid_schema(cls, raise_on_error: bool,
prop: Any) -> Tuple[bool, Optional[PropValidatorSchemaT]]:
try:
# Instantiate cluster_schema instance
new_obj = cls(**prop)
return True, new_obj
except ValidationError as exc:
if raise_on_error:
raise InvalidPropertiesSchema('Invalid Schema for the properties. ', exc) from exc
return False, None
class PropValidatorSchemaCamel(PropValidatorSchema):
model_config = ConfigDict(alias_generator=to_camel_case)
class PropValidatorSchemaUpper(PropValidatorSchema):
model_config = ConfigDict(alias_generator=to_camel_capital_case)
class AbstractPropContainer(BaseModel):
"""
An abstract class that loads properties (dictionary).
"""
props: Any
schema_clzz: ClassVar[Type['PropValidatorSchema']] = None
@classmethod
def is_valid_prop_path(cls,
file_path: Union[str, PathlibPath],
raise_on_error: bool = True) -> bool:
return CspPath.is_file_path(file_path,
extensions=['json', 'yaml', 'yml'],
raise_on_error=raise_on_error)
@model_validator(mode='before')
@classmethod
def validate_prop_schema(cls, data: Any) -> Any:
if cls.schema_clzz is None:
return data
cls.schema_clzz.is_valid_schema(True, data.get('props'))
return data
@classmethod
def load_from_file(cls,
file_path: Union[str, CspPathT],
raise_on_error: bool = True) -> Optional[PropContainerT]:
loader_func = partial(load_json, file_path)
if not str(file_path).endswith('.json'):
loader_func = partial(load_yaml, file_path)
try:
prop = loader_func()
new_prop_obj = cls(props=prop)
return new_prop_obj
except FileNotFoundError as fe:
if raise_on_error:
raise ValueError(f'Input file {file_path} does not exist') from fe
except (InvalidPropertiesSchema, ValidationError) as ve:
if raise_on_error:
raise ve
return None
def get_value(self, *key_strs):
return get_elem_from_dict(self.props, key_strs)
def get_value_silent(self, *key_strs):
return get_elem_non_safe(self.props, key_strs)
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/utils/propmanager.py |
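get_value() and get_value_silent() ultimately chain dictionary lookups with reduce(getitem, ...) (see get_elem_from_dict in the utils module). A minimal sketch against a made-up nested dict:

# The props dict and keys below are invented; the lookup mirrors
# get_elem_from_dict()/get_value().
from functools import reduce
from operator import getitem

props = {'catalog': {'ssd': {'unitSizeFactor': 0.513}}}
print(reduce(getitem, ('catalog', 'ssd', 'unitSizeFactor'), props))  # -> 0.513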
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility and helper methods"""
import os
import pathlib
import re
import sys
from functools import reduce
from operator import getitem
from typing import Any, Optional
import fire
from pydantic import ValidationError, AnyHttpUrl, TypeAdapter
import spark_rapids_pytools
from spark_rapids_tools.exceptions import CspPathAttributeError
def get_elem_from_dict(data, keys):
try:
return reduce(getitem, keys, data)
except LookupError:
print(f'ERROR: Could not find elements [{keys}]')
return None
def get_elem_non_safe(data, keys):
try:
return reduce(getitem, keys, data)
except LookupError:
return None
def stringify_path(fpath) -> str:
if isinstance(fpath, str):
actual_val = fpath
elif hasattr(fpath, '__fspath__'):
actual_val = os.fspath(fpath)
else:
raise CspPathAttributeError('Not a valid path')
expanded_path = os.path.expanduser(actual_val)
# make sure we return absolute path
return os.path.abspath(expanded_path)
def is_http_file(value: Any) -> bool:
try:
TypeAdapter(AnyHttpUrl).validate_python(value)
return True
except ValidationError:
# ignore
return False
def get_path_as_uri(fpath: str) -> str:
if re.match(r'\w+://', fpath):
# that's already a valid url
return fpath
# stringify the path so the common handling (user expansion, absolute path) can be applied.
local_path = stringify_path(fpath)
return pathlib.PurePath(local_path).as_uri()
def to_camel_case(word: str) -> str:
return word.split('_')[0] + ''.join(x.capitalize() or '_' for x in word.split('_')[1:])
def to_camel_capital_case(word: str) -> str:
return ''.join(x.capitalize() for x in word.split('_'))
def to_snake_case(word: str) -> str:
return ''.join(['_' + i.lower() if i.isupper() else i for i in word]).lstrip('_')
def dump_tool_usage(tool_name: Optional[str], raise_sys_exit: Optional[bool] = True):
imported_module = __import__('spark_rapids_tools.cmdli', globals(), locals(), ['ToolsCLI'])
wrapper_clzz = getattr(imported_module, 'ToolsCLI')
help_name = 'ascli'
usage_cmd = f'{tool_name} --help'
try:
fire.Fire(wrapper_clzz(), name=help_name, command=usage_cmd)
except fire.core.FireExit:
# ignore the sys.exit(0) thrown by the help usage.
# ideally we want to exit with error
pass
if raise_sys_exit:
sys.exit(1)
def gen_app_banner() -> str:
"""
ASCII Art is generated by an online Text-to-ASCII Art generator tool https://patorjk.com/software/taag
:return: a string representing the banner of the user tools including the version
"""
c_ver = spark_rapids_pytools.__version__
return rf"""
********************************************************************
* *
* _____ __ ____ _ __ *
* / ___/____ ____ ______/ /__ / __ \____ _____ (_)___/ /____ *
* \__ \/ __ \/ __ `/ ___/ //_/ / /_/ / __ `/ __ \/ / __ / ___/ *
* ___/ / /_/ / /_/ / / / ,< / _, _/ /_/ / /_/ / / /_/ (__ ) *
* /____/ .___/\__,_/_/ /_/|_| /_/ |_|\__,_/ .___/_/\__,_/____/ *
* /_/__ __ ______ /_/ __ *
* / / / /_______ _____ /_ __/___ ____ / /____ *
* / / / / ___/ _ \/ ___/ / / / __ \/ __ \/ / ___/ *
* / /_/ (__ ) __/ / / / / /_/ / /_/ / (__ ) *
* \____/____/\___/_/ /_/ \____/\____/_/____/ *
* *
* Version. {c_ver} *
* *
* NVIDIA Corporation *
* [email protected] *
********************************************************************
"""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/utils/util.py |
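A quick standalone check of the naming helpers defined above; the logic is copied verbatim so the snippet runs on its own, and 'software_properties' is just a sample key:

def to_camel_case(word: str) -> str:
    return word.split('_')[0] + ''.join(x.capitalize() or '_' for x in word.split('_')[1:])

def to_camel_capital_case(word: str) -> str:
    return ''.join(x.capitalize() for x in word.split('_'))

def to_snake_case(word: str) -> str:
    return ''.join(['_' + i.lower() if i.isupper() else i for i in word]).lstrip('_')

assert to_camel_case('software_properties') == 'softwareProperties'
assert to_camel_capital_case('software_properties') == 'SoftwareProperties'
assert to_snake_case('SoftwareProperties') == 'software_properties'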
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the utils package for the Accelerated Spark tools"""
from .util import (
get_elem_from_dict, get_elem_non_safe, is_http_file
)
from .propmanager import (
AbstractPropContainer,
PropValidatorSchema
)
__all__ = [
'get_elem_from_dict',
'get_elem_non_safe',
'AbstractPropContainer',
'PropValidatorSchema',
'is_http_file'
]
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for FS that wraps on top of the remote clients"""
import abc
import os
from typing import Generic, Callable, TypeVar, Any, Union
from pyarrow import fs as arrow_fs
from .csppath import CspPathImplementation, CspPath, path_impl_registry
from ..exceptions import (
CspPathNotFoundException
)
BoundedCspPath = TypeVar('BoundedCspPath', bound=CspPath)
BoundedArrowFsT = TypeVar('BoundedArrowFsT', bound=arrow_fs.FileSystem)
def register_fs_class(key: str, fs_subclass: str) -> Callable:
def decorator(cls: type) -> type:
if not issubclass(cls, CspFs):
raise TypeError('Only subclasses of CspFs can be registered.')
path_impl_registry[key].fs_class = cls
path_impl_registry[key].name = key
imported_module = __import__('pyarrow.fs', globals(), locals(), [fs_subclass])
defined_clzz = getattr(imported_module, fs_subclass)
path_impl_registry[key].fslib_class = defined_clzz
cls._path_meta = path_impl_registry[key] # pylint: disable=protected-access
return cls
return decorator
def custom_dir(orig_type, new_type):
"""
Given a type orig_type, it adds the attributes found in the new_type. Used by the delegator;
see the description in the CspFs class.
"""
return dir(type(orig_type)) + list(orig_type.__dict__.keys()) + new_type
class CspFs(abc.ABC, Generic[BoundedCspPath]):
"""
Abstract FileSystem that provides input and output streams as well as directory operations.
The datapaths are abstract representations.
This class uses delegations to utilize the interface implemented in the pyArrow Filesystem
class. That way, we don't have to rewrite every single API.
Instead, if the attribute exists in the child class it is going to be used.
See more explanation in the blogpost
https://www.fast.ai/posts/2019-08-06-delegation.html
Attribute accesses listed in "self._xtra" are passed down to the wrapped "self.fs" object.
"""
_path_meta: CspPathImplementation
_default_fs = None
@classmethod
def create_fs_handler(cls, *args: Any, **kwargs: Any) -> BoundedArrowFsT:
return cls._path_meta.fslib_class(*args, **kwargs)
@classmethod
def get_default_client(cls) -> 'CspFs':
if cls._default_fs is None:
cls._default_fs = cls()
return cls._default_fs
@property
def _xtra(self):
"""returns the members defined in the child class as long as they are not protected"""
return [o for o in dir(self.fs) if not o.startswith('_')]
def __getattr__(self, k):
"""returns the members defined in the child class as long as they are not protected"""
if k in self._xtra:
return getattr(self.fs, k)
raise AttributeError(k)
def __dir__(self):
"""extends the list of attributes to include the child class"""
return custom_dir(self, self._xtra)
def __init__(self, *args: Any, **kwargs: Any):
self.fs = self.create_fs_handler(*args, **kwargs)
def create_as_path(self, entry_path: Union[str, BoundedCspPath]) -> BoundedCspPath:
return self._path_meta.path_class(entry_path=entry_path, fs_obj=self)
@classmethod
def copy_resources(cls, src: BoundedCspPath, dest: BoundedCspPath):
"""
Copy files between FileSystems.
This functions allows you to recursively copy directories of files from
one file system to another, such as from S3 to your local machine. Note that the
copy_resources uses threads by default. The chunk size is set to 1 MB.
:param src: BoundedCspPath
Source file path or URI to a single file or directory
If a directory, files will be copied recursively from this path.
:param dest: BoundedCspPath
Destination directory where the source is copied to.
If the directory does not exist, it will be created first.
If the source is a file, then the final destination will be dest/file_name
If the source is a directory, then a new folder is created under dest as
"dest/src".
"""
# check that the src path exists
if not src.exists():
raise CspPathNotFoundException(f'Source Path does not exist {src}')
        dest_path = os.path.join(str(dest), src.base_name())
        if src.is_dir():
            # create a subfolder in the destination
            dest = dest.fs_obj.create_as_path(entry_path=dest_path)
            # dest must be a directory. make sure it exists
            dest.create_dirs()
        else:
            dest.create_dirs()
            dest = dest.fs_obj.create_as_path(entry_path=dest_path)
arrow_fs.copy_files(src.no_prefix, dest.no_prefix,
source_filesystem=src.fs_obj.fs,
destination_filesystem=dest.fs_obj.fs)
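    # Illustrative usage sketch (not part of the original module): the delegation described
    # above means any public pyArrow FileSystem method is reachable directly on a CspFs
    # instance, and copy_resources can move data across filesystems. The bucket and local
    # paths below are hypothetical.
    #   src = CspPath('s3://my-bucket/eventlogs')      # dispatches to S3Path
    #   dst = CspPath('file:///tmp/eventlogs')         # dispatches to LocalPath
    #   CspFs.copy_resources(src, dst)                 # recursive copy via pyArrow copy_files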
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/cspfs.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the storagelib package which offers a common interface to access any FS protocol."""
from .s3.s3fs import S3Fs
from .s3.s3path import S3Path
from .gcs.gcsfs import GcsFs
from .gcs.gcspath import GcsPath
from .hdfs.hdfsfs import HdfsFs
from .hdfs.hdfspath import HdfsPath
from .adls.adlsfs import AdlsFs
from .adls.adlspath import AdlsPath
from .local.localfs import LocalFs
from .local.localpath import LocalPath
from .csppath import CspPathT, path_impl_registry, CspPath
from .cspfs import CspFs, BoundedArrowFsT, register_fs_class
__all__ = [
'AdlsFs',
'AdlsPath',
'CspFs',
'CspPath',
'BoundedArrowFsT',
'GcsFs',
'GcsPath',
'HdfsFs',
'HdfsPath',
'LocalFs',
'LocalPath',
'CspPathT',
'path_impl_registry',
'register_fs_class',
'S3Fs',
'S3Path',
]
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abstract representation of a file path that can access local/URI values.
Similar to the cloudpathlib project, this implementation uses a dict registry to
register an implementation for each protocol. However, the path representation is built on
top of the pyArrow FS API. As a result, there is no need to write a full storage client to
access remote files. This comes with a tradeoff of providing a limited set of file
operations.
"""
import abc
import sys
from collections import defaultdict
from functools import cached_property
from pathlib import Path as PathlibPath
from typing import Union, Type, TypeVar, Any, Dict, Callable, overload, Optional, TYPE_CHECKING, List
from pyarrow import fs
from pyarrow.fs import FileType, FileSystem, FileInfo
from pydantic import ValidationError, model_validator, FilePath, AnyHttpUrl, StringConstraints, TypeAdapter
from pydantic.dataclasses import dataclass
from pydantic_core import PydanticCustomError
from typing_extensions import Annotated
from ..exceptions import (
InvalidProtocolPrefixError,
FSMismatchError, CspFileExistsError
)
from ..utils.util import get_path_as_uri, is_http_file
if sys.version_info >= (3, 10):
from typing import TypeGuard
else:
from typing_extensions import TypeGuard
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
if TYPE_CHECKING:
from .cspfs import CspFs
CspPathString = Annotated[str, StringConstraints(pattern=r'^\w+://.*')]
class CspPathImplementation:
"""
A metaclass implementation that describes the behavior of the path class
"""
name: str
_path_class: Type['CspPath']
_fs_class: Type['CspFs']
_fslib_class: Type['FileSystem']
@property
def fs_class(self) -> Type['CspFs']:
return self._fs_class
@fs_class.setter
def fs_class(self, clazz):
self._fs_class = clazz
@property
def path_class(self) -> Type['CspPath']:
return self._path_class
@path_class.setter
def path_class(self, clazz):
self._path_class = clazz
@property
def fslib_class(self) -> Type['FileSystem']:
return self._fslib_class
@fslib_class.setter
def fslib_class(self, clazz):
self._fslib_class = clazz
path_impl_registry: Dict[str, CspPathImplementation] = defaultdict(CspPathImplementation)
T = TypeVar('T')
CspPathT = TypeVar('CspPathT', bound='CspPath')
def register_path_class(key: str) -> Callable[[Type[CspPathT]], Type[CspPathT]]:
def decorator(cls: Type[CspPathT]) -> Type[CspPathT]:
if not issubclass(cls, CspPath):
            raise TypeError('Only subclasses of CspPath can be registered.')
path_impl_registry[key].path_class = cls
cls._path_meta = path_impl_registry[key] # pylint: disable=protected-access
return cls
return decorator
@dataclass
class AcceptedFilePath:
"""
Class used to represent input that can be accepted as file paths.
"""
file_path: Union[CspPathString, FilePath, AnyHttpUrl]
extensions: Optional[List[str]] = None
@model_validator(mode='after')
def validate_file_extensions(self) -> 'AcceptedFilePath':
if self.extensions:
if not any(str(self.file_path).endswith(ext) for ext in self.extensions):
raise PydanticCustomError(
'file_path',
                    (f'Invalid file extension for input file {self.file_path}. '
                     f'Accepted: {self.extensions}'))
        return self
def is_http_file(self) -> bool:
return is_http_file(self.file_path)
class CspPathMeta(abc.ABCMeta):
"""
Class meta used to add hooks to the type of the CspPath as needed.
This is used typically to dynamically assign any class type as subclass to CspPath.
"""
@overload
def __call__(
cls: Type[T], entry_path: CspPathT, *args: Any, **kwargs: Any
) -> CspPathT:
...
@overload
def __call__(
cls: Type[T], entry_path: Union[str, 'CspPath'], *args: Any, **kwargs: Any
) -> T:
...
def __call__(
cls: Type[T], entry_path: Union[str, CspPathT], *args: Any, **kwargs: Any
) -> Union[T, CspPathT]:
        # cls is a class that is an instance of this metaclass, e.g., CspPath
if not issubclass(cls, CspPath):
raise TypeError(
f'Only subclasses of {CspPath.__name__} can be instantiated from its meta class.'
)
if isinstance(entry_path, str):
# convert the string to uri if it is not
entry_path = get_path_as_uri(entry_path)
        # Dispatch to subclass if called on the base CspPath
if cls is CspPath:
for path_clz_entry in path_impl_registry.values():
path_class = path_clz_entry.path_class
if path_class is not None and path_class.is_valid_csppath(
entry_path, raise_on_error=False
):
# Instantiate path_class instance
new_obj = object.__new__(path_class)
path_class.__init__(new_obj, entry_path, *args, **kwargs)
return new_obj
new_obj = object.__new__(cls)
cls.__init__(new_obj, entry_path, *args, **kwargs) # type: ignore[type-var]
return new_obj
class CspPath(metaclass=CspPathMeta):
"""
    Base class for storage systems, based on pyArrow's FileSystem. The class provides support for
    URI/local file paths like "gs://", "s3://", "abfss://", and "file://".
    Instances represent an absolute path in a storage system with filesystem-path semantics, and
    support basic operations like streaming and opening a file for read/write.
    Only basic metadata about file entries, such as the file size and modification time, is made
    available.
Examples
--------
Create a new path subclass from a gcs URI:
>>> gs_path = CspPath('gs://bucket-name/folder_00/subfolder_01')
<spark_rapids_tools.storagelib.gcs.gcpath.GcsPath object at ...>
or from S3 URI:
>>> s3_path = CspPath('s3://bucket-name/folder_00/subfolder_01')
<spark_rapids_tools.storagelib.s3.s3path.S3Path object at ...>
or from local file URI:
>>> local_path1, local_path2 = (CspPath('~/my_folder'), CspPath('file:///my_folder'))
<spark_rapids_tools.storagelib.local.localpath.LocalPath object at ...,
spark_rapids_tools.storagelib.local.localpath.LocalPath object at ...>
    Print the data from the file with `open_input_stream()`:
    >>> with gs_path.open_input_stream() as f:
    ...     print(f.readall())
    b'data'
    Check whether the path is a file:
>>> gs_path = CspPath('gs://bucket-name/folder_00/subfolder_01')
>>> print(gs_path.is_file())
"""
protocol_prefix: str
_path_meta: CspPathImplementation
@staticmethod
def is_file_path(file_path: Union[str, PathlibPath],
extensions: List[str] = None,
raise_on_error: bool = True):
try:
TypeAdapter(AcceptedFilePath).validate_python({'file_path': file_path, 'extensions': extensions})
return True
except ValidationError as err:
if raise_on_error:
raise err
return False
@overload
@classmethod
def is_valid_csppath(cls, path: str, raise_on_error: bool = ...) -> bool:
...
@overload
@classmethod
def is_valid_csppath(cls, path: 'CspPath', raise_on_error: bool = ...) -> TypeGuard[Self]:
...
@classmethod
def is_valid_csppath(
cls, path: Union[str, 'CspPath'], raise_on_error: bool = False
) -> Union[bool, TypeGuard[Self]]:
valid = cls.is_protocol_prefix(str(path))
if raise_on_error and not valid:
raise InvalidProtocolPrefixError(
f'"{path}" is not a valid path since it does not start with "{cls.protocol_prefix}"'
)
return valid
def __init__(
self,
entry_path: Union[str, Self],
fs_obj: Optional['CspFs'] = None
) -> None:
self.is_valid_csppath(entry_path, raise_on_error=True)
self._fpath = str(entry_path)
if fs_obj is None:
if isinstance(entry_path, CspPath):
fs_obj = entry_path.fs_obj
else:
fs_obj = self._path_meta.fs_class.get_default_client()
if not isinstance(fs_obj, self._path_meta.fs_class):
raise FSMismatchError(
f'Client of type [{fs_obj.__class__}] is not valid for cloud path of type '
f'[{self.__class__}]; must be instance of [{self._path_meta.fs_class}], or '
f'None to use default client for this cloud path class.'
)
self.fs_obj = fs_obj
self._file_info = None
def __str__(self) -> str:
return self._fpath
@classmethod
def is_protocol_prefix(cls, value: str) -> bool:
return value.lower().startswith(cls.protocol_prefix.lower())
@cached_property
def no_prefix(self) -> str:
return self._fpath[len(self.protocol_prefix):]
def _pull_file_info(self) -> FileInfo:
return self.fs_obj.get_file_info(self.no_prefix)
@cached_property
def file_info(self) -> FileInfo:
self._file_info = self._pull_file_info()
return self._file_info
def is_file(self):
return self.file_info.is_file
def is_dir(self):
return self.file_info.type == FileType.Directory
def exists(self) -> bool:
f_info = self.file_info
return f_info.type in [FileType.File, FileType.Directory]
def base_name(self) -> str:
return self.file_info.base_name
def create_dirs(self, exist_ok: bool = True):
if not exist_ok:
# check that the file does not exist
if self.exists():
raise CspFileExistsError(f'Path already Exists: {self}')
self.fs_obj.create_dir(self.no_prefix)
# force the file information object to be retrieved again by invalidating the cached property
if 'file_info' in self.__dict__:
del self.__dict__['file_info']
def open_input_stream(self):
return self.fs_obj.open_input_stream(self.no_prefix)
def open_output_stream(self):
return self.fs_obj.open_output_stream(self.no_prefix)
@classmethod
def download_files(cls, src_url: str, dest_url: str):
fs.copy_files(src_url, dest_url)
@classmethod
def get_storage_name(cls) -> str:
return cls._path_meta.name
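    # Illustrative sketch (hypothetical local paths): creating a directory and streaming bytes
    # through the pyArrow-backed streams exposed by CspPath.
    #   out_dir = CspPath('file:///tmp/qual_output')
    #   out_dir.create_dirs()
    #   out_file = CspPath('file:///tmp/qual_output/report.bin')
    #   with out_file.open_output_stream() as out:
    #       out.write(b'data')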
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/csppath.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper implementation for S3 remote path"""
from ..csppath import CspPath, register_path_class
@register_path_class("s3")
class S3Path(CspPath):
protocol_prefix: str = "s3://"
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/s3/s3path.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the storage library of S3"""
from .s3fs import S3Fs
from .s3path import S3Path
__all__ = [
"S3Fs",
"S3Path",
]
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/s3/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for the S3 File system"""
from spark_rapids_tools.storagelib.cspfs import register_fs_class, CspFs
@register_fs_class('s3', 'S3FileSystem')
class S3Fs(CspFs):
"""
Implementation of FileSystem for S3-backed filesystem on top of pyArrow
(Docstring copied from pyArrow.S3FileSystem)
The S3FileSystem is initialized with the following list of arguments:
>>> S3FileSystem(access_key=None, *, secret_key=None, session_token=None, bool anonymous=False,
... region=None, request_timeout=None, connect_timeout=None, scheme=None,
... endpoint_override=None, bool background_writes=True, default_metadata=None,
... role_arn=None, session_name=None, external_id=None, load_frequency=900,
... proxy_options=None, allow_bucket_creation=False, allow_bucket_deletion=False,
... retry_strategy: S3RetryStrategy = AwsStandardS3RetryStrategy(max_attempts=3))
If neither access_key nor secret_key are provided, and role_arn is also not
provided, then attempts to initialize from AWS environment variables,
otherwise both access_key and secret_key must be provided.
"""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/s3/s3fs.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the storage library of ADLS systems (Azure data lake storage)"""
from .adlsfs import AdlsFs
from .adlspath import AdlsPath
__all__ = [
'AdlsFs',
'AdlsPath',
]
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/adls/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper implementation for ADLS remote path"""
from ..csppath import CspPath, register_path_class
@register_path_class("adls")
class AdlsPath(CspPath):
protocol_prefix: str = "abfss://"
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/adls/adlspath.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for the ADLS File system"""
from typing import Any
import adlfs
from pyarrow.fs import PyFileSystem, FSSpecHandler
from ..cspfs import CspFs, BoundedArrowFsT, register_fs_class
@register_fs_class("adls", "PyFileSystem")
class AdlsFs(CspFs):
"""Access Azure Datalake Gen2 and Azure Storage if it were a file system using Multiprotocol
Access (Docstring copied from adlfs).
Since AzureBlobFileSystem follows the fsspec interface, this class wraps it into a python-based
PyArrow filesystem (PyFileSystem) using FSSpecHandler.
The initialization of the filesystem looks for the following env_variables:
AZURE_STORAGE_ACCOUNT_NAME
AZURE_STORAGE_ACCOUNT_KEY
AZURE_STORAGE_CONNECTION_STRING
AZURE_STORAGE_SAS_TOKEN
AZURE_STORAGE_CLIENT_ID
AZURE_STORAGE_CLIENT_SECRET
AZURE_STORAGE_TENANT_ID
"""
@classmethod
def create_fs_handler(cls, *args: Any, **kwargs: Any) -> BoundedArrowFsT:
azure_fs = adlfs.AzureBlobFileSystem(*args, **kwargs)
return PyFileSystem(FSSpecHandler(azure_fs))
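    # Illustrative usage sketch (assumes the AZURE_STORAGE_* environment variables listed
    # above provide credentials; the account and container below are hypothetical):
    #   adls = AdlsFs.get_default_client()
    #   logs = adls.create_as_path('abfss://container@account.dfs.core.windows.net/eventlogs')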
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/adls/adlsfs.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper implementation for local path"""
from ..csppath import register_path_class, CspPath
@register_path_class('local')
class LocalPath(CspPath):
protocol_prefix: str = 'file://'
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/local/localpath.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the storage library of local disk storage"""
from .localfs import LocalFs
from .localpath import LocalPath
__all__ = [
'LocalFs',
'LocalPath',
]
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/local/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for the local File system"""
from ..cspfs import CspFs, register_fs_class
@register_fs_class('local', 'LocalFileSystem')
class LocalFs(CspFs):
"""
A FileSystem implementation accessing files on the local machine. Implemented on top of pyArrow.
"""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/local/localfs.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper implementation for HDFS remote path"""
from ..csppath import register_path_class, CspPath
@register_path_class("hdfs")
class HdfsPath(CspPath):
protocol_prefix: str = "hdfs://"
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/hdfs/hdfspath.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for the Hadoop File system"""
from ..cspfs import CspFs, register_fs_class
@register_fs_class("hdfs", "HadoopFileSystem")
class HdfsFs(CspFs):
"""Implementation of FileSystem for HAdoopFileSystem on top of pyArrow
(Docstring copied from pyArrow.HadoopFileSystem)
The HadoopFileSystem is initialized with the following list of arguments:
>>> HadoopFileSystem(unicode host, int port=8020, unicode user=None, *, int replication=3,
... int buffer_size=0, default_block_size=None, kerb_ticket=None, extra_conf=None)
The libhdfs library is loaded at runtime (rather than at link / library load time, since the
library may not be in your LD_LIBRARY_PATH), and relies on some environment variables.
HADOOP_HOME: the root of your installed Hadoop distribution. Often has lib/native/libhdfs.so.
JAVA_HOME: the location of your Java SDK installation.
ARROW_LIBHDFS_DIR (optional): explicit location of libhdfs.so if it is installed somewhere
other than $HADOOP_HOME/lib/native.
CLASSPATH: must contain the Hadoop jars.
    Example: export CLASSPATH=`$HADOOP_HOME/bin/hadoop classpath --glob`
"""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/hdfs/hdfsfs.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the storage library of Hadoop file systems"""
from .hdfsfs import HdfsFs
from .hdfspath import HdfsPath
__all__ = [
'HdfsFs',
'HdfsPath',
]
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/hdfs/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for the Google storage File system"""
from ..cspfs import CspFs, register_fs_class
@register_fs_class('gcs', 'GcsFileSystem')
class GcsFs(CspFs):
"""Implementation of FileSystem for Google storage on top of pyArrow
(Docstring copied from pyArrow.GcsFileSystem).
The GcsFileSystem is initialized with the following list of arguments:
>>> GcsFileSystem(bool anonymous=False, *,
... access_token=None, target_service_account=None,
... credential_token_expiration=None, default_bucket_location='US',
... scheme=None, endpoint_override=None, default_metadata=None, retry_time_limit=None)
    The constructor uses the process described in https://google.aip.dev/auth/4110
to resolve credentials. If not running on Google Cloud Platform (GCP), this generally requires
the environment variable GOOGLE_APPLICATION_CREDENTIALS to point to a JSON file containing
credentials.
"""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/gcs/gcsfs.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper implementation for Gstorage remote path"""
from ..csppath import register_path_class, CspPath
@register_path_class("gcs")
class GcsPath(CspPath):
protocol_prefix: str = "gs://"
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/gcs/gcspath.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the storage library of googleStorage systems"""
from .gcsfs import GcsFs
from .gcspath import GcsPath
__all__ = [
'GcsFs',
'GcsPath',
]
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/storagelib/gcs/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the library that represents CSP interface and functionalities"""
from .cluster import ClientCluster
from .onprem.onpremcluster import OnPremClientCluster
from .emr.emrcluster import EmrClientCluster
from .dataproc.dataproccluster import DataprocClientCluster
from .databricks.dbcluster import DBAwsClientCluster, DBAzureClientCluster
__all__ = [
'ClientCluster',
'DBAwsClientCluster',
'DBAzureClientCluster',
'DataprocClientCluster',
'EmrClientCluster',
'OnPremClientCluster'
]
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cloud/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define an abstract cluster representation and the dataclasses that provide
information about the hardware configurations.
"""
import abc
from collections import defaultdict
from typing import Type, Union, Any, TypeVar, Dict, Callable, ClassVar, Optional
from ..exceptions import InvalidPropertiesSchema
from ..storagelib import CspPathT
from ..utils import AbstractPropContainer, PropValidatorSchema
CT = TypeVar('CT')
ClientClusterT = TypeVar('ClientClusterT', bound='ClientCluster')
class ClusterProxy:
"""
Class holding metadata of the client implementation that interfaces with Cluster
"""
name: str
dependencies_loaded: bool = True
_client_clzz: Type['ClientCluster']
_prop_mgr_clzz: Type['ClusterPropMgr']
@property
def client_clzz(self) -> Type['ClientCluster']:
return self._client_clzz
@client_clzz.setter
def client_clzz(self, clzz):
self._client_clzz = clzz
@property
def prop_mgr_clzz(self) -> Type['ClusterPropMgr']:
return self._prop_mgr_clzz
@prop_mgr_clzz.setter
def prop_mgr_clzz(self, clzz):
self._prop_mgr_clzz = clzz
cluster_registry: Dict[str, ClusterProxy] = defaultdict(ClusterProxy)
def register_client_cluster(key: str) -> Callable[[Type[ClientClusterT]], Type[ClientClusterT]]:
def decorator(cls: Type[ClientClusterT]) -> Type[ClientClusterT]:
if not issubclass(cls, ClientCluster):
            raise TypeError('Only subclasses of ClientCluster can be registered.')
cluster_registry[key].client_clzz = cls
cluster_registry[key].name = key
cls._client_impl_meta = cluster_registry[key] # pylint: disable=protected-access
return cls
return decorator
def register_cluster_prop_mgr(key: str) -> Callable:
def decorator(cls: type) -> type:
if not issubclass(cls, ClusterPropMgr):
raise TypeError('Only subclasses of ClusterPropMgr can be registered.')
cluster_registry[key].prop_mgr_clzz = cls
return cls
return decorator
class ClientClusterMeta(abc.ABCMeta):
"""
    Metaclass for ClientCluster. It dispatches instantiation to the registered implementation
    whose property schema validates the given properties file.
"""
def __call__(cls: Type[CT], file_path: CspPathT, *args: Any, **kwargs: Any
) -> Union[CT, ClientClusterT]:
        # cls is a class that is an instance of this metaclass, e.g., ClientCluster
if not issubclass(cls, ClientCluster):
raise TypeError(
f'Only subclasses of {ClientCluster.__name__} can be instantiated from its meta class.'
)
# Dispatch to subclass if base ClientCluster
if cls is ClientCluster:
for client_proxy in cluster_registry.values():
client_clzz = client_proxy.client_clzz
prop_mgr_clzz = client_proxy.prop_mgr_clzz
if prop_mgr_clzz is not None and client_clzz is not None:
prop_mgr_obj = prop_mgr_clzz.load_from_file(file_path, False)
if prop_mgr_obj is not None:
client_obj = object.__new__(client_clzz)
client_clzz.__init__(client_obj, prop_mgr=prop_mgr_obj, *args, **kwargs)
return client_obj
# no implementation matched the provided property file
# raise an error
raise InvalidPropertiesSchema(
msg=f'Incorrect properties files: [{file_path}] '
'is incorrect or it does not match a valid Schema')
prop_mgr_obj = cls._client_impl_meta.prop_mgr_clzz.load_from_file(file_path, True)
client_obj = object.__new__(cls)
cls.__init__(client_obj, prop_mgr=prop_mgr_obj, *args, **kwargs)
return client_obj
class ClusterPropMgr(AbstractPropContainer):
schema_clzz: ClassVar[Type['PropValidatorSchema']] = None
class ClientCluster(metaclass=ClientClusterMeta): # pylint: disable=too-few-public-methods
_client_impl_meta: ClusterProxy
def __init__(self, prop_mgr: Optional['ClusterPropMgr']):
self._prop_mgr = prop_mgr
@property
def platform_name(self) -> str:
return self._client_impl_meta.name
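    # Illustrative sketch (hypothetical file path): instantiating the base ClientCluster with a
    # properties file dispatches to the registered implementation whose schema validates it;
    # an InvalidPropertiesSchema is raised when no registered schema matches.
    #   cluster = ClientCluster('/tmp/my_cluster_props.json')
    #   print(cluster.platform_name)   # e.g. 'dataproc' or 'emr', depending on the file contents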
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cloud/cluster.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define implementation for the EMR cluster
"""
from typing import ClassVar, Type
from spark_rapids_tools.cloud.cluster import register_cluster_prop_mgr, register_client_cluster, ClusterPropMgr, ClientCluster
from spark_rapids_tools.utils.propmanager import PropValidatorSchemaUpper, PropValidatorSchema
class EmrClusterSchema(PropValidatorSchemaUpper):
cluster: dict
@register_cluster_prop_mgr('emr')
class EmrClusterPropMgr(ClusterPropMgr):
schema_clzz: ClassVar[Type['PropValidatorSchema']] = EmrClusterSchema
@register_client_cluster('emr')
class EmrClientCluster(ClientCluster): # pylint: disable=too-few-public-methods
pass
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cloud/emr/emrcluster.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the cloud implementation for EMR."""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cloud/emr/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define implementation for the onPrem cluster
"""
from typing import ClassVar, Type
from typing_extensions import TypedDict
from pydantic import ConfigDict
from spark_rapids_tools.cloud.cluster import ClientCluster, ClusterPropMgr, register_cluster_prop_mgr, register_client_cluster
from spark_rapids_tools.utils.propmanager import PropValidatorSchema
from spark_rapids_tools.utils.util import to_camel_case
class OnPremDriverConfigSchema(TypedDict):
num_cores: int
memory: str
class OnPremExecutorConfigSchema(TypedDict):
num_cores: int
memory: str
num_workers: int
class OnPremClusterConfigSchema(TypedDict):
__pydantic_config__ = ConfigDict(alias_generator=to_camel_case)
master_config: OnPremDriverConfigSchema
worker_config: OnPremExecutorConfigSchema
class OnPremClusterSchema(PropValidatorSchema):
config: OnPremClusterConfigSchema
@register_cluster_prop_mgr('onprem')
class OnPremClusterPropMgr(ClusterPropMgr):
schema_clzz: ClassVar[Type['PropValidatorSchema']] = OnPremClusterSchema
@register_client_cluster('onprem')
class OnPremClientCluster(ClientCluster): # pylint: disable=too-few-public-methods
pass
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cloud/onprem/onpremcluster.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the cloud implementation for onPrem."""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cloud/onprem/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the cloud implementation for databricks."""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cloud/databricks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define implementation for the databricks cluster
"""
from typing import ClassVar, Type, Optional
from spark_rapids_tools.cloud.cluster import register_client_cluster, register_cluster_prop_mgr, ClusterPropMgr, ClientCluster
from spark_rapids_tools.utils.propmanager import PropValidatorSchema
class DBAwsClusterSchema(PropValidatorSchema):
cluster_id: str
driver: dict
aws_attributes: dict
spark_conf: Optional[dict] = None
class DBAzureClusterSchema(PropValidatorSchema):
cluster_id: str
driver: dict
azure_attributes: dict
spark_conf: Optional[dict] = None
@register_cluster_prop_mgr('databricks_aws')
class DBAwsClusterPropMgr(ClusterPropMgr):
schema_clzz: ClassVar[Type['PropValidatorSchema']] = DBAwsClusterSchema
@register_client_cluster('databricks_aws')
class DBAwsClientCluster(ClientCluster): # pylint: disable=too-few-public-methods
pass
@register_cluster_prop_mgr('databricks_azure')
class DBAzureClusterPropMgr(ClusterPropMgr):
schema_clzz: ClassVar[Type['PropValidatorSchema']] = DBAzureClusterSchema
@register_client_cluster('databricks_azure')
class DBAzureClientCluster(ClientCluster): # pylint: disable=too-few-public-methods
pass
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cloud/databricks/dbcluster.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define implementation for the dataproc cluster
"""
from typing import ClassVar, Type
from spark_rapids_tools.cloud.cluster import ClientCluster, register_client_cluster, ClusterPropMgr, register_cluster_prop_mgr
from spark_rapids_tools.utils.propmanager import PropValidatorSchemaCamel, PropValidatorSchema
class DataprocClusterSchema(PropValidatorSchemaCamel):
cluster_name: str
cluster_uuid: str
project_id: str
config: dict
@register_cluster_prop_mgr('dataproc')
class DataprocClusterPropMgr(ClusterPropMgr):
schema_clzz: ClassVar[Type[PropValidatorSchema]] = DataprocClusterSchema
@register_client_cluster('dataproc')
class DataprocClientCluster(ClientCluster): # pylint: disable=too-few-public-methods
pass
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cloud/dataproc/dataproccluster.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the cloud implementation for dataproc."""
| spark-rapids-tools-dev | user_tools/src/spark_rapids_tools/cloud/dataproc/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build helpers."""
import datetime
import os
def get_version(main=None):
if main is None:
# pylint: disable=import-outside-toplevel
from spark_rapids_validation_tool import VERSION as main
suffix = ''
nightly = os.environ.get('USERTOOLS_NIGHTLY')
if nightly == '1':
suffix = '.dev' + datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
return main + suffix
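# Illustrative behavior sketch of get_version (values below are examples, not actual releases):
#   get_version('23.02.0')                                -> '23.02.0'
#   with USERTOOLS_NIGHTLY=1 set in the environment       -> '23.02.0.dev<UTC timestamp>'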
| spark-rapids-tools-dev | data_validation/src/spark_rapids_validation_tool/build.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""init file of the spark_rapids_validation package."""
from spark_rapids_validation_tool.build import get_version
VERSION = '23.02.0'
__version__ = get_version(VERSION) | spark-rapids-tools-dev | data_validation/src/spark_rapids_validation_tool/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper class to run tools associated with RAPIDS Accelerator for Apache Spark plugin."""
import fire
import yaml
from spark_rapids_validation_tool.data_validation_dataproc import DataValidationDataproc
class DataprocWrapper(object):
def validation_parse(self,
cluster,
region,
check,
format,
table1,
table1_partition,
table2,
table2_partition,
pk,
exclude_column,
include_column,
filter,
output_path,
output_format,
precision,
debug,
spark_conf):
"""
        Run the data validation tool on a remote Dataproc cluster to compare whether two tables have the same
        results. One common scenario is verifying that a Spark job using the RAPIDS Accelerator (a GPU Spark
        job) returns the same result as the CPU Spark job. Here we assume the two tables have the same column names.
:param cluster: Name of the Dataproc cluster.
:param region: Region of Dataproc cluster (e.g. us-central1)
:param check: Metadata validation or Data validation (e.g. valid_metadata or valid_data. )
        :param format: The format of the tables; currently only the hive format is fully supported. If the format is parquet/orc/csv, table1 and table2 should be absolute paths. Options are [hive, orc, parquet, csv] (e.g. --format=hive or --format=parquet)
:param table1: The first table name, if the format is parquet/orc/csv, this value should be an absolute path. (e.g. --table1=table1)
:param table1_partition: The first table’s partition clause. (e.g. --table1_partition=partition1='p1')
        :param table2: The second table name; if the format is parquet/orc/csv, this value should be an absolute path. (e.g. --table2=table2)
:param table2_partition: The second table’s partition clause. (e.g. --table2_partition=partition1='p1')
:param pk: The Primary key columns(comma separated), pk is required for data_validation. (e.g. --pk=pk1,pk2,pk3).
:param exclude_column: Exclude column option. What columns do not need to be involved in the comparison, default is None. (e.g. --exclude_column=col4,col5,col6)
:param include_column: Include column option. What columns need to be involved in the comparison, default is ALL. (e.g. --include_column=col1,col2,col3)
:param filter: Condition to filter rows. (e.g. --filter “col1=value1 and col2=value2”)
:param output_path: Output directory, the tool will generate a data file to a path. (e.g. --output_path=/data/output)
:param output_format: Output format, default is parquet. (e.g. --output_format=parquet)
        :param precision: Precision for numeric comparisons; if set to 4 digits, then 0.11113 and 0.11114 are considered equal for numeric columns. (e.g. --precision=4)
        :param debug: True or False to enable verbosity.
        :param spark_conf: Spark configuration properties applied to the submitted validation job.
"""
if not cluster or not region:
raise Exception('Invalid cluster or region for Dataproc environment. '
'Please provide options "--cluster=<CLUSTER_NAME> --region=<REGION>" properly.')
validate = DataValidationDataproc(cluster, region, check, format, table1, table1_partition, table2,
table2_partition, pk, exclude_column, include_column, filter,
output_path, output_format, precision, debug, spark_conf)
if any(p is None for p in [cluster, region, table1, table2, format]):
print('|--cluster/region/format/table1/table2 should not be none--|')
return
if format not in ['hive', 'orc', 'parquet', 'csv']:
print('|--format should be one of hive/parquet/orc/csv--|')
return
if check == 'valid_data' and pk is None:
print('|--pk should be not be none if running valid_data--|')
return
getattr(validate, check)()
def validation(self, conf_file: str):
with open(conf_file, "r") as file:
validate_conf = yaml.safe_load(file)
spark_conf = validate_conf['sparkConf']
tool_conf = validate_conf['toolConf']
self.validation_parse(tool_conf['cluster'],
tool_conf['region'],
tool_conf['check'],
tool_conf['format'],
tool_conf['table1'],
tool_conf['table1_partition'],
tool_conf['table2'],
tool_conf['table2_partition'],
tool_conf['pk'],
tool_conf['exclude_column'],
tool_conf['include_column'],
tool_conf['filter'],
tool_conf['output_path'],
tool_conf['output_format'],
tool_conf['precision'],
tool_conf['debug'],
spark_conf)
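    # Illustrative sketch of the YAML layout that `validation` expects (all values below are
    # hypothetical; the keys mirror the lookups performed above):
    #   sparkConf:
    #     spark.executor.cores: '4'
    #   toolConf:
    #     cluster: my-cluster
    #     region: us-central1
    #     check: valid_data
    #     format: hive
    #     table1: table1
    #     table1_partition: null
    #     table2: table2
    #     table2_partition: null
    #     pk: pk1
    #     exclude_column: null
    #     include_column: all
    #     filter: null
    #     output_path: /data/output
    #     output_format: parquet
    #     precision: 4
    #     debug: false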
def main():
fire.Fire(DataprocWrapper)
if __name__ == '__main__':
main() | spark-rapids-tools-dev | data_validation/src/spark_rapids_validation_tool/dataproc_wrapper.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fire
from spark_rapids_validation_tool.csp import new_csp
from spark_rapids_validation_tool.data_validation import Validation
class DataValidationDataproc(Validation):
"""DataValidation tool for Dataproc."""
def __init__(self, cluster_name, region, check, format, table1, table1_partition, table2,
table2_partition, pk, excluded_column: str, included_column: str, filter: str,
output_dir, output_format, precision, debug, spark_conf):
super().__init__(debug)
self.cluster = new_csp('dataproc', args={'cluster': cluster_name, 'region': region})
self.format = format
self.table1 = table1
self.table2 = table2
self.table1_partition = table1_partition
self.table2_partition = table2_partition
self.pk = pk
self.excluded_column = excluded_column
self.included_column = included_column
self.filter = filter
self.output_dir = output_dir
self.output_format = output_format
self.precision = precision
self.spark_conf = spark_conf
def on(node): # pylint: disable=invalid-name,no-self-argument,too-many-function-args
"""On decorator."""
def inner_decorator(func):
def wrapper(self, *args, **kwargs):
for i in self.cluster.get_nodes(node):
def run(cmd, check=True, capture=''):
return self.cluster.run_ssh_cmd(cmd, i, check, capture) # pylint: disable=cell-var-from-loop
self.run_cmd = run
func(self, *args, **kwargs)
return wrapper
return inner_decorator
def all(self):
self.valid_metadata()
self.valid_data()
    def format_conf_with_quotation(self, conf):
        if conf is None:
            return 'None'
        return conf.replace('\'', '\\\'')
@on('master') # pylint: disable=too-many-function-args
def valid_metadata(self):
"""metadata validation spark via Dataproc job interface."""
print("|--Start Running Metadata Validation.....--|")
if self.excluded_column is None:
excluded_column = 'None'
else:
excluded_column = self.convert_tuple_to_string(self.excluded_column)
compare_job = {
'type': self.cluster.JOB_TYPE_PYSPARK,
'file': super().get_validation_scripts('metadata_validation.py'),
'properties': self.spark_conf,
'parameters': [
f'--table1={self.table1}',
f'--table2={self.table2}',
f'--format={self.format}',
f'--table1_partition={self.table1_partition}',
f'--table2_partition={self.table2_partition}',
f'--include_column={self.convert_tuple_to_string(self.included_column)}',
f'--pk={self.pk}',
f'--exclude_column={excluded_column}',
f'--filter={self.format_conf_with_quotation(self.filter)}',
f'--output_path={self.output_dir}',
f'--output_format={self.output_format}',
f'--precision={self.precision}'
]
}
output = self.cluster.submit_job(compare_job)
print(output)
def convert_tuple_to_string(self, conf):
        '''fire automatically converts a comma-separated config value from str to tuple'''
if isinstance(conf, tuple):
return ','.join(map(str, conf))
elif isinstance(conf, str):
return conf
else:
raise Exception(f'invalid type of conf : {conf}')
@Validation.banner
def valid_data(self):
"""data validation spark via Dataproc job interface."""
print("|--Start Running Data Validation.....--|")
compare_job = {
'type': self.cluster.JOB_TYPE_PYSPARK,
'file': super().get_validation_scripts('dataset_validation.py'),
'properties': self.spark_conf,
'parameters':[
f'--table1={self.table1}',
f'--table2={self.table2}',
f'--format={self.format}',
f'--table1_partition={self.table1_partition}',
f'--table2_partition={self.table2_partition}',
f'--include_column={self.convert_tuple_to_string(self.included_column)}',
f'--pk={self.pk}',
f'--exclude_column={self.excluded_column}',
f'--filter={self.format_conf_with_quotation(self.filter)}',
f'--output_path={self.output_dir}',
f'--output_format={self.output_format}',
f'--precision={self.precision}'
]
}
output = self.cluster.submit_job(compare_job)
print(output)
def main():
"""Main function."""
fire.Fire(DataValidationDataproc)
if __name__ == '__main__':
main() | spark-rapids-tools-dev | data_validation/src/spark_rapids_validation_tool/data_validation_dataproc.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data validation tool basic program."""
import logging
from typing import Callable
import fire
import pkg_resources
# Setup logging
logger = logging.getLogger('validation')
logger.setLevel(logging.INFO)
class Validation:
"""Data Validation tool basic class."""
def __init__(self, debug=False):
if debug:
logger.setLevel(logging.DEBUG)
self.summary = {}
def banner(func: Callable): # pylint: disable=no-self-argument
"""Banner decorator."""
def wrapper(self, *args, **kwargs):
name = func.__name__ # pylint: disable=no-member
logger.info('*** Running validation function "%s" ***', name)
result = True
try:
func(self, *args, **kwargs) # pylint: disable=not-callable
except Exception as exception: # pylint: disable=broad-except
logger.error('Error: %s', exception)
result = False
if result:
logger.info('*** Check "%s": PASS ***', name)
else:
logger.info('*** Check "%s": FAIL ***', name)
# Save result into summary
if name in self.summary:
self.summary[name] = any([result, self.summary[name]])
else:
self.summary[name] = result
return wrapper
def run_spark_submit(self, options, capture='all'):
"""Run spark application via spark-submit command."""
cmd = ['$SPARK_HOME/bin/spark-submit']
cmd += options
stdout, stderr = self.run_cmd(cmd, capture=capture)
return stdout + stderr
def get_validation_scripts(self, name):
"""Get validation script path by name"""
return pkg_resources.resource_filename(__name__, 'validation_scripts/' + name)
def main():
"""Main function."""
fire.Fire(Validation)
if __name__ == '__main__':
main() | spark-rapids-tools-dev | data_validation/src/spark_rapids_validation_tool/data_validation.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of global utilities and helpers methods."""
import json
import logging
import os
import secrets
import string
import subprocess
import sys
from dataclasses import dataclass, field
from functools import reduce
from json import JSONDecodeError
from operator import getitem
from pathlib import Path
from shutil import rmtree, which
from typing import Any
import yaml
def get_log_dict(args):
return {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '{asctime} {levelname} {name}: {message}',
'style': '{',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'root': {
'handlers': ['console'],
'level': 'DEBUG' if args.get('debug') else 'INFO',
},
}
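# Illustrative usage sketch (assumes an additional `import logging.config` at module level,
# which is not part of this file):
# logging.config.dictConfig(get_log_dict({'debug': True}))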
logger = logging.getLogger(__name__)
def bail(msg, err):
"""
Print message and the error before terminating the program.
:param msg: message error to display.
:param err: the Error/Exception that caused the failure.
:return: NONE
"""
print(f'{msg}.\n\t> {err}.\nTerminated.')
sys.exit(1)
def get_elem_from_dict(data, keys):
try:
return reduce(getitem, keys, data)
except LookupError:
print(f'ERROR: Could not find elements [{keys}]')
return None
def get_elem_non_safe(data, keys):
try:
return reduce(getitem, keys, data)
except LookupError:
return None
def convert_dict_to_camel_case(dic: dict):
"""
Given a dictionary with underscore-separated keys, convert the keys to camelCase.
Example: gce_cluster_config -> gceClusterConfig
:param dic: the dictionary to be converted
:return: a dictionary where all the keys are camelCase.
"""
def to_camel_case(word: str) -> str:
return word.split('_')[0] + ''.join(x.capitalize() or '_' for x in word.split('_')[1:])
if isinstance(dic, list):
return [convert_dict_to_camel_case(i) if isinstance(i, (dict, list)) else i for i in dic]
res = {}
for key, value in dic.items():
if isinstance(value, (dict, list)):
res[to_camel_case(key)] = convert_dict_to_camel_case(value)
else:
res[to_camel_case(key)] = value
return res
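# Illustrative example:
# convert_dict_to_camel_case({'gce_cluster_config': {'zone_uri': 'us-central1-a'}})
# returns {'gceClusterConfig': {'zoneUri': 'us-central1-a'}}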
def gen_random_string(str_length: int) -> str:
return ''.join(secrets.choice(string.hexdigits) for _ in range(str_length))
def get_gpu_device_list():
return ['T4', 'V100', 'K80', 'A100', 'P100']
def is_valid_gpu_device(val):
return val.upper() in get_gpu_device_list()
def get_gpu_short_name(val: str) -> str:
"""
Given a value string, return the short name of the GPU device.
:param val: the full device name, for example nvidia-tesla-t4
:return: the short name of the GPU device (e.g. T4); otherwise, None.
"""
upper_full_name = val.upper()
for short_name in get_gpu_device_list():
if upper_full_name.find(short_name) != -1:
return short_name
return None
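# Illustrative examples:
# get_gpu_short_name('nvidia-tesla-t4') returns 'T4'
# get_gpu_short_name('unknown-device') returns None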
def is_system_tool(tool_name):
"""
Check whether a tool is installed on the system.
:param tool_name: name of the tool to check
:return: True or False
"""
return which(tool_name) is not None
def remove_dir(dir_path: str, fail_on_error: bool = True):
try:
rmtree(dir_path)
except OSError as error:
if fail_on_error:
bail(f'Could not remove directory {dir_path}', error)
def make_dirs(dir_path: str, exist_ok: bool = True):
try:
os.makedirs(dir_path, exist_ok=exist_ok)
except OSError as error:
bail(f'Error Creating directories {dir_path}', error)
def resource_path(resource_name: str) -> str:
# pylint: disable=import-outside-toplevel
if sys.version_info < (3, 9):
import importlib_resources
else:
import importlib.resources as importlib_resources
pkg = importlib_resources.files('spark_rapids_dataproc_tools')
return pkg / 'resources' / resource_name
@dataclass
class AbstractPropertiesContainer(object):
"""
An abstract class that loads properties (dictionary).
"""
prop_arg: str
file_load: bool = True
props: Any = field(default=None, init=False)
def get_value(self, *key_strs):
return get_elem_from_dict(self.props, key_strs)
def get_value_silent(self, *key_strs):
return get_elem_non_safe(self.props, key_strs)
def _init_fields(self):
pass
def _load_properties_from_file(self):
"""
In some cases, we want to be able to accept both JSON and YAML formats when the properties are saved as a file.
:return:
"""
file_suffix = Path(self.prop_arg).suffix
if file_suffix in ('.yaml', '.yml'):
# this is a yaml property
self.__open_yaml_file()
else:
# this is a json file
self.__open_json_file()
def __open_json_file(self):
try:
with open(self.prop_arg, 'r', encoding='utf-8') as json_file:
try:
self.props = json.load(json_file)
except JSONDecodeError as e:
bail('Incorrect format of JSON File', e)
except TypeError as e:
bail('Incorrect Type of JSON content', e)
except OSError as err:
bail('Please ensure the json file exists and you have the required access privileges.', err)
def __open_yaml_file(self):
try:
with open(self.prop_arg, 'r', encoding='utf-8') as yaml_file:
try:
self.props = yaml.safe_load(yaml_file)
except yaml.YAMLError as e:
bail('Incorrect format of Yaml File', e)
except OSError as err:
bail('Please ensure the properties file exists and you have the required access privileges.', err)
def _load_as_yaml(self):
if self.file_load:
# this is a file argument
self._load_properties_from_file()
else:
try:
self.props = yaml.safe_load(self.prop_arg)
except yaml.YAMLError as e:
bail('Incorrect format of Yaml File', e)
def _load_as_json(self):
if self.file_load:
# this is a file argument
self._load_properties_from_file()
else:
try:
self.props = json.loads(self.prop_arg)
except JSONDecodeError as e:
bail('Incorrect format of JSON File', e)
except TypeError as e:
bail('Incorrect Type of JSON content', e)
@dataclass
class YAMLPropertiesContainer(AbstractPropertiesContainer):
def __post_init__(self):
self._load_as_yaml()
self._init_fields()
@dataclass
class JSONPropertiesContainer(AbstractPropertiesContainer):
def __post_init__(self):
self._load_as_json()
self._init_fields()
def run_cmd(cmd, check=True, capture=''):
"""Run command and check return code, capture output etc."""
stdout = None
stderr = None
if capture:
if capture == 'stdout':
stdout = subprocess.PIPE
elif capture == 'stderr':
stderr = subprocess.PIPE
elif capture == 'all':
stdout, stderr = subprocess.PIPE, subprocess.PIPE
else:
raise Exception(f'unknown capture value: {capture}')
# pylint: disable=subprocess-run-check
result = subprocess.run(' '.join(cmd), executable='/bin/bash', shell=True, stdout=stdout, stderr=stderr)
# pylint: enable=subprocess-run-check
logger.debug('run_cmd: %s', result)
if check:
if result.returncode == 0:
if stdout and stderr:
return result.stdout.decode('utf-8'), result.stderr.decode('utf-8')
if stdout:
return result.stdout.decode('utf-8')
if stderr:
return result.stderr.decode('utf-8')
else:
raise Exception(f'run cmd failed: {result}')
return result
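# Illustrative usage sketch (the command shown is an assumption, not from this module):
# out, err = run_cmd(['ls', '-l', '/tmp'], capture='all')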
| spark-rapids-tools-dev | data_validation/src/spark_rapids_validation_tool/utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pyspark import SparkContext # pylint: disable=import-error
from pyspark.sql import SparkSession, DataFrame, functions as F # pylint: disable=import-error
from pyspark.sql.functions import col, when # pylint: disable=import-error
import time
from pyspark.sql.types import *
import fnmatch
from functools import reduce
def validation(spark, args):
if not valid_input(spark,args):
print('|--Please Check The Inputs --|')
return
# validate PK(s) that exist only in table1
result = valid_pk_only_in_one_table(spark, args.format, args.table1, args.table2, args.table1_partition, args.table2_partition, args.pk,
args.exclude_column, args.include_column, args.filter, args.output_path, args.output_format)
print(f'|--PK(s) only in {args.table1} :--|')
print(result.show())
# validate PK(s) that exist only in table2
result = valid_pk_only_in_one_table(spark, args.format, args.table2, args.table1, args.table2_partition, args.table1_partition, args.pk,
args.exclude_column, args.include_column, args.filter, args.output_path, args.output_format)
print(f'|--PK(s) only in {args.table2} :--|')
print(result.show())
# validate rows with the same PK(s) but different values in the compared column(s)
result = get_cols_diff_with_same_pk(spark, args.format, args.table1, args.table2, args.pk, args.table1_partition, args.table2_partition,
args.filter, args.include_column, args.exclude_column, args.precision)
print("|--Columns with same PK(s) but diff values :--|")
print(result.show())
print('|--------------run validation success-------|')
save_result(result, args.output_path, args.output_format)
def save_result(df, path, output_format):
if path != 'None':
df.write.mode("overwrite").format(output_format).save(path)
def valid_input(spark, args):
"""
Check whether the input is valid for the data validation tool:
1- the tables exist
2- the included columns are valid
3- the format is supported
"""
if not valid_table(spark, args):
return False
if not valid_metadata_included_column(spark, args):
return False
if args.format != 'hive':
print('|--Currently only support hive format--|')
return True
def valid_table(spark, args):
"""
Check if the tables exist
"""
if not spark._jsparkSession.catalog().tableExists(args.table1):
print(f'|--Table {args.table1} does not exist!--|')
return False
if not spark._jsparkSession.catalog().tableExists(args.table2):
print(f'|--Table {args.table2} does not exist!--|')
return False
return True
def valid_metadata_included_column(spark, args):
"""
Check if the included columns are valid
"""
if args.include_column in ['None', 'all']:
return True
table_DF = load_table(spark, args.format, args.table1, args.table1_partition, args.pk, args.include_column, args.filter, "")
excluded_columns_list = [e.strip() for e in args.exclude_column.split(",")]
verify_column = [i.strip() for i in args.include_column.split(",") if i not in excluded_columns_list]
verify_DF = table_DF.select(verify_column)
for c in verify_DF.schema.fields:
# only the 'date' type is excluded here because it raises an exception; str/map/nested types should also be excluded
if(any(fnmatch.fnmatch(c.dataType.simpleString(), pattern) for pattern in
['*date*'])):
print(f'|--Unsupported metadata included data type: {c.dataType.simpleString()} for column: {c}--|')
return False
return True
# def row_counts(spark, format, table, t1p, t1f):
# """Get the row counts of a table according"""
# sql = "select count(*) from table"
# where_clause = ""
# if t1p != 'None' and t1f !='None':
# where_clause = f" where {t1p} and {t1f}"
# elif t1p != 'None':
# where_clause = f" where {t1p}"
# elif t1f != 'None':
# where_clause = f" where {t1f}"
# if format in ['parquet', 'orc', 'csv']:
# path = table
# spark.read.format(format).load(path).createOrReplaceTempView("table")
# sql += where_clause
#
# result = spark.sql(sql)
# return result
# elif format == "hive":
# print("----todo---hive--")
# return 0
def valid_pk_only_in_one_table(spark, format, table1, table2, table1_partition, table2_partition, pk,
exclude_column, include_column, filter, output_path, output_format):
"""valid PK(s) only in one table"""
if format in ['parquet', 'orc', 'csv']:
# load table1
load_table(spark, format, table1, table1_partition, pk, include_column, filter, "table1")
# load table2
load_table(spark, format, table2, table2_partition, pk, include_column, filter, "table2")
sql = f"select {pk} from table1 except select {pk} from table2"
result = spark.sql(sql)
return result
elif format == "hive":
sql1 = f"select {pk} from {table1} "
sql2 = f"select {pk} from {table2} "
if any(cond != 'None' for cond in [table1_partition,filter]):
where_clause = ' where ' + ' and '.join(x for x in [table1_partition, filter] if x != 'None')
sql1 += where_clause
if any(cond != 'None' for cond in [table2_partition,filter]):
where_clause = ' where ' + ' and '.join(x for x in [table2_partition, filter] if x != 'None')
sql2 += where_clause
sql = sql1 + " except " + sql2
result = spark.sql(sql)
return result
return
def get_cols_diff_with_same_pk(spark, format, table1_name, table2_name, pk, table1_partition, table2_partition, filter, included_columns, excluded_columns, precision):
if format in ['parquet', 'orc', 'csv']:
pk_list = [i.strip() for i in pk.split(",")]
included_columns_list = [i.strip() for i in included_columns.split(",")]
excluded_columns_list = [e.strip() for e in excluded_columns.split(",")]
select_columns = [f't1.{p}' for p in pk.split(',')] + [f't1.{c} as t1_{c}, t2.{c} as t2_{c}' for c in included_columns_list if
c not in excluded_columns_list]
sql = f"""
SELECT {', '.join(select_columns)}
FROM table1 t1
FULL OUTER JOIN table2 t2 ON {' AND '.join([f't1.{c} = t2.{c}' for c in pk_list])}
WHERE ({' or '.join([f't1.{c} <> t2.{c}' for c in included_columns_list if c not in excluded_columns_list])} )
"""
if table1_partition != 'None':
table1_partition = [p.strip() for p in table1_partition.split("and")]
sql += ' AND ( ' + ' AND '.join([f't1.{p} ' for p in table1_partition]) + ' )'
if filter != 'None':
filters = [f.strip() for f in filter.split("and")]
sql += ' AND ( ' + ' AND '.join([f't1.{f} ' for f in filters]) + ' )'
# Execute the query and return the result
result = spark.sql(sql)
return result
elif format == "hive":
# todo: convert nested type to string using udf
pk_list = [i.strip() for i in pk.split(",")]
included_columns_list = [i.strip() for i in included_columns.split(",")]
excluded_columns_list = [e.strip() for e in excluded_columns.split(",")]
@F.udf(returnType=StringType())
def map_to_string(data):
# Sort the keys and values in the map
sorted_data = sorted(data.items(), key=lambda x: x[0]) if isinstance(data, dict) else sorted(
[(k, sorted(v)) for k, v in data.items()], key=lambda x: x[0])
return str(dict(sorted_data))
table_DF1 = load_table(spark, format, table1_name, table1_partition, pk, included_columns, filter, "table1")
table_DF2 = load_table(spark, format, table2_name, table2_partition, pk, included_columns, filter, "table2")
if included_columns == 'all':
included_columns_list = list(set(table_DF1.columns) - set(excluded_columns_list) - set(pk_list))
joined_table = table_DF1.alias("t1").join(table_DF2.alias("t2"), pk_list)
map_cols = []
cond = []
for c in table_DF1.schema.fields:
if (any(fnmatch.fnmatch(c.dataType.simpleString(), pattern) for pattern in
['*map*'])):
map_cols.append(c.name)
normal_cols = list(set(table_DF1.columns) - set(map_cols))
for c in normal_cols:
cond.append(col("t1." + c) != col("t2." + c))
for c in map_cols:
cond.append(map_to_string(col("t1." + c)) != map_to_string(col("t2." + c)))
normal_columns_list = [(when(col('t1.' + c) != col('t2.' + c), col('t1.' + c)).otherwise('').alias('t1_' + c),
when(col('t2.' + c) != col('t1.' + c), col('t2.' + c)).otherwise('').alias('t2_' + c)) for c in
normal_cols if
c not in excluded_columns_list and c not in pk_list]
map_columns_list = [(when(map_to_string(col('t1.' + c)) != map_to_string(col('t2.' + c)), map_to_string(col('t1.' + c))).otherwise('').alias('t1_' + c),
when(map_to_string(col('t2.' + c)) != map_to_string(col('t1.' + c)), map_to_string(col('t2.' + c))).otherwise('').alias('t2_' + c))
for c in
map_cols if
c not in excluded_columns_list]
select_columns_list = normal_columns_list + map_columns_list
# flatten select_columns_list
select_columns_flattened_list = [select_column for sublist in select_columns_list for select_column in sublist]
select_columns = [col('t1.' + p) for p in pk.split(',')] + select_columns_flattened_list
result_table = joined_table.select(select_columns).where(reduce(lambda a, b: a | b,cond))
return result_table
def load_table(spark, format, table, table_partition, pk, include_column, filter, view_name):
if format in ['parquet', 'orc', 'csv']:
# select column clause
cols = '*' if include_column is None else include_column
# cols = cols if e is None else cols + f", EXCEPT ({e}) "
sql = f"select {pk},{cols} from {view_name}"
# where clause
where_clause = ""
path = table
if table_partition != 'None' and filter != 'None':
where_clause = f" where {table_partition} and {filter}"
elif table_partition != 'None':
where_clause = f" where {table_partition}"
# partition clause should be in real order as data path
# path += partition_to_path(t1p)
elif filter != 'None':
where_clause = f" where {filter}"
spark.read.format(format).load(path).createOrReplaceTempView(view_name)
sql += where_clause
result = spark.sql(sql)
return result
elif format == "hive":
if include_column in ['None', 'all']:
sql = f"select * from {table} "
else:
# select_column = [include_column.strip() for include_column in i.split(",") if
# i not in excluded_columns_list]
# select_column_str = select_column
sql = f"select {pk},{include_column} from {table} "
if any(cond != 'None' for cond in [table_partition, filter]):
where_clause = ' where ' + ' and '.join(x for x in [table_partition, filter] if x != 'None')
sql += where_clause
result = spark.sql(sql)
return result
def partition_to_path(partition_str, path):
partition = {}
if partition_str:
partition_items = partition_str.split("and")
partition = dict(item.split("=") for item in partition_items)
partition_path = "/".join([f"{col}={val}" for col, val in partition.items()])
return f"{path}/{partition_path}".replace(" ", "")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--format',
type=str,
help='The format of tables')
parser.add_argument('--table1',
type=str,
help='table1')
parser.add_argument('--table2',
type=str,
help='table2')
parser.add_argument('--table1_partition',
type=str,
help='table1 partition')
parser.add_argument('--table2_partition',
type=str,
help='table2 partition')
parser.add_argument('--pk',
type=str,
help='primary key')
parser.add_argument('--exclude_column',
type=str,
help='Exclude column option')
parser.add_argument('--include_column',
type=str,
help='Include column option')
parser.add_argument('--filter',
type=str,
help='Condition to filter rows')
parser.add_argument('--output_path',
type=str,
help='Output directory')
parser.add_argument('--output_format',
type=str,
help='Output format, default is parquet')
parser.add_argument('--precision',
type=int,
help='Precision, default is 4')
args = parser.parse_args()
sc = SparkContext(appName='data-validation')
spark = SparkSession(sc)
validation(spark, args) | spark-rapids-tools-dev | data_validation/src/spark_rapids_validation_tool/validation_scripts/dataset_validation.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pyspark import SparkContext
from pyspark.sql import SparkSession, functions as F
from pyspark.sql.functions import col, min, max, avg, stddev, countDistinct, when, asc, round
import fnmatch
from pyspark.sql.types import *
def validation(spark, args):
if not valid_input(spark,args):
print('|--Please Check The Inputs --|')
return
result = top_level_metadata(spark, args.format, args.table1, args.table2, args.table1_partition, args.table2_partition, args.filter)
print('|--Top Level Metadata Info--|')
print(result.show())
result = metrics_metadata(spark, args.format, args.table1, args.table2, args.table1_partition,
args.table2_partition, args.pk, args.include_column, args.exclude_column, args.filter, args.precision)
if result.count() == 0:
print(f'|--Table {args.table1} and Table {args.table2} has identical metadata info--|')
print(result.show())
else:
print('|--Metadata Diff Info--|')
print(result.show())
save_result(result, args.output_path, args.output_format)
print('|--Run Metadata Validation Success--|')
def save_result(df, path, output_format):
if path != 'None':
df.write.mode("overwrite").format(output_format).save(path)
def valid_input(spark, args):
"""
Check whether the input is valid for the metadata validation tool:
1- the tables exist
2- the included columns are valid
3- the format is supported
"""
if not valid_table(spark, args):
return False
if not valid_metadata_included_column(spark, args):
return False
if args.format != 'hive':
print('|--Currently only support hive format--|')
return True
def valid_table(spark, args):
"""
Check if the tables exist
"""
if not spark._jsparkSession.catalog().tableExists(args.table1):
print(f'|--Table {args.table1} does not exist!--|')
return False
if not spark._jsparkSession.catalog().tableExists(args.table2):
print(f'|--Table {args.table2} does not exist!--|')
return False
return True
def valid_metadata_included_column(spark, args):
"""
Check if the included columns are valid
"""
if args.include_column in ['None', 'all']:
return True
table_DF = load_table(spark, args.format, args.table1, args.table1_partition, args.pk, args.include_column, args.filter, "")
excluded_columns_list = [e.strip() for e in args.exclude_column.split(",")]
verify_column = [i.strip() for i in args.include_column.split(",") if i not in excluded_columns_list]
verify_DF = table_DF.select(verify_column)
for c in verify_DF.schema.fields:
# only the 'date' type is excluded here because it raises an exception; str/map/nested types should also be excluded
if(any(fnmatch.fnmatch(c.dataType.simpleString(), pattern) for pattern in
['*date*'])):
print(f'|--Unsupported metadata included data type: {c.dataType.simpleString()} for column: {c}--|')
return False
return True
def top_level_metadata(spark, format, table1, table2, table1_partition, table2_partition, filter):
"""
Check whether the column count and row count match between table1 and table2
"""
if format in ['parquet', 'orc', 'csv']:
print('todo')
elif format == "hive":
results = []
table_confs = [(table1,table1_partition), (table2, table2_partition)]
for (table_name,partition) in table_confs:
sql = f'select * from {table_name}'
if any(cond != 'None' for cond in [partition, filter]):
where_clause = ' where ' + ' and '.join(x for x in [partition, filter] if x != 'None')
sql += where_clause
df = spark.sql(sql)
row_count = df.count()
col_count = len(df.columns)
results.append((table_name, row_count, col_count))
resultsDF = spark.createDataFrame(results, ["TableName", "RowCount", "ColumnCount"])
return resultsDF
def generate_metric_df(spark, table_DF, include_column, exclude_column, table):
"""
Return the metrics dataframe for the given table; the returned dataframe should look like:
+-----------+------------+------------+------------+-------------------+-------------------+
|Column Name| mintable1| maxtable1| avgtable1| stddevtable1|countDistincttable1|
+-----------+------------+------------+------------+-------------------+-------------------+
| col1| 1.0| 11.0| 6.0| 3.3| 11.0|
| col2| 1.1| 9.8| 5.9| 4.5| 2.0|
| ...
| coln| 1.3| 9.3| 5.0| 3.2| 6.0|
+-----------+------------+------------+------------+-------------------+-------------------+
"""
@F.udf(returnType=StringType())
def map_to_string(data):
# Sort the keys and values in the map
sorted_data = sorted(data.items(), key=lambda x: x[0]) if isinstance(data, dict) else sorted(
[(k, sorted(v)) for k, v in data.items()], key=lambda x: x[0])
return str(dict(sorted_data))
result = None
agg_functions = [min, max, avg, stddev, countDistinct]
# if no included columns are specified, get all numeric, string, and map columns
excluded_columns_list = [e.strip() for e in exclude_column.split(",")]
metrics_cols = [i.strip() for i in include_column.split(",") if i not in excluded_columns_list]
if include_column in ['None', 'all']:
metrics_cols = [c.name for c in table_DF.schema.fields if
any(fnmatch.fnmatch(c.dataType.simpleString(), pattern) for pattern in ['*int*', '*decimal*', '*float*', '*double*', 'string', '*map*'])]
map_metrics_cols = [c.name for c in table_DF.schema.fields if
any(fnmatch.fnmatch(c.dataType.simpleString(), pattern) for pattern in ['*map*'])]
normal_metrics_cols = list(set(metrics_cols) - set(map_metrics_cols))
for col in normal_metrics_cols:
dfc = spark.createDataFrame(([col],), ["ColumnName"])
table1_agg = table_DF.select(
[f(col).alias(f.__name__ + table) for f in
agg_functions])
tmp_df = dfc.join(table1_agg)
if result is None:
result = tmp_df
else:
result = result.union(tmp_df)
for col in map_metrics_cols:
dfc = spark.createDataFrame(([col],), ["ColumnName"])
table1_agg = table_DF.select(
[f(map_to_string(col)).alias(f.__name__ + table) for f in
agg_functions])
tmp_df = dfc.join(table1_agg)
if result is None:
result = tmp_df
else:
result = result.union(tmp_df)
return result
def metrics_metadata(spark, format, table1, table2, table1_partition, table2_partition,
pk, include_column, exclude_column, filter, precision):
"""
Compare the metadata metrics (min/max/avg/stddev/count_distinct) of each column across the two tables.
Only differences are shown: if the values for a metric are identical, the corresponding cells are left empty.
The resulting dataframe should look like:
|--Metadata Diff Info--|
+----------+-----+-----+-----+-----+-----+-----+--------+--------+-----------+-----------+
|ColumnName|min_A|min_B|max_A|max_B|avg_A|avg_B|stddev_A|stddev_B|countdist_A|countdist_B|
+----------+-----+-----+-----+-----+-----+-----+--------+--------+-----------+-----------+
| col1| | | 12.0| 11.0| 6.09| 6.0| 3.48| 3.32| | |
| col3| | | | | 5.09| 5.18| 3.18| 3.06| | |
| col4| | | | | | | | | 4| 5|
| col6| | | | | | | | | 10| 11|
| col7| | | | | | | | | 10| 11|
| col8|12.34|12.33| | | | | | | 3| 4|
+----------+-----+-----+-----+-----+-----+-----+--------+--------+-----------+-----------+
"""
table1_DF = load_table(spark, format, table1, table1_partition, pk, include_column, filter, "")
table2_DF = load_table(spark, format, table2, table2_partition, pk, include_column, filter, "")
table_metric_df1 = generate_metric_df(spark, table1_DF, include_column, exclude_column, table1)
table_metric_df2 = generate_metric_df(spark, table2_DF, include_column, exclude_column, table2)
joined_table = table_metric_df1.alias("t1").join(table_metric_df2.alias("t2"), ["ColumnName"])
cond = (round("t1.min" + table1, precision) != round("t2.min" + table2, precision)) | \
(round("t1.max" + table1, precision) != round("t2.max" + table2, precision)) | \
(round("t1.avg" + table1, precision) != round("t2.avg" + table2, precision)) | \
(round("t1.stddev" + table1, precision) != round("t2.stddev" + table2, precision)) | \
(round("t1.countDistinct" + table1, precision) != round("t2.countDistinct" + table2, precision))
# apply condition on the joined table, return the final dataframe
result_table = joined_table.select("ColumnName",
when(round(col("t1.min"+table1), precision) != round(col("t2.min"+table2), precision), round(col("t1.min"+table1), precision)).otherwise('').alias("min_A"),
when(round(col("t1.min"+table1), precision) != round(col("t2.min"+table2), precision), round(col("t2.min"+table2), precision)).otherwise('').alias("min_B"),
when(round(col("t1.max"+table1), precision) != round(col("t2.max"+table2), precision), round(col("t1.max"+table1), precision)).otherwise('').alias("max_A"),
when(round(col("t1.max"+table1), precision) != round(col("t2.max"+table2), precision), round(col("t2.max"+table2), precision)).otherwise('').alias("max_B"),
when(round(col("t1.avg"+table1), precision) != round(col("t2.avg"+table2), precision), round(col("t1.avg"+table1), precision)).otherwise('').alias("avg_A"),
when(round(col("t1.avg"+table1), precision) != round(col("t2.avg"+table2), precision), round(col("t2.avg"+table2), precision)).otherwise('').alias("avg_B"),
when(round(col("t1.stddev"+table1), precision) != round(col("t2.stddev"+table2), precision), round(col("t1.stddev"+table1), precision)).otherwise('').alias("stddev_A"),
when(round(col("t1.stddev"+table1), precision) != round(col("t2.stddev"+table2), precision), round(col("t2.stddev"+table2), precision)).otherwise('').alias("stddev_B"),
when(round(col("t1.countDistinct"+table1), precision) != round(col("t2.countDistinct"+table2), precision), round(col("t1.countDistinct"+table1), precision)).otherwise('').alias("countdist_A"),
when(round(col("t1.countDistinct"+table1), precision) != round(col("t2.countDistinct"+table2), precision), round(col("t2.countDistinct"+table2), precision)).otherwise('').alias("countdist_B")
).where(cond).sort(asc("ColumnName"))
return result_table
def load_table(spark, format, table, table_partition, pk, include_column, filter, view_name):
"""
Load a dataframe according to the given format type
"""
if format in ['parquet', 'orc', 'csv']:
# select column clause
cols = '*' if include_column is None else include_column
# cols = cols if e is None else cols + f", EXCEPT ({e}) " only works on databricks
sql = f"select {pk},{cols} from {view_name}"
# where clause
where_clause = ""
path = table
if table_partition != 'None' and filter != 'None':
where_clause = f" where {table_partition} and {filter}"
elif table_partition != 'None':
where_clause = f" where {table_partition}"
# partition clause should be in real order as data path
# path += partition_to_path(t1p)
elif filter != 'None':
where_clause = f" where {filter}"
spark.read.format(format).load(path).createOrReplaceTempView(view_name)
sql += where_clause
result = spark.sql(sql)
return result
elif format == "hive":
cols = '*' if include_column is None or include_column == 'all' else include_column
sql = f"select {cols} from {table}"
# where clause
if any(cond != 'None' for cond in [table_partition,filter]):
where_clause = ' where ' + ' and '.join(x for x in [table_partition, filter] if x != 'None')
sql += where_clause
df = spark.sql(sql)
return df
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--format',
type=str,
help='The format of tables')
parser.add_argument('--table1',
type=str,
help='table1')
parser.add_argument('--table2',
type=str,
help='table2')
parser.add_argument('--table1_partition',
type=str,
help='table1 partition')
parser.add_argument('--table2_partition',
type=str,
help='table2 partition')
parser.add_argument('--pk',
type=str,
help='primary key')
parser.add_argument('--exclude_column',
type=str,
help='Exclude column option')
parser.add_argument('--include_column',
type=str,
help='Include column option')
parser.add_argument('--filter',
type=str,
help='Condition to filter rows')
parser.add_argument('--output_path',
type=str,
help='Output directory')
parser.add_argument('--output_format',
type=str,
help='Output format, default is parquet')
parser.add_argument('--precision',
type=int,
help='Precision, default is 4')
args = parser.parse_args()
sc = SparkContext(appName='metadata-validation')
spark = SparkSession(sc)
validation(spark, args) | spark-rapids-tools-dev | data_validation/src/spark_rapids_validation_tool/validation_scripts/metadata_validation.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CSP init file."""
from .csp import CspBase
from .dataproc import Dataproc as _ # noqa: F401
def new_csp(csp_type, args):
"""Create new CSP instance by CSP type."""
for cls in CspBase.__subclasses__():
if cls.is_csp(csp_type):
return cls(args)
raise Exception(f'unknown CSP type: {csp_type}')
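# Illustrative usage sketch (the cluster and region values are assumptions):
# csp = new_csp('dataproc', {'cluster': 'my-cluster', 'region': 'us-central1'})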
| spark-rapids-tools-dev | data_validation/src/spark_rapids_validation_tool/csp/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CSP base class."""
import logging
from spark_rapids_validation_tool.utilities import run_cmd
logger = logging.getLogger('csp.cspbase')
class CspBase:
"""Base class for CSP object."""
@classmethod
def is_csp(cls, csp_name):
"""Test CSP class by name."""
return csp_name == 'cspbase'
@classmethod
def run_local_cmd(cls, cmd, check=True, capture=''):
"""Run command and capture output."""
return run_cmd(cmd, check, capture)
def __init__(self):
"""Init method."""
def get_nodes(self, node='all'):
"""Get cluster node address."""
raise NotImplementedError
def run_ssh_cmd(self, cmd, node, check=True, capture=''):
"""Run command on cluster node via ssh and check return code, capture output etc."""
raise NotImplementedError
def run_scp_cmd(self, src, dest, node):
"""Run scp command to copy file to cluster node."""
raise NotImplementedError
def submit_job(self, job):
"""Submit job to the cluster."""
raise NotImplementedError
| spark-rapids-tools-dev | data_validation/src/spark_rapids_validation_tool/csp/csp.py |
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CSP object for Dataproc."""
import logging
import os
import yaml
from .csp import CspBase
logger = logging.getLogger('csp.dataproc')
class Dataproc(CspBase):
"""Class for Dataproc."""
JOB_TYPE_PYTHON = 'python'
JOB_TYPE_NOTEBOOK = 'notebook'
JOB_TYPE_SPARK = 'spark'
JOB_TYPE_PYSPARK = 'pyspark'
@classmethod
def is_csp(cls, csp_name):
"""Test CSP class by name."""
return csp_name == 'dataproc'
def __init__(self, args):
"""Init method (required cluster & region in args)."""
super().__init__()
name = args.get('cluster', None)
region = args.get('region', None)
if not name or not region:
raise Exception('Invalid cluster or region for Dataproc')
self.name = name
self.region = region
self.nodes = {}
self.zone = None
def get_info(self):
"""Get cluster info."""
result = self.run_local_cmd(['gcloud', 'dataproc', 'clusters', 'describe', self.name,
f'--region={self.region}'], capture='stdout')
return yaml.safe_load(result)
def get_nodes(self, node='all'):
"""Get cluster node address."""
# node format: <all|master|workers|workers-n>
if not self.nodes:
info = self.get_info()
master = info.get('config', {}).get('masterConfig', {}).get('instanceNames')
if master:
self.nodes['master'] = master
else:
raise Exception("not found 'masterConfig' from cluster info")
workers = info.get('config', {}).get('workerConfig', {}).get('instanceNames')
if workers and len(workers) > 0:
self.nodes['workers'] = workers
else:
raise Exception('sorry, single node cluster (1 master, 0 workers) not supported')
zone_uri = info.get('config', {}).get('gceClusterConfig', {}).get('zoneUri')
if zone_uri:
self.zone = os.path.basename(zone_uri)
else:
raise Exception("not found 'zoneUri' from cluster info")
logger.debug('cluster nodes: %s from zone: %s', self.nodes, self.zone)
if not node or node == 'master':
# Return master node by default
return self.nodes['master']
if node == 'all':
# Return both master & worker nodes
nodes = []
for i in self.nodes.values():
nodes += i
return nodes
if node == 'workers':
# Return worker nodes
return self.nodes['workers']
# Node format: workers-n
node_type, index_str = node.split('-')
nodes = self.nodes.get(node_type)
if not nodes or int(index_str) >= len(nodes):
raise Exception(f"not found node: '{node}'")
return [nodes[int(index_str)]]
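# Illustrative node selectors: 'master', 'workers', 'all', or an indexed form such as 'workers-0',
# which returns a single-element list containing the first worker hostname.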
def run_ssh_cmd(self, cmd, node, check=True, capture=''):
"""Run command on cluster node via ssh and check return code, capture output etc."""
ssh_cmd = ['gcloud', 'compute', 'ssh', '--zone', self.zone, node, '--']
ssh_cmd.append(' '.join(['\'' + e + '\'' for e in cmd]))
return self.run_local_cmd(ssh_cmd, check, capture)
def run_scp_cmd(self, src, dest, node):
"""Run scp command to copy file to cluster node."""
return self.run_local_cmd(['gcloud', 'compute', 'scp', '--zone', self.zone, src, f'{node}:{dest}'])
def submit_job(self, job):
"""Submit job to the cluster."""
cmd = ['gcloud', 'dataproc', 'jobs', 'submit', job['type'], f'--cluster={self.name}',
f'--region={self.region}']
# Add job class
if 'class' in job:
cmd += ['--class', job['class']]
# Add job jars and properties
if 'jars' in job:
jars = job['jars']
if jars:
cmd += ['--jars', ','.join(jars)]
if 'properties' in job:
properties = job['properties']
if properties:
prop_items = []
for key, value in properties.items():
prop_items.append(f"{key}='{value}'")
cmd += ['--properties', ','.join(prop_items)]
# Add job file
if 'file' in job:
cmd.append(job['file'])
# Add job parameters
if 'parameters' in job and job['parameters']:
cmd += ['--'] + job['parameters']
# The job driver output is written to stderr, so capture stderr as the job output
return self.run_local_cmd(cmd, capture='stderr')
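# Illustrative job dictionary sketch (the file path and parameter values are assumptions):
# job = {
#     'type': 'pyspark',
#     'file': 'gs://my-bucket/scripts/dataset_validation.py',
#     'parameters': ['--format=hive', '--table1=db.t1', '--table2=db.t2'],
# }
# output = dataproc.submit_job(job)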
| spark-rapids-tools-dev | data_validation/src/spark_rapids_validation_tool/csp/dataproc.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import sphinx_rtd_theme
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'torchfort'
copyright = '2023, NVIDIA Corporation'
author = 'NVIDIA Corporation'
# The full version, including alpha/beta/rc tags
release = '2023'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'breathe',
'sphinx.ext.mathjax',
'sphinx_tabs.tabs',
'sphinxfortran.fortran_domain',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
highlight_language = 'cpp'
def setup(app):
app.add_css_file('style.css')
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
breathe_projects = { "torchfort": "xml/" }
breathe_default_project = "torchfort"
| TorchFort-master | docs/conf.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import torch
from torch import nn
import torch.nn.functional as F
def weight_init(model, scale=0.02):
with torch.no_grad():
for m in model.modules():
if isinstance(m, nn.Linear):
sqrtk = math.sqrt(1./float(m.weight.shape[1]))
nn.init.uniform_(m.weight, a=-sqrtk, b=sqrtk)
if m.bias is not None:
m.bias.data.zero_()
class PolicyFunc(nn.Module):
def __init__(self, hidden_features=128):
super(PolicyFunc, self).__init__()
layers = [nn.Linear(in_features = 4,
out_features = hidden_features,
bias=True),
nn.ReLU(),
nn.Linear(in_features = hidden_features,
out_features = hidden_features // 2,
bias=True),
nn.ReLU(),
nn.Linear(in_features = hidden_features // 2,
out_features = 1,
bias=True),
nn.Tanh()]
self.fwd = nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.fwd(x)
class ValueFunc(nn.Module):
def __init__(self, hidden_features=128):
super(ValueFunc, self).__init__()
layers = [nn.Linear(in_features = 5,
out_features = hidden_features,
bias=True),
nn.ReLU(),
nn.Linear(in_features = hidden_features,
out_features = hidden_features // 2,
bias=True),
nn.ReLU(),
nn.Linear(in_features = hidden_features // 2,
out_features = 1,
bias=True)]
self.fwd = nn.Sequential(*layers)
def forward(self, s: torch.Tensor, a: torch.Tensor) -> torch.Tensor:
x = torch.cat([s, a], dim=1)
return self.fwd(x)
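# Illustrative tensor shapes (the batch size N is an assumption):
# PolicyFunc maps observations of shape (N, 4) to actions of shape (N, 1) in [-1, 1] (tanh output);
# ValueFunc maps (state (N, 4), action (N, 1)) to Q-values of shape (N, 1).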
| TorchFort-master | examples/cpp/cart_pole/python/models.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import argparse as ap
import math
import numpy as np
from tqdm import tqdm
import torch
from functools import partial
from torch import nn
import torch.nn.functional as F
from models import PolicyFunc
from PyEnvironments import CartPoleEnv
# rendering stuff
import pygame
from pygame import gfxdraw
from moviepy.editor import ImageSequenceClip
# the implementation of the cartpole renderer was taken from
# the OpenAI gym repo: https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py
class Renderer(object):
def __init__(self, x_threshold=5, length=0.5):
self.screen_width = 600
self.screen_height = 400
self.screen = None
self.clock = None
self.x_threshold = int(x_threshold)
self.length = length
def render(self, state):
if self.screen is None:
pygame.init()
self.screen = pygame.Surface((self.screen_width, self.screen_height))
if self.clock is None:
self.clock = pygame.time.Clock()
world_width = self.x_threshold * 2
scale = self.screen_width / world_width
polewidth = 10.0
polelen = scale * (2 * self.length)
cartwidth = 50.0
cartheight = 30.0
x = state
self.surf = pygame.Surface((self.screen_width, self.screen_height))
self.surf.fill((255, 255, 255))
l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
axleoffset = cartheight / 4.0
cartx = x[0] * scale + self.screen_width / 2.0 # MIDDLE OF CART
carty = 100 # TOP OF CART
cart_coords = [(l, b), (l, t), (r, t), (r, b)]
cart_coords = [(c[0] + cartx, c[1] + carty) for c in cart_coords]
gfxdraw.aapolygon(self.surf, cart_coords, (0, 0, 0))
gfxdraw.filled_polygon(self.surf, cart_coords, (0, 0, 0))
l, r, t, b = (
-polewidth / 2,
polewidth / 2,
polelen - polewidth / 2,
-polewidth / 2,
)
pole_coords = []
for coord in [(l, b), (l, t), (r, t), (r, b)]:
coord = pygame.math.Vector2(coord).rotate_rad(-x[2])
coord = (coord[0] + cartx, coord[1] + carty + axleoffset)
pole_coords.append(coord)
gfxdraw.aapolygon(self.surf, pole_coords, (202, 152, 101))
gfxdraw.filled_polygon(self.surf, pole_coords, (202, 152, 101))
gfxdraw.aacircle(
self.surf,
int(cartx),
int(carty + axleoffset),
int(polewidth / 2),
(129, 132, 203),
)
gfxdraw.filled_circle(
self.surf,
int(cartx),
int(carty + axleoffset),
int(polewidth / 2),
(129, 132, 203),
)
gfxdraw.hline(self.surf, 0, self.screen_width, carty, (0, 0, 0))
self.surf = pygame.transform.flip(self.surf, False, True)
self.screen.blit(self.surf, (0, 0))
return np.transpose(
np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2)
)
def main(args):
# set seed
torch.manual_seed(666)
torch.cuda.manual_seed(666)
# script model:
device = torch.device("cuda:0")
# parameters
batch_size = 1
# policy model
pmodel = torch.jit.load(args.policy_checkpoint)
# env
cenv = CartPoleEnv()
# renderer
renderer = Renderer()
# reset and get initial state
state = cenv.reset()
frames = []
for step in tqdm(range(args.num_steps)):
stens = torch.Tensor(state).unsqueeze(0).to(device)
atens = pmodel(stens)
action = atens.item()
# render state
imarray = renderer.render(state)
frames.append(imarray)
# take step
state_new, reward, terminate = cenv.step(action)
if terminate:
break
state = state_new
# print number of steps
print(f"Episode finished with {step} steps")
video = ImageSequenceClip(frames, fps=50)
video.write_gif(os.path.join(args.output_path, "cartpole.gif"))
video.write_videofile(os.path.join(args.output_path, "cartpole.mp4"))
if __name__ == "__main__":
parser = ap.ArgumentParser()
parser.add_argument("--policy_checkpoint", type=str, help="Checkpoint for policy to restore", required=True)
parser.add_argument("--output_path", type=str, help="Directory where to store the generated videos", required=True)
parser.add_argument("--num_steps", type=int, default=500, help="Number of steps to run")
args = parser.parse_args()
main(args)
| TorchFort-master | examples/cpp/cart_pole/python/visualize.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse as ap
import math
import torch
from functools import partial
from torch import nn
import torch.nn.functional as F
from models import weight_init, PolicyFunc, ValueFunc
def main(args):
# set seed
torch.manual_seed(666)
torch.cuda.manual_seed(666)
# script model:
device = torch.device("cuda:0")
# parameters
batch_size = 64
# policy model
pmodel = PolicyFunc(hidden_features=args.num_hidden_features).to(device)
weight_init(pmodel)
jpmodel = torch.jit.script(pmodel)
inp = torch.ones((batch_size, 4), dtype=torch.float32, device=device)
out = jpmodel(inp)
print("Policy model:", pmodel)
print("Policy model output shape:", out.shape)
torch.jit.save(jpmodel, "policy.pt")
# value model
qmodel = ValueFunc(hidden_features=args.num_hidden_features).to(device)
weight_init(qmodel)
jqmodel = torch.jit.script(qmodel)
inp_a = torch.ones((batch_size, 1), dtype=torch.float32, device=device)
out = jqmodel(inp, inp_a)
print("Value model:", qmodel)
print("Value model output shape:", out.shape)
torch.jit.save(jqmodel, "value.pt")
if __name__ == "__main__":
parser = ap.ArgumentParser()
parser.add_argument("--num_hidden_features", type=int, default=128, help="Number of hidden features")
args = parser.parse_args()
main(args)
| TorchFort-master | examples/cpp/cart_pole/python/initialize_models.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 3, padding=1, padding_mode="circular")
def forward(self, x):
return self.conv1(x)
def main():
# Create model
model = Net()
print("FCN model:", model)
# Move model to GPU, JIT, and save
model.to("cuda")
model_jit = torch.jit.script(model)
model_jit.save("fcn_torchscript.pt")
if __name__ == "__main__":
main()
| TorchFort-master | examples/fortran/simulation/generate_fcn_model.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse as ap
import glob
import h5py as h5
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation, PillowWriter
import numpy as np
import os
import time
def main(args):
global infiles, labelfiles, outfiles, artists
print(f"processing files in {args.input_path}...")
infiles = sorted(glob.glob(os.path.join(args.input_path, "input_0*")))
labelfiles = sorted(glob.glob(os.path.join(args.input_path, "label_0*")))
outfiles = sorted(glob.glob(os.path.join(args.input_path, "output_0*")))
artists = []
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
ax1.set_title(r"$u$")
ax1.set_xlabel(r"$x$")
ax1.set_ylabel(r"$y$")
ax2.set_title(r"$\nabla \cdot \mathbf{a}u$ (true)")
ax2.set_xlabel(r"$x$")
ax2.set_ylabel(r"$y$")
ax3.set_title(r"$\nabla \cdot \mathbf{a}u$ (prediction)")
ax3.set_xlabel(r"$x$")
ax3.set_ylabel(r"$y$")
ax4.set_title(r"1D sample along dotted line")
ax4.set_xlabel(r"$x$")
with h5.File(infiles[0], 'r') as f:
idata = f["data"][...]
with h5.File(labelfiles[0], 'r') as f:
ldata = f["data"][...]
with h5.File(outfiles[0], 'r') as f:
odata = f["data"][...]
c = ax1.contourf(idata)
artists += c.collections
c = ax1.hlines(idata.shape[0]//2 + 1, 0, idata.shape[1]-1, colors="black", linestyles="dashed")
artists.append(c)
c = ax2.contourf(ldata)
artists += c.collections
c = ax3.contourf(odata)
artists += c.collections
c, = ax4.plot(idata[idata.shape[0]//2 + 1,:], 'k')
artists.append(c)
c, = ax4.plot(ldata[idata.shape[0]//2 + 1,:], 'b')
artists.append(c)
c, = ax4.plot(odata[idata.shape[0]//2 + 1,:], 'g.')
artists.append(c)
fig.tight_layout()
def animate(i):
global infiles, labelfiles, outfiles, artists
for c in artists:
c.remove()
artists.clear()
with h5.File(infiles[i], 'r') as f:
idata = f["data"][...]
with h5.File(labelfiles[i], 'r') as f:
ldata = f["data"][...]
with h5.File(outfiles[i], 'r') as f:
odata = f["data"][...]
c = ax1.contourf(idata)
artists += c.collections
c = ax1.hlines(idata.shape[0]//2 + 1, 0, idata.shape[1]-1, colors="black", linestyles="dashed")
artists.append(c)
c = ax2.contourf(ldata)
artists += c.collections
c = ax3.contourf(odata)
artists += c.collections
c, = ax4.plot(idata[idata.shape[0]//2 + 1,:], 'k')
artists.append(c)
c, = ax4.plot(ldata[idata.shape[0]//2 + 1,:], 'b')
artists.append(c)
c, = ax4.plot(odata[idata.shape[0]//2 + 1,:], 'g.')
artists.append(c)
ani = FuncAnimation(fig, animate, frames=len(infiles), repeat=False, interval=1)
os.makedirs(args.output_path, exist_ok=True)
def log(i, n):
print(f"processed {i+1} of {n} frames..." )
ani.save(os.path.join(args.output_path, "validation_results.gif"), writer=PillowWriter(fps=5), progress_callback=lambda i, n: log(i,n))
print(f"video written to {os.path.join(args.output_path, 'validation_results.gif')}...")
if __name__ == "__main__":
parser = ap.ArgumentParser()
parser.add_argument("--input_path", type=str, help="Directory containing validation hdf5 files", required=True)
parser.add_argument("--output_path", type=str, help="Directory to store the generated videos", required=True)
args = parser.parse_args()
main(args)
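# Example invocation (illustrative paths; the input directory should contain the
# input_0*, label_0*, and output_0* HDF5 files written during validation):
#   python visualize.py --input_path ./validation_data --output_path ./plots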
| TorchFort-master | examples/fortran/simulation/visualize.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import signal
import re
from glob import iglob
import argparse as ap
import subprocess as sp
import wandb
import ruamel.yaml as ry
import time
def signal_handler(sig, frame):
print("Terminating wandb watcher")
wandb.finish()
sys.exit(0)
def spin_wait(cond_func, num_retries, timeout):
    # Poll cond_func up to num_retries times, sleeping between attempts so the
    # total wait is roughly `timeout` seconds. Return True as soon as the
    # condition holds; return False if all retries are exhausted.
    is_ok = True
    for _ in range(num_retries):
        if cond_func():
            break
        else:
            time.sleep(int(timeout / float(num_retries)))
    else:
        is_ok = False
    return is_ok
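# Illustrative usage of spin_wait (hypothetical path and timings): wait up to
# ~30 seconds, checking 10 times, for a directory to appear before giving up:
#   ok = spin_wait(lambda: os.path.isdir("/tmp/torchfort_logs"), num_retries=10, timeout=30)
#   if not ok:
#       raise IOError("directory never appeared")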
def find_config(logdir, config_table, model_name, parser):
# check for all configs we have not visited yet
config_list = [x for x in iglob(os.path.join(logdir, "*.yaml")) if x not in config_table.values()]
# check the config files for the right config
config = None
config_file = None
for config_file in config_list:
with open(config_file, 'r') as f:
model_config = parser.load(f)
if model_config["identifier"] == model_name:
del model_config["identifier"]
config = model_config
break
else:
continue
return config, config_file
def main(args):
# global parameters
num_retries = 10
# get logging directory:
logging_dir = os.getenv("TORCHFORT_LOGDIR", "")
if not logging_dir:
raise IOError(f"TORCHFORT_LOGDIR is unset.")
is_ok = spin_wait(lambda: os.path.isdir(logging_dir), num_retries, args.timeout)
if not is_ok:
raise IOError(f"logging directory {logging_dir} does not exist.")
# we need the parser in order to read torchfort configs
yaml = ry.YAML(typ="safe", pure=True)
    # construct the path to the torchfort logfile
    num_retries = 10
logfilename = os.path.join(logging_dir, "torchfort.log")
# the file might not have been created yet, so we retry a couple of times
is_ok = spin_wait(lambda: os.path.isfile(logfilename), num_retries, args.timeout)
if not is_ok:
raise IOError(f"logfile {logfilename} not found. Check your paths or try to increase the timeout variable.")
print(f"Watching file {logfilename} for updates.", flush=True)
# init wandb
wandb.init(job_type="train",
dir=args.wandb_dir,
project=args.wandb_project,
group=args.wandb_group,
entity=args.wandb_entity,
name=args.run_tag)
# install signal handler
signal.signal(signal.SIGINT, signal_handler)
# regex pattern we are trying to match
pattern = re.compile(r"^TORCHFORT::WANDB: model: (.*?), step: (\d{1,}), (.*?): (.*)")
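    # Example of a line this pattern matches (illustrative values):
    #   "TORCHFORT::WANDB: model: mymodel, step: 100, train_loss: 0.125"
    # which yields the groups ("mymodel", "100", "train_loss", "0.125").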
# parse the logfile
# we keep track of new models popping up:
config_table = {}
with open(logfilename) as logfile:
# get the start time, needed for computing timeouts
start_time = time.time()
# store the position of the parser
last_pos = logfile.tell()
# infinite parsing loop
while True:
# read the line
line = logfile.readline()
# check if line is empty:
if line:
# check if line was only partially read,
# rewind in that case
if not (os.linesep in line):
logfile.seek(last_pos)
continue
# preprocess the line
line = line.replace(os.linesep, "").strip()
start_time = time.time()
lmatch = pattern.match(line)
# make this fail safe, in case there are some
# race conditions
try:
if lmatch is not None:
# log to wandb
mgroups = lmatch.groups()
modelname = mgroups[0].strip()
step = int(mgroups[1].strip())
metric_name = mgroups[2].strip()
value = float(mgroups[3].strip())
wandb.log({f"{modelname} {metric_name}": value, f"{modelname} {metric_name} step": step})
# remember the current position in the file
last_pos = logfile.tell()
# if the model string was not observed yet, find the new config:
if modelname not in config_table.keys():
if args.verbose:
print(f"New model {modelname} found in logfile, looking up config")
config, filename = find_config(logging_dir, config_table, modelname, yaml)
if config is not None:
if args.verbose:
print(f"Config file for model {modelname} found, logging to wandb")
config_table[modelname] = filename
wandb.config.update({modelname: config})
else:
if args.verbose:
print(f"No configuration for model {modelname} found yet")
except:
continue
else:
end_time = time.time()
if (end_time - start_time) > args.timeout:
print("Timeout reached, quitting wandb watcher process.", flush=True)
wandb.finish()
return
if args.polling_interval > 0:
time.sleep(args.polling_interval)
if __name__ == "__main__":
parser = ap.ArgumentParser()
parser.add_argument("--wandb_dir", type=str, default=None, help="Directory where wandb stores its intermediates.")
parser.add_argument("--wandb_group", type=str)
parser.add_argument("--wandb_project", type=str)
parser.add_argument("--wandb_entity", type=str)
parser.add_argument("--run_tag", type=str)
parser.add_argument("--polling_interval", type=int, default=5, help="Polling interval in seconds with which the file will be polled for updates.")
parser.add_argument("--timeout", type=int, default=60, help="Timeout in seconds.")
parser.add_argument("--verbose", action='store_true')
args = parser.parse_args()
# launch main app
main(args)
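# Example invocation (illustrative; project, entity, and tag are placeholders).
# The watcher reads TORCHFORT_LOGDIR to locate torchfort.log and the model
# config YAML files it logs to wandb:
#   TORCHFORT_LOGDIR=./logs python wandb_helper.py \
#       --wandb_project my_project --wandb_entity my_team --run_tag run0 --verbose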
| TorchFort-master | src/python/wandb_helper.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""Commands used to automate testing gdb pretty printers.
This script is part of a larger framework to test gdb pretty printers. It
runs the program, detects test cases, checks them, and prints results.
See gdb_pretty_printer_test.sh.cpp on how to write a test case.
"""
from __future__ import print_function
import re
import gdb
test_failures = 0
class CheckResult(gdb.Command):
def __init__(self):
super(CheckResult, self).__init__(
"print_and_compare", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
try:
# Stack frame is:
# 0. StopForDebugger
# 1. ComparePrettyPrintToChars or ComparePrettyPrintToRegex
# 2. TestCase
compare_frame = gdb.newest_frame().older()
testcase_frame = compare_frame.older()
test_loc = testcase_frame.find_sal()
# Use interactive commands in the correct context to get the pretty
# printed version
value_str = self._get_value_string(compare_frame, testcase_frame)
# Ignore the convenience variable name and newline
value = value_str[value_str.find("= ") + 2:-1]
gdb.newest_frame().select()
expectation_val = compare_frame.read_var("expectation")
if "PrettyPrintToRegex" in compare_frame.name():
check_literal = expectation_val.string()
test_fails = not re.match(check_literal, value)
else:
check_literal_string = expectation_val.string(encoding="utf-8")
check_literal = str(check_literal_string.encode("utf-8"))
test_fails = value != check_literal
if test_fails:
global test_failures
print("FAIL: " + test_loc.symtab.filename +
":" + str(test_loc.line))
print("GDB printed:")
print(" " + value)
print("Value should match:")
print(" " + check_literal)
test_failures += 1
else:
print("PASS: " + test_loc.symtab.filename +
":" + str(test_loc.line))
except RuntimeError as e:
# At this point, lots of different things could be wrong, so don't try to
# recover or figure it out. Don't exit either, because then it's
            # impossible to debug the framework itself.
print("FAIL: Something is wrong in the test framework.")
print(str(e))
test_failures += 1
def _get_value_string(self, compare_frame, testcase_frame):
compare_frame.select()
if "ComparePrettyPrint" in compare_frame.name():
return gdb.execute("p value", to_string=True)
value_str = str(compare_frame.read_var("value"))
clean_expression_str = value_str.strip("'\"")
testcase_frame.select()
return gdb.execute("p " + clean_expression_str, to_string=True)
def exit_handler(event=None):
global test_failures
if test_failures:
print("FAILED %d cases" % test_failures)
exit(test_failures)
# Start code executed at load time
# Disable terminal paging
gdb.execute("set height 0")
gdb.execute("set python print-stack full")
test_failures = 0
CheckResult()
test_bp = gdb.Breakpoint("StopForDebugger")
test_bp.enabled = True
test_bp.silent = True
test_bp.commands = "print_and_compare\ncontinue"
# "run" won't return if the program exits; ensure the script regains control.
gdb.events.exited.connect(exit_handler)
gdb.execute("run")
# If the program didn't exit, something went wrong, but we don't
# know what. Fail on exit.
test_failures += 1
exit_handler(None)
| libcudacxx-main | libcxx/test/pretty_printers/gdb_pretty_printer_test.py |
import sys
import os
import socket
import stat
# Ensure that this is being run on a specific platform
assert sys.platform.startswith('linux') or sys.platform.startswith('darwin') \
or sys.platform.startswith('cygwin') or sys.platform.startswith('freebsd') \
or sys.platform.startswith('netbsd')
def env_path():
ep = os.environ.get('LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT')
assert ep is not None
ep = os.path.realpath(ep)
assert os.path.isdir(ep)
return ep
env_path_global = env_path()
# Make sure we don't try and write outside of env_path.
# All paths used should be sanitized
def sanitize(p):
p = os.path.realpath(p)
if os.path.commonprefix([env_path_global, p]):
return p
assert False
"""
Some of the tests restrict permissions to induce failures.
Before we delete the test environment, we have to walk it and re-raise the
permissions.
"""
def clean_recursive(root_p):
if not os.path.islink(root_p):
os.chmod(root_p, 0o777)
for ent in os.listdir(root_p):
p = os.path.join(root_p, ent)
if os.path.islink(p) or not os.path.isdir(p):
os.remove(p)
else:
assert os.path.isdir(p)
clean_recursive(p)
os.rmdir(p)
def init_test_directory(root_p):
root_p = sanitize(root_p)
assert not os.path.exists(root_p)
os.makedirs(root_p)
def destroy_test_directory(root_p):
root_p = sanitize(root_p)
clean_recursive(root_p)
os.rmdir(root_p)
def create_file(fname, size):
with open(sanitize(fname), 'w') as f:
f.write('c' * size)
def create_dir(dname):
os.mkdir(sanitize(dname))
def create_symlink(source, link):
os.symlink(sanitize(source), sanitize(link))
def create_hardlink(source, link):
os.link(sanitize(source), sanitize(link))
def create_fifo(source):
os.mkfifo(sanitize(source))
def create_socket(source):
sock = socket.socket(socket.AF_UNIX)
sanitized_source = sanitize(source)
# AF_UNIX sockets may have very limited path length, so split it
# into chdir call (with technically unlimited length) followed
# by bind() relative to the directory
os.chdir(os.path.dirname(sanitized_source))
sock.bind(os.path.basename(sanitized_source))
if __name__ == '__main__':
command = " ".join(sys.argv[1:])
eval(command)
sys.exit(0)
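# Example invocation (illustrative): the filesystem tests pass a single Python
# expression on the command line, which is evaluated against the helpers above,
# e.g. (with $ROOT below LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT):
#   python filesystem_dynamic_test_helper.py "create_file('$ROOT/file1.txt', 42)"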
| libcudacxx-main | libcxx/test/support/filesystem_dynamic_test_helper.py |
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from argparse import ArgumentParser
import sys
def print_and_exit(msg):
sys.stderr.write(msg + '\n')
sys.exit(1)
def main():
parser = ArgumentParser(
description="Concatenate two files into a single file")
parser.add_argument(
'-o', '--output', dest='output', required=True,
help='The output file. stdout is used if not given',
type=str, action='store')
parser.add_argument(
'files', metavar='files', nargs='+',
help='The files to concatenate')
args = parser.parse_args()
if len(args.files) < 2:
print_and_exit('fewer than 2 inputs provided')
data = ''
for filename in args.files:
with open(filename, 'r') as f:
data += f.read()
if len(data) != 0 and data[-1] != '\n':
data += '\n'
    assert len(data) > 0, "cannot cat empty files"
with open(args.output, 'w') as f:
f.write(data)
if __name__ == '__main__':
main()
sys.exit(0)
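# Example invocation (illustrative file names):
#   python cat_files.py -o combined.txt part1.txt part2.txt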
| libcudacxx-main | libcxx/utils/cat_files.py |
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
Generate a linker script that links libc++ to the proper ABI library.
An example script for c++abi would look like "INPUT(libc++.so.1 -lc++abi)".
"""
import argparse
import os
import sys
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--dryrun", help="Don't write any output",
action="store_true", default=False)
parser.add_argument("--rename", action="store_true", default=False,
help="Rename the output as input so we can replace it")
parser.add_argument("--input", help="Path to libc++ library", required=True)
parser.add_argument("--output", help="Path to libc++ linker script",
required=True)
parser.add_argument("libraries", nargs="+",
help="List of libraries libc++ depends on")
args = parser.parse_args()
# Use the relative path for the libc++ library.
libcxx = os.path.relpath(args.input, os.path.dirname(args.output))
# Prepare the list of public libraries to link.
public_libs = ['-l%s' % l for l in args.libraries]
# Generate the linker script contents.
contents = "INPUT(%s)" % ' '.join([libcxx] + public_libs)
if args.dryrun:
print("GENERATING SCRIPT: '%s' as file %s" % (contents, args.output))
return 0
# Remove the existing libc++ symlink if it exists.
if os.path.islink(args.output):
os.unlink(args.output)
# Replace it with the linker script.
with open(args.output, 'w') as f:
f.write(contents + "\n")
return 0
if __name__ == '__main__':
sys.exit(main())
| libcudacxx-main | libcxx/utils/gen_link_script.py |
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_extract - Extract and output a list of symbols from a shared library.
"""
from argparse import ArgumentParser
from libcxx.sym_check import extract, util
def main():
parser = ArgumentParser(
description='Extract a list of symbols from a shared library.')
parser.add_argument('library', metavar='shared-lib', type=str,
help='The library to extract symbols from')
parser.add_argument('-o', '--output', dest='output',
help='The output file. stdout is used if not given',
type=str, action='store', default=None)
parser.add_argument('--names-only', dest='names_only',
help='Output only the name of the symbol',
action='store_true', default=False)
parser.add_argument('--only-stdlib-symbols', dest='only_stdlib',
help="Filter all symbols not related to the stdlib",
action='store_true', default=False)
parser.add_argument('--defined-only', dest='defined_only',
help="Filter all symbols that are not defined",
action='store_true', default=False)
parser.add_argument('--undefined-only', dest='undefined_only',
help="Filter all symbols that are defined",
action='store_true', default=False)
args = parser.parse_args()
assert not (args.undefined_only and args.defined_only)
if args.output is not None:
print('Extracting symbols from %s to %s.'
% (args.library, args.output))
syms = extract.extract_symbols(args.library)
if args.only_stdlib:
syms, other_syms = util.filter_stdlib_symbols(syms)
filter = lambda x: x
if args.defined_only:
filter = lambda l: list([x for x in l if x['is_defined']])
if args.undefined_only:
filter = lambda l: list([x for x in l if not x['is_defined']])
util.write_syms(syms, out=args.output, names_only=args.names_only, filter=filter)
if __name__ == '__main__':
main()
| libcudacxx-main | libcxx/utils/sym_extract.py |
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_diff - Compare two symbol lists and output the differences.
"""
from argparse import ArgumentParser
import sys
from libcxx.sym_check import diff, util
def main():
parser = ArgumentParser(
description='Extract a list of symbols from a shared library.')
parser.add_argument(
'--names-only', dest='names_only',
help='Only print symbol names',
action='store_true', default=False)
parser.add_argument(
'--removed-only', dest='removed_only',
help='Only print removed symbols',
action='store_true', default=False)
parser.add_argument('--only-stdlib-symbols', dest='only_stdlib',
help="Filter all symbols not related to the stdlib",
action='store_true', default=False)
parser.add_argument('--strict', dest='strict',
help="Exit with a non-zero status if any symbols "
"differ",
action='store_true', default=False)
parser.add_argument(
'-o', '--output', dest='output',
help='The output file. stdout is used if not given',
type=str, action='store', default=None)
parser.add_argument(
'--demangle', dest='demangle', action='store_true', default=False)
parser.add_argument(
'old_syms', metavar='old-syms', type=str,
help='The file containing the old symbol list or a library')
parser.add_argument(
'new_syms', metavar='new-syms', type=str,
help='The file containing the new symbol list or a library')
args = parser.parse_args()
old_syms_list = util.extract_or_load(args.old_syms)
new_syms_list = util.extract_or_load(args.new_syms)
if args.only_stdlib:
old_syms_list, _ = util.filter_stdlib_symbols(old_syms_list)
new_syms_list, _ = util.filter_stdlib_symbols(new_syms_list)
added, removed, changed = diff.diff(old_syms_list, new_syms_list)
if args.removed_only:
added = {}
report, is_break, is_different = diff.report_diff(
added, removed, changed, names_only=args.names_only,
demangle=args.demangle)
if args.output is None:
print(report)
else:
with open(args.output, 'w') as f:
f.write(report + '\n')
exit_code = 1 if is_break or (args.strict and is_different) else 0
sys.exit(exit_code)
if __name__ == '__main__':
main()
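# Example invocation (illustrative inputs): compare a stored ABI list against a
# freshly built library and fail if any symbols differ:
#   python sym_diff.py --only-stdlib-symbols --strict old_symbols.txt libc++.so.1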
| libcudacxx-main | libcxx/utils/sym_diff.py |
#!/usr/bin/env python
import os
import tempfile
def get_libcxx_paths():
utils_path = os.path.dirname(os.path.abspath(__file__))
script_name = os.path.basename(__file__)
assert os.path.exists(utils_path)
src_root = os.path.dirname(utils_path)
include_path = os.path.join(src_root, 'include')
assert os.path.exists(include_path)
docs_path = os.path.join(src_root, 'docs')
assert os.path.exists(docs_path)
macro_test_path = os.path.join(src_root, 'test', 'std', 'language.support',
'support.limits', 'support.limits.general')
assert os.path.exists(macro_test_path)
assert os.path.exists(os.path.join(macro_test_path, 'version.version.pass.cpp'))
return script_name, src_root, include_path, docs_path, macro_test_path
script_name, source_root, include_path, docs_path, macro_test_path = get_libcxx_paths()
def has_header(h):
h_path = os.path.join(include_path, h)
return os.path.exists(h_path)
def add_version_header(tc):
tc["headers"].append("version")
return tc
feature_test_macros = sorted([ add_version_header(x) for x in [
# C++14 macros
{"name": "__cpp_lib_integer_sequence",
"values": {
"c++14": 201304L
},
"headers": ["utility"],
},
{"name": "__cpp_lib_exchange_function",
"values": {
"c++14": 201304L
},
"headers": ["utility"],
},
{"name": "__cpp_lib_tuples_by_type",
"values": {
"c++14": 201304L
},
"headers": ["utility", "tuple"],
},
{"name": "__cpp_lib_tuple_element_t",
"values": {
"c++14": 201402L
},
"headers": ["tuple"],
},
{"name": "__cpp_lib_make_unique",
"values": {
"c++14": 201304L
},
"headers": ["memory"],
},
{"name": "__cpp_lib_transparent_operators",
"values": {
"c++14": 201210L,
"c++17": 201510L,
},
"headers": ["functional"],
},
{"name": "__cpp_lib_integral_constant_callable",
"values": {
"c++14": 201304L
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_transformation_trait_aliases",
"values": {
"c++14": 201304L,
},
"headers": ["type_traits"]
},
{"name": "__cpp_lib_result_of_sfinae",
"values": {
"c++14": 201210L,
},
"headers": ["functional", "type_traits"]
},
{"name": "__cpp_lib_is_final",
"values": {
"c++14": 201402L,
},
"headers": ["type_traits"]
},
{"name": "__cpp_lib_is_null_pointer",
"values": {
"c++14": 201309L,
},
"headers": ["type_traits"]
},
{"name": "__cpp_lib_chrono_udls",
"values": {
"c++14": 201304L,
},
"headers": ["chrono"]
},
{"name": "__cpp_lib_string_udls",
"values": {
"c++14": 201304L,
},
"headers": ["string"]
},
{"name": "__cpp_lib_generic_associative_lookup",
"values": {
"c++14": 201304L,
},
"headers": ["map", "set"]
},
{"name": "__cpp_lib_null_iterators",
"values": {
"c++14": 201304L,
},
"headers": ["iterator"]
},
{"name": "__cpp_lib_make_reverse_iterator",
"values": {
"c++14": 201402L,
},
"headers": ["iterator"]
},
{"name": "__cpp_lib_robust_nonmodifying_seq_ops",
"values": {
"c++14": 201304L,
},
"headers": ["algorithm"]
},
{"name": "__cpp_lib_complex_udls",
"values": {
"c++14": 201309L,
},
"headers": ["complex"]
},
{"name": "__cpp_lib_quoted_string_io",
"values": {
"c++14": 201304L,
},
"headers": ["iomanip"]
},
{"name": "__cpp_lib_shared_timed_mutex",
"values": {
"c++14": 201402L,
},
"headers": ["shared_mutex"],
"depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
"internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
},
# C++17 macros
{"name": "__cpp_lib_atomic_is_always_lock_free",
"values": {
"c++17": 201603L,
},
"headers": ["atomic"],
"depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
"internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
},
{"name": "__cpp_lib_filesystem",
"values": {
"c++17": 201703L,
},
"headers": ["filesystem"]
},
{"name": "__cpp_lib_invoke",
"values": {
"c++17": 201411L,
},
"headers": ["functional"]
},
{"name": "__cpp_lib_void_t",
"values": {
"c++17": 201411L,
},
"headers": ["type_traits"]
},
{"name": "__cpp_lib_node_extract",
"values": {
"c++17": 201606L,
},
"headers": ["map", "set", "unordered_map", "unordered_set"]
},
{"name": "__cpp_lib_byte",
"values": {
"c++17": 201603L,
},
"headers": ["cstddef"],
},
{"name": "__cpp_lib_hardware_interference_size",
"values": {
"c++17": 201703L,
},
"headers": ["new"],
},
{"name": "__cpp_lib_launder",
"values": {
"c++17": 201606L,
},
"headers": ["new"],
},
{"name": "__cpp_lib_uncaught_exceptions",
"values": {
"c++17": 201411L,
},
"headers": ["exception"],
},
{"name": "__cpp_lib_as_const",
"values": {
"c++17": 201510L,
},
"headers": ["utility"],
},
{"name": "__cpp_lib_make_from_tuple",
"values": {
"c++17": 201606L,
},
"headers": ["tuple"],
},
{"name": "__cpp_lib_apply",
"values": {
"c++17": 201603L,
},
"headers": ["tuple"],
},
{"name": "__cpp_lib_optional",
"values": {
"c++17": 201606L,
},
"headers": ["optional"],
},
{"name": "__cpp_lib_variant",
"values": {
"c++17": 201606L,
},
"headers": ["variant"],
},
{"name": "__cpp_lib_any",
"values": {
"c++17": 201606L,
},
"headers": ["any"],
},
{"name": "__cpp_lib_addressof_constexpr",
"values": {
"c++17": 201603L,
},
"headers": ["memory"],
"depends": "TEST_HAS_BUILTIN(__builtin_addressof) || TEST_GCC_VER >= 700",
"internal_depends": "defined(_LIBCUDACXX_ADDRESSOF)",
},
{"name": "__cpp_lib_raw_memory_algorithms",
"values": {
"c++17": 201606L,
},
"headers": ["memory"],
},
{"name": "__cpp_lib_enable_shared_from_this",
"values": {
"c++17": 201603L,
},
"headers": ["memory"],
},
{"name": "__cpp_lib_shared_ptr_weak_type",
"values": {
"c++17": 201606L,
},
"headers": ["memory"],
},
{"name": "__cpp_lib_shared_ptr_arrays",
"values": {
"c++17": 201611L,
},
"headers": ["memory"],
"unimplemented": True,
},
{"name": "__cpp_lib_memory_resource",
"values": {
"c++17": 201603L,
},
"headers": ["memory_resource"],
"unimplemented": True,
},
{"name": "__cpp_lib_boyer_moore_searcher",
"values": {
"c++17": 201603L,
},
"headers": ["functional"],
"unimplemented": True,
},
{"name": "__cpp_lib_not_fn",
"values": {
"c++17": 201603L,
},
"headers": ["functional"],
},
{"name": "__cpp_lib_bool_constant",
"values": {
"c++17": 201505L,
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_type_trait_variable_templates",
"values": {
"c++17": 201510L,
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_logical_traits",
"values": {
"c++17": 201510L,
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_is_swappable",
"values": {
"c++17": 201603L,
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_is_invocable",
"values": {
"c++17": 201703L,
},
"headers": ["type_traits"],
},
{"name": "__cpp_lib_has_unique_object_representations",
"values": {
"c++17": 201606L,
},
"headers": ["type_traits"],
"depends": "TEST_HAS_BUILTIN_IDENTIFIER(__has_unique_object_representations) || TEST_GCC_VER >= 700",
"internal_depends": "defined(_LIBCUDACXX_HAS_UNIQUE_OBJECT_REPRESENTATIONS)",
},
{"name": "__cpp_lib_is_aggregate",
"values": {
"c++17": 201703L,
},
"headers": ["type_traits"],
"depends": "TEST_HAS_BUILTIN_IDENTIFIER(__is_aggregate) || TEST_GCC_VER_NEW >= 7001",
"internal_depends": "!defined(_LIBCUDACXX_HAS_NO_IS_AGGREGATE)",
},
{"name": "__cpp_lib_chrono",
"values": {
"c++17": 201611L,
},
"headers": ["chrono"],
},
{"name": "__cpp_lib_execution",
"values": {
"c++17": 201603L,
},
"headers": ["execution"],
"unimplemented": True
},
{"name": "__cpp_lib_parallel_algorithm",
"values": {
"c++17": 201603L,
},
"headers": ["algorithm", "numeric"],
"unimplemented": True,
},
{"name": "__cpp_lib_to_chars",
"values": {
"c++17": 201611L,
},
"headers": ["utility"],
"unimplemented": True,
},
{"name": "__cpp_lib_string_view",
"values": {
"c++17": 201606L,
},
"headers": ["string", "string_view"],
},
{"name": "__cpp_lib_allocator_traits_is_always_equal",
"values": {
"c++17": 201411L,
},
"headers": ["memory", "scoped_allocator", "string", "deque", "forward_list", "list", "vector", "map", "set", "unordered_map", "unordered_set"],
},
{"name": "__cpp_lib_incomplete_container_elements",
"values": {
"c++17": 201505L,
},
"headers": ["forward_list", "list", "vector"],
},
{"name": "__cpp_lib_map_try_emplace",
"values": {
"c++17": 201411L,
},
"headers": ["map"],
},
{"name": "__cpp_lib_unordered_map_try_emplace",
"values": {
"c++17": 201411L,
},
"headers": ["unordered_map"],
},
{"name": "__cpp_lib_array_constexpr",
"values": {
"c++17": 201603L,
},
"headers": ["iterator", "array"],
},
{"name": "__cpp_lib_nonmember_container_access",
"values": {
"c++17": 201411L,
},
"headers": ["iterator", "array", "deque", "forward_list", "list", "map", "regex",
"set", "string", "unordered_map", "unordered_set", "vector"],
},
{"name": "__cpp_lib_sample",
"values": {
"c++17": 201603L,
},
"headers": ["algorithm"],
},
{"name": "__cpp_lib_clamp",
"values": {
"c++17": 201603L,
},
"headers": ["algorithm"],
},
{"name": "__cpp_lib_gcd_lcm",
"values": {
"c++17": 201606L,
},
"headers": ["numeric"],
},
{"name": "__cpp_lib_hypot",
"values": {
"c++17": 201603L,
},
"headers": ["cmath"],
},
{"name": "__cpp_lib_math_special_functions",
"values": {
"c++17": 201603L,
},
"headers": ["cmath"],
"unimplemented": True,
},
{"name": "__cpp_lib_shared_mutex",
"values": {
"c++17": 201505L,
},
"headers": ["shared_mutex"],
"depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
"internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
},
{"name": "__cpp_lib_scoped_lock",
"values": {
"c++17": 201703L,
},
"headers": ["mutex"],
},
# C++2a
{"name": "__cpp_lib_char8_t",
"values": {
"c++2a": 201811L,
},
"headers": ["atomic", "filesystem", "istream", "limits", "locale", "ostream",
"string", "string_view"],
"depends": "defined(__cpp_char8_t)",
"internal_depends": "!defined(_LIBCUDACXX_NO_HAS_CHAR8_T)",
},
{"name": "__cpp_lib_erase_if",
"values": {
"c++2a": 201811L,
},
"headers": ["string", "deque", "forward_list", "list", "vector", "map",
"set", "unordered_map", "unordered_set"]
},
{"name": "__cpp_lib_destroying_delete",
"values": {
"c++2a": 201806L,
},
"headers": ["new"],
"depends":
"TEST_STD_VER > 17"
" && defined(__cpp_impl_destroying_delete)"
" && __cpp_impl_destroying_delete >= 201806L",
"internal_depends":
"_LIBCUDACXX_STD_VER > 17"
" && defined(__cpp_impl_destroying_delete)"
" && __cpp_impl_destroying_delete >= 201806L",
},
{"name": "__cpp_lib_three_way_comparison",
"values": {
"c++2a": 201711L,
},
"headers": ["compare"],
"unimplemented": True,
},
{"name": "__cpp_lib_concepts",
"values": {
"c++14": 202002L,
},
"headers": ["concepts"],
},
{"name": "__cpp_lib_constexpr_swap_algorithms",
"values": {
"c++2a": 201806L,
},
"headers": ["algorithm"],
"unimplemented": True,
},
{"name": "__cpp_lib_constexpr_misc",
"values": {
"c++2a": 201811L,
},
"headers": ["array", "functional", "iterator", "string_view", "tuple", "utility"],
"unimplemented": True,
},
{"name": "__cpp_lib_bind_front",
"values": {
"c++17": 201907L,
},
"headers": ["functional"],
},
{"name": "__cpp_lib_is_constant_evaluated",
"values": {
"c++2a": 201811L,
},
"headers": ["type_traits"],
"depends": "TEST_HAS_BUILTIN(__builtin_is_constant_evaluated) || TEST_GCC_VER >= 900",
"internal_depends": "defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED)",
},
{"name": "__cpp_lib_list_remove_return_type",
"values": {
"c++2a": 201806L,
},
"headers": ["forward_list", "list"],
"unimplemented": True,
},
{"name": "__cpp_lib_generic_unordered_lookup",
"values": {
"c++2a": 201811L,
},
"headers": ["unordered_map", "unordered_set"],
"unimplemented": True,
},
{"name": "__cpp_lib_ranges",
"values": {
"c++2a": 201811L,
},
"headers": ["algorithm", "functional", "iterator", "memory", "ranges"],
"unimplemented": True,
},
{"name": "__cpp_lib_bit_cast",
"values": {
"c++2a": 201806L,
},
"headers": ["bit"],
"unimplemented": True,
},
{"name": "__cpp_lib_atomic_ref",
"values": {
"c++2a": 201806L,
},
"headers": ["atomic"],
"unimplemented": True,
"depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
"internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
},
{"name": "__cpp_lib_interpolate",
"values": {
"c++2a": 201902L,
},
"headers": ["numeric"],
},
]], key=lambda tc: tc["name"])
def get_std_dialects():
std_dialects = ['c++14', 'c++17', 'c++2a']
return list(std_dialects)
def get_first_std(d):
for s in get_std_dialects():
if s in d.keys():
return s
return None
def get_last_std(d):
rev_dialects = get_std_dialects()
rev_dialects.reverse()
for s in rev_dialects:
if s in d.keys():
return s
return None
def get_std_before(d, std):
std_dialects = get_std_dialects()
candidates = std_dialects[0:std_dialects.index(std)]
candidates.reverse()
for cand in candidates:
if cand in d.keys():
return cand
return None
def get_value_before(d, std):
new_std = get_std_before(d, std)
if new_std is None:
return None
return d[new_std]
def get_for_std(d, std):
# This catches the C++11 case for which there should be no defined feature
# test macros.
std_dialects = get_std_dialects()
if std not in std_dialects:
return None
# Find the value for the newest C++ dialect between C++14 and std
std_list = list(std_dialects[0:std_dialects.index(std)+1])
std_list.reverse()
for s in std_list:
if s in d.keys():
return d[s]
return None
"""
Functions to produce the <version> header
"""
def produce_macros_definition_for_std(std):
result = ""
indent = 56
for tc in feature_test_macros:
if std not in tc["values"]:
continue
inner_indent = 1
if 'depends' in tc.keys():
assert 'internal_depends' in tc.keys()
result += "# if %s\n" % tc["internal_depends"]
inner_indent += 2
if get_value_before(tc["values"], std) is not None:
assert 'depends' not in tc.keys()
result += "# undef %s\n" % tc["name"]
line = "#%sdefine %s" % ((" " * inner_indent), tc["name"])
line += " " * (indent - len(line))
line += "%sL" % tc["values"][std]
if 'unimplemented' in tc.keys():
line = "// " + line
result += line
result += "\n"
if 'depends' in tc.keys():
result += "# endif\n"
return result
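# For illustration, produce_macros_definition_for_std emits blocks of this shape
# for each macro defined in the requested dialect (value column padded to a
# fixed width; guarded macros are additionally wrapped in an #if/#endif):
#   # define __cpp_lib_integer_sequence                     201304L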
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def produce_version_synopsis():
indent = 56
header_indent = 56 + len("20XXYYL ")
result = ""
def indent_to(s, val):
if len(s) >= val:
return s
s += " " * (val - len(s))
return s
line = indent_to("Macro name", indent) + "Value"
line = indent_to(line, header_indent) + "Headers"
result += line + "\n"
for tc in feature_test_macros:
prev_defined_std = get_last_std(tc["values"])
line = "{name: <{indent}}{value}L ".format(name=tc['name'], indent=indent,
value=tc["values"][prev_defined_std])
headers = list(tc["headers"])
headers.remove("version")
for chunk in chunks(headers, 3):
line = indent_to(line, header_indent)
chunk = ['<%s>' % header for header in chunk]
line += ' '.join(chunk)
result += line
result += "\n"
line = ""
while True:
prev_defined_std = get_std_before(tc["values"], prev_defined_std)
if prev_defined_std is None:
break
result += "%s%sL // %s\n" % (indent_to("", indent), tc["values"][prev_defined_std],
prev_defined_std.replace("c++", "C++"))
return result
def produce_version_header():
template="""// -*- C++ -*-
//===--------------------------- version ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _LIBCUDACXX_VERSIONH
#define _LIBCUDACXX_VERSIONH
/*
version synopsis
{synopsis}
*/
#include <__config>
#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
#pragma GCC system_header
#endif
#if _LIBCUDACXX_STD_VER > 11
{cxx14_macros}
#endif
#if _LIBCUDACXX_STD_VER > 14
{cxx17_macros}
#endif
#if _LIBCUDACXX_STD_VER > 17
{cxx2a_macros}
#endif
#endif // _LIBCUDACXX_VERSIONH
"""
return template.format(
synopsis=produce_version_synopsis().strip(),
cxx14_macros=produce_macros_definition_for_std('c++14').strip(),
cxx17_macros=produce_macros_definition_for_std('c++17').strip(),
cxx2a_macros=produce_macros_definition_for_std('c++2a').strip())
"""
Functions to produce test files
"""
test_types = {
"undefined": """
# ifdef {name}
# error "{name} should not be defined before {std_first}"
# endif
""",
"depends": """
# if {depends}
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
# else
# ifdef {name}
# error "{name} should not be defined when {depends} is not defined!"
# endif
# endif
""",
"unimplemented": """
# if !defined(_LIBCUDACXX_VERSION)
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
# else // _LIBCUDACXX_VERSION
# ifdef {name}
# error "{name} should not be defined because it is unimplemented in libc++!"
# endif
# endif
""",
"defined":"""
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
"""
}
def generate_std_test(test_list, std):
result = ""
for tc in test_list:
val = get_for_std(tc["values"], std)
if val is not None:
val = "%sL" % val
if val is None:
result += test_types["undefined"].format(name=tc["name"], std_first=get_first_std(tc["values"]))
elif 'unimplemented' in tc.keys():
result += test_types["unimplemented"].format(name=tc["name"], value=val, std=std)
elif "depends" in tc.keys():
result += test_types["depends"].format(name=tc["name"], value=val, std=std, depends=tc["depends"])
else:
result += test_types["defined"].format(name=tc["name"], value=val, std=std)
return result
def generate_synopsis(test_list):
max_name_len = max([len(tc["name"]) for tc in test_list])
indent = max_name_len + 8
def mk_line(prefix, suffix):
return "{prefix: <{max_len}}{suffix}\n".format(prefix=prefix, suffix=suffix,
max_len=indent)
result = ""
result += mk_line("/* Constant", "Value")
for tc in test_list:
prefix = " %s" % tc["name"]
for std in [s for s in get_std_dialects() if s in tc["values"].keys()]:
result += mk_line(prefix, "%sL [%s]" % (tc["values"][std], std.replace("c++", "C++")))
prefix = ""
result += "*/"
return result
def is_threading_header_unsafe_to_include(h):
# NOTE: "<mutex>" does not blow up when included without threads.
return h in ['atomic', 'shared_mutex']
def produce_tests():
headers = set([h for tc in feature_test_macros for h in tc["headers"]])
for h in headers:
test_list = [tc for tc in feature_test_macros if h in tc["headers"]]
if not has_header(h):
for tc in test_list:
assert 'unimplemented' in tc.keys()
continue
test_tags = ""
if is_threading_header_unsafe_to_include(h):
test_tags += '\n// UNSUPPORTED: libcpp-has-no-threads\n'
test_body = \
"""//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// WARNING: This test was generated by {script_name}
// and should not be edited manually.
{test_tags}
// <{header}>
// Test the feature test macros defined by <{header}>
{synopsis}
#include <{header}>
#include "test_macros.h"
#if TEST_STD_VER < 14
{cxx11_tests}
#elif TEST_STD_VER == 14
{cxx14_tests}
#elif TEST_STD_VER == 17
{cxx17_tests}
#elif TEST_STD_VER > 17
{cxx2a_tests}
#endif // TEST_STD_VER > 17
int main(int, char**) {{ return 0; }}
""".format(script_name=script_name,
header=h,
test_tags=test_tags,
synopsis=generate_synopsis(test_list),
cxx11_tests=generate_std_test(test_list, 'c++11').strip(),
cxx14_tests=generate_std_test(test_list, 'c++14').strip(),
cxx17_tests=generate_std_test(test_list, 'c++17').strip(),
cxx2a_tests=generate_std_test(test_list, 'c++2a').strip())
test_name = "{header}.version.pass.cpp".format(header=h)
out_path = os.path.join(macro_test_path, test_name)
with open(out_path, 'w') as f:
f.write(test_body)
"""
Produce documentation for the feature test macros
"""
def make_widths(grid):
    widths = []
    for i in range(0, len(grid[0])):
        # widest cell in column i across all rows, plus two columns of padding
        cell_width = 2 + max(len(row[i]) for row in grid)
        widths += [cell_width]
    return widths
def create_table(grid, indent):
indent_str = ' '*indent
col_widths = make_widths(grid)
num_cols = len(grid[0])
result = indent_str + add_divider(col_widths, 2)
header_flag = 2
    for row_i in range(0, len(grid)):
row = grid[row_i]
result = result + indent_str + ' '.join([pad_cell(row[i], col_widths[i]) for i in range(0, len(row))]) + '\n'
is_cxx_header = row[0].startswith('**')
if row_i == len(grid) - 1:
header_flag = 2
result = result + indent_str + add_divider(col_widths, 1 if is_cxx_header else header_flag)
header_flag = 0
return result
def add_divider(widths, header_flag):
if header_flag == 2:
return ' '.join(['='*w for w in widths]) + '\n'
if header_flag == 1:
return '-'.join(['-'*w for w in widths]) + '\n'
else:
return ' '.join(['-'*w for w in widths]) + '\n'
def pad_cell(s, length, left_align=True):
padding = ((length - len(s)) * ' ')
return s + padding
def get_status_table():
table = [["Macro Name", "Value"]]
for std in get_std_dialects():
table += [["**" + std.replace("c++", "C++ ") + "**", ""]]
for tc in feature_test_macros:
if std not in tc["values"].keys():
continue
value = "``%sL``" % tc["values"][std]
if 'unimplemented' in tc.keys():
value = '*unimplemented*'
table += [["``%s``" % tc["name"], value]]
return table
def produce_docs():
doc_str = """.. _FeatureTestMacroTable:
==========================
Feature Test Macro Support
==========================
.. contents::
:local:
Overview
========
This file documents the feature test macros currently supported by libc++.
.. _feature-status:
Status
======
.. table:: Current Status
:name: feature-status-table
:widths: auto
{status_tables}
""".format(status_tables=create_table(get_status_table(), 4))
table_doc_path = os.path.join(docs_path, 'FeatureTestMacroTable.rst')
with open(table_doc_path, 'w') as f:
f.write(doc_str)
def main():
with tempfile.NamedTemporaryFile(mode='w', prefix='version.', delete=False) as tmp_file:
print("producing new <version> header as %s" % tmp_file.name)
tmp_file.write(produce_version_header())
produce_tests()
produce_docs()
if __name__ == '__main__':
main()
| libcudacxx-main | libcxx/utils/generate_feature_test_macro_components.py |
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_match - Match all symbols in a list against a list of regexes.
"""
from argparse import ArgumentParser
import sys
from libcxx.sym_check import util, match, extract
def main():
parser = ArgumentParser(
description='Extract a list of symbols from a shared library.')
parser.add_argument(
'--blacklist', dest='blacklist',
type=str, action='store', default=None)
parser.add_argument(
'symbol_list', metavar='symbol_list', type=str,
help='The file containing the old symbol list')
parser.add_argument(
'regexes', metavar='regexes', default=[], nargs='*',
help='The file containing the new symbol list or a library')
args = parser.parse_args()
if not args.regexes and args.blacklist is None:
sys.stderr.write('Either a regex or a blacklist must be specified.\n')
sys.exit(1)
if args.blacklist:
search_list = util.read_blacklist(args.blacklist)
else:
search_list = args.regexes
symbol_list = util.extract_or_load(args.symbol_list)
matching_count, report = match.find_and_report_matching(
symbol_list, search_list)
sys.stdout.write(report)
if matching_count != 0:
print('%d matching symbols found...' % matching_count)
if __name__ == '__main__':
main()
| libcudacxx-main | libcxx/utils/sym_match.py |
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from argparse import ArgumentParser
from ctypes.util import find_library
import distutils.spawn
import glob
import tempfile
import os
import shutil
import subprocess
import signal
import sys
temp_directory_root = None
def exit_with_cleanups(status):
if temp_directory_root is not None:
shutil.rmtree(temp_directory_root)
sys.exit(status)
def print_and_exit(msg):
sys.stderr.write(msg + '\n')
exit_with_cleanups(1)
def find_and_diagnose_missing(lib, search_paths):
if os.path.exists(lib):
return os.path.abspath(lib)
if not lib.startswith('lib') or not lib.endswith('.a'):
print_and_exit(("input file '%s' not not name a static library. "
"It should start with 'lib' and end with '.a") % lib)
for sp in search_paths:
assert type(sp) is list and len(sp) == 1
path = os.path.join(sp[0], lib)
if os.path.exists(path):
return os.path.abspath(path)
print_and_exit("input '%s' does not exist" % lib)
def execute_command(cmd, cwd=None):
"""
Execute a command, capture and return its output.
"""
kwargs = {
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'cwd': cwd,
'universal_newlines': True
}
p = subprocess.Popen(cmd, **kwargs)
out, err = p.communicate()
exitCode = p.wait()
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
def execute_command_verbose(cmd, cwd=None, verbose=False):
"""
Execute a command and print its output on failure.
"""
out, err, exitCode = execute_command(cmd, cwd=cwd)
if exitCode != 0 or verbose:
report = "Command: %s\n" % ' '.join(["'%s'" % a for a in cmd])
if exitCode != 0:
report += "Exit Code: %d\n" % exitCode
if out:
report += "Standard Output:\n--\n%s--" % out
if err:
report += "Standard Error:\n--\n%s--" % err
if exitCode != 0:
report += "\n\nFailed!"
sys.stderr.write('%s\n' % report)
if exitCode != 0:
exit_with_cleanups(exitCode)
return out
def main():
parser = ArgumentParser(
description="Merge multiple archives into a single library")
parser.add_argument(
'-v', '--verbose', dest='verbose', action='store_true', default=False)
parser.add_argument(
'-o', '--output', dest='output', required=True,
help='The output file. stdout is used if not given',
type=str, action='store')
parser.add_argument(
'-L', dest='search_paths',
help='Paths to search for the libraries along', action='append',
nargs=1)
parser.add_argument(
'--ar', dest='ar_exe', required=False,
help='The ar executable to use, finds \'ar\' in the path if not given',
type=str, action='store')
parser.add_argument(
'--use-libtool', dest='use_libtool', action='store_true', default=False)
parser.add_argument(
'--libtool', dest='libtool_exe', required=False,
help='The libtool executable to use, finds \'libtool\' in the path if not given',
type=str, action='store')
parser.add_argument(
'archives', metavar='archives', nargs='+',
help='The archives to merge')
args = parser.parse_args()
ar_exe = args.ar_exe
if not ar_exe:
ar_exe = distutils.spawn.find_executable('ar')
if not ar_exe:
print_and_exit("failed to find 'ar' executable")
if args.use_libtool:
libtool_exe = args.libtool_exe
if not libtool_exe:
libtool_exe = distutils.spawn.find_executable('libtool')
if not libtool_exe:
print_and_exit("failed to find 'libtool' executable")
if len(args.archives) < 2:
print_and_exit('fewer than 2 inputs provided')
archives = [find_and_diagnose_missing(ar, args.search_paths)
for ar in args.archives]
print ('Merging archives: %s' % archives)
if not os.path.exists(os.path.dirname(args.output)):
print_and_exit("output path doesn't exist: '%s'" % args.output)
global temp_directory_root
temp_directory_root = tempfile.mkdtemp('.libcxx.merge.archives')
files = []
for arc in archives:
execute_command_verbose([ar_exe, 'x', arc],
cwd=temp_directory_root, verbose=args.verbose)
out = execute_command_verbose([ar_exe, 't', arc])
files.extend(out.splitlines())
if args.use_libtool:
files = [f for f in files if not f.startswith('__.SYMDEF')]
execute_command_verbose([libtool_exe, '-static', '-o', args.output] + files,
cwd=temp_directory_root, verbose=args.verbose)
else:
execute_command_verbose([ar_exe, 'rcs', args.output] + files,
cwd=temp_directory_root, verbose=args.verbose)
if __name__ == '__main__':
main()
exit_with_cleanups(0)
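# Example invocation (illustrative library names): merge libc++ and libc++abi
# static archives into a single archive, searching ./lib for the inputs:
#   python merge_archives.py -o libc++_combined.a -L ./lib libc++.a libc++abi.a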
| libcudacxx-main | libcxx/utils/merge_archives.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""not.py is a utility for inverting the return code of commands.
It acts similar to llvm/utils/not.
ex: python /path/to/not.py ' echo hello
echo $? // (prints 1)
"""
import subprocess
import sys
def which_cannot_find_program(prog):
# Allow for import errors on distutils.spawn
try:
import distutils.spawn
prog = distutils.spawn.find_executable(prog[0])
if prog is None:
sys.stderr.write('Failed to find program %s' % prog[0])
return True
return False
except:
return False
def main():
argv = list(sys.argv)
del argv[0]
if len(argv) > 0 and argv[0] == '--crash':
del argv[0]
expectCrash = True
else:
expectCrash = False
if len(argv) == 0:
return 1
if which_cannot_find_program(argv[0]):
return 1
rc = subprocess.call(argv)
if rc < 0:
return 0 if expectCrash else 1
if expectCrash:
return 1
return rc == 0
if __name__ == '__main__':
exit(main())
| libcudacxx-main | libcxx/utils/not.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import platform
import os
import libcxx.util
class CXXCompiler(object):
CM_Default = 0
CM_PreProcess = 1
CM_Compile = 2
CM_Link = 3
def __init__(self, path, first_arg,
flags=None, compile_flags=None, link_flags=None,
warning_flags=None, verify_supported=None,
verify_flags=None, use_verify=False,
modules_flags=None, use_modules=False,
use_ccache=False, use_warnings=False, compile_env=None,
cxx_type=None, cxx_version=None):
self.source_lang = 'c++'
self.path = path
self.first_arg = first_arg or ''
self.flags = list(flags or [])
self.compile_flags = list(compile_flags or [])
self.link_flags = list(link_flags or [])
self.warning_flags = list(warning_flags or [])
self.verify_supported = verify_supported
self.use_verify = use_verify
self.verify_flags = list(verify_flags or [])
assert not use_verify or verify_supported
assert not use_verify or verify_flags is not None
self.modules_flags = list(modules_flags or [])
self.use_modules = use_modules
assert not use_modules or modules_flags is not None
self.use_ccache = use_ccache
self.use_warnings = use_warnings
if compile_env is not None:
self.compile_env = dict(compile_env)
else:
self.compile_env = None
self.type = cxx_type
self.version = cxx_version
if self.type is None or self.version is None:
self._initTypeAndVersion()
def isVerifySupported(self):
if self.verify_supported is None:
self.verify_supported = self.hasCompileFlag(['-Xclang',
'-verify-ignore-unexpected'])
if self.verify_supported:
self.verify_flags = [
'-Xclang', '-verify',
'-Xclang', '-verify-ignore-unexpected=note',
'-ferror-limit=1024'
]
return self.verify_supported
def useVerify(self, value=True):
self.use_verify = value
assert not self.use_verify or self.verify_flags is not None
def useModules(self, value=True):
self.use_modules = value
assert not self.use_modules or self.modules_flags is not None
def useCCache(self, value=True):
self.use_ccache = value
def useWarnings(self, value=True):
self.use_warnings = value
def _initTypeAndVersion(self):
# Get compiler type and version
try:
macros = self.dumpMacros()
compiler_type = None
major_ver = minor_ver = patchlevel = None
self.is_nvrtc = False
if '__NVCC__' in macros.keys():
compiler_type = 'nvcc'
major_ver = macros['__CUDACC_VER_MAJOR__']
minor_ver = macros['__CUDACC_VER_MINOR__']
patchlevel = macros['__CUDACC_VER_BUILD__']
if '__LIBCUDACXX_NVRTC_TEST__' in macros.keys():
self.is_nvrtc = True
elif '__NVCOMPILER' in macros.keys():
compiler_type = "nvhpc"
# nvhpc, unfortunately, adds an extra space between the macro name
# and macro value in their macro dump mode.
major_ver = macros['__NVCOMPILER'].strip()
minor_ver = macros['___NVCOMPILER_MINOR__'].strip()
patchlevel = macros['___NVCOMPILER_PATCHLEVEL__'].strip()
elif '__INTEL_COMPILER' in macros.keys():
compiler_type = "icc"
                major_ver = int(macros['__INTEL_COMPILER']) // 100
                minor_ver = (int(macros['__INTEL_COMPILER']) % 100) // 10
                patchlevel = int(macros['__INTEL_COMPILER']) % 10
elif '__clang__' in macros.keys():
compiler_type = 'clang'
# Treat Apple's LLVM fork differently.
if '__apple_build_version__' in macros.keys():
compiler_type = 'apple-clang'
major_ver = macros['__clang_major__']
minor_ver = macros['__clang_minor__']
patchlevel = macros['__clang_patchlevel__']
elif '__GNUC__' in macros.keys():
compiler_type = 'gcc'
major_ver = macros['__GNUC__']
minor_ver = macros['__GNUC_MINOR__']
patchlevel = macros['__GNUC_PATCHLEVEL__']
if '__cplusplus' in macros.keys():
cplusplus = macros['__cplusplus']
if cplusplus[-1] == 'L':
cplusplus = cplusplus[:-1]
cpp_standard = int(cplusplus)
if cpp_standard <= 199711:
default_dialect = "c++03"
elif cpp_standard <= 201103:
default_dialect = "c++11"
elif cpp_standard <= 201402:
default_dialect = "c++14"
elif cpp_standard <= 201703:
default_dialect = "c++17"
else:
default_dialect = "c++20"
else:
default_dialect = "c++03"
self.type = compiler_type
self.version = (major_ver, minor_ver, patchlevel)
self.default_dialect = default_dialect
except:
(self.type, self.version, self.default_dialect, self.is_nvrtc) = \
self.dumpVersion()
if self.type == 'nvcc':
# Treat C++ as CUDA when the compiler is NVCC.
self.source_lang = 'cu'
def _basicCmd(self, source_files, out, mode=CM_Default, flags=[],
input_is_cxx=False):
cmd = []
if self.use_ccache \
and not mode == self.CM_Link \
and not mode == self.CM_PreProcess:
cmd += ['ccache']
cmd += [self.path] + ([self.first_arg] if self.first_arg != '' else [])
if out is not None:
cmd += ['-o', out]
if input_is_cxx:
cmd += ['-x', self.source_lang]
if isinstance(source_files, list):
cmd += source_files
elif isinstance(source_files, str):
cmd += [source_files]
else:
raise TypeError('source_files must be a string or list')
if mode == self.CM_PreProcess:
cmd += ['-E']
elif mode == self.CM_Compile:
cmd += ['-c']
cmd += self.flags
if self.use_verify:
cmd += self.verify_flags
assert mode in [self.CM_Default, self.CM_Compile]
if self.use_modules:
cmd += self.modules_flags
if mode != self.CM_Link:
cmd += self.compile_flags
if self.use_warnings:
cmd += self.warning_flags
if mode != self.CM_PreProcess and mode != self.CM_Compile:
cmd += self.link_flags
cmd += flags
return cmd
def preprocessCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags,
mode=self.CM_PreProcess,
input_is_cxx=True)
def compileCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags,
mode=self.CM_Compile,
input_is_cxx=True) + ['-c']
def linkCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags,
mode=self.CM_Link)
def compileLinkCmd(self, source_files, out=None, flags=[]):
return self._basicCmd(source_files, out, flags=flags)
def preprocess(self, source_files, out=None, flags=[], cwd=None):
cmd = self.preprocessCmd(source_files, out, flags)
out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
return cmd, out, err, rc
def compile(self, source_files, out=None, flags=[], cwd=None):
cmd = self.compileCmd(source_files, out, flags)
out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
return cmd, out, err, rc
def link(self, source_files, out=None, flags=[], cwd=None):
cmd = self.linkCmd(source_files, out, flags)
out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
return cmd, out, err, rc
def compileLink(self, source_files, out=None, flags=[],
cwd=None):
cmd = self.compileLinkCmd(source_files, out, flags)
out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
cwd=cwd)
return cmd, out, err, rc
def compileLinkTwoSteps(self, source_file, out=None, object_file=None,
flags=[], cwd=None):
if not isinstance(source_file, str):
raise TypeError('This function only accepts a single input file')
if object_file is None:
# Create, use and delete a temporary object file if none is given.
with_fn = lambda: libcxx.util.guardedTempFilename(suffix='.o')
else:
# Otherwise wrap the filename in a context manager function.
with_fn = lambda: libcxx.util.nullContext(object_file)
with with_fn() as object_file:
cc_cmd, cc_stdout, cc_stderr, rc = self.compile(
source_file, object_file, flags=flags, cwd=cwd)
if rc != 0:
return cc_cmd, cc_stdout, cc_stderr, rc
link_cmd, link_stdout, link_stderr, rc = self.link(
object_file, out=out, flags=flags, cwd=cwd)
return (cc_cmd + ['&&'] + link_cmd, cc_stdout + link_stdout,
cc_stderr + link_stderr, rc)
def dumpVersion(self, flags=[], cwd=None):
dumpversion_cpp = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "dumpversion.cpp")
with_fn = lambda: libcxx.util.guardedTempFilename(suffix=".exe")
with with_fn() as exe:
cmd, out, err, rc = self.compileLink([dumpversion_cpp], out=exe,
flags=flags, cwd=cwd)
if rc != 0:
return ("unknown", (0, 0, 0), "c++03", False)
out, err, rc = libcxx.util.executeCommand(exe, env=self.compile_env,
cwd=cwd)
version = None
try:
version = eval(out)
except:
pass
if not (isinstance(version, tuple) and 4 == len(version)):
version = ("unknown", (0, 0, 0), "c++03", False)
return version
def dumpMacros(self, source_files=None, flags=[], cwd=None):
if source_files is None:
source_files = os.devnull
flags = ['-dM'] + flags
cmd, out, err, rc = self.preprocess(source_files, flags=flags, cwd=cwd)
if rc != 0:
flags = ['-Xcompiler'] + flags
cmd, out, err, rc = self.preprocess(source_files, flags=flags, cwd=cwd)
if rc != 0:
return cmd, out, err, rc
parsed_macros = {}
lines = [l.strip() for l in out.split('\n') if l.strip()]
for l in lines:
# NVHPC also outputs the file contents from -E -dM for some reason; handle that
if not l.startswith('#define '):
if '__NVCOMPILER' not in parsed_macros.keys():
assert False, "a line not starting with '#define' encountered in predefined macro dump"
else:
continue
l = l[len('#define '):]
macro, _, value = l.partition(' ')
parsed_macros[macro] = value
return parsed_macros
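# Editor's note (illustrative sketch): on success dumpMacros returns a dict
# mapping macro names to values; on a failed preprocessor run it returns the
# (cmd, out, err, rc) tuple instead, so callers should type-check the result.
# The macro value shown below is hypothetical.
#
#   macros = cxx.dumpMacros()
#   if isinstance(macros, dict):
#       print(macros.get('__cplusplus'))   # e.g. '201703L'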
def getTriple(self):
if self.type == "msvc":
return "x86_64-pc-windows-msvc"
cmd = [self.path] + self.flags + ['-dumpmachine']
return libcxx.util.capture(cmd).strip()
def hasCompileFlag(self, flag):
if isinstance(flag, list):
flags = list(flag)
else:
flags = [flag]
# Add -Werror to ensure that an unrecognized flag causes a non-zero
# exit code. -Werror is supported on all known non-nvcc compiler types.
if self.type is not None and self.type != 'nvcc' and self.type != 'msvc':
flags += ['-Werror', '-fsyntax-only']
empty_cpp = os.path.join(os.path.dirname(os.path.abspath(__file__)), "empty.cpp")
cmd, out, err, rc = self.compile(empty_cpp, out=os.devnull,
flags=flags)
if out.find('flag is not supported with the configured host compiler') != -1:
return False
if err.find('flag is not supported with the configured host compiler') != -1:
return False
return rc == 0
def addFlagIfSupported(self, flag):
if isinstance(flag, list):
flags = list(flag)
else:
flags = [flag]
if self.hasCompileFlag(flags):
self.flags += flags
return True
else:
return False
def addCompileFlagIfSupported(self, flag):
if isinstance(flag, list):
flags = list(flag)
else:
flags = [flag]
if self.hasCompileFlag(flags):
self.compile_flags += flags
return True
else:
return False
def hasWarningFlag(self, flag):
"""
hasWarningFlag - Test if the compiler supports a given warning flag.
Unlike addCompileFlagIfSupported, this function detects when
"-Wno-<warning>" flags are unsupported. If flag is a
"-Wno-<warning>" GCC will not emit an unknown option diagnostic unless
another error is triggered during compilation.
"""
assert isinstance(flag, str)
assert flag.startswith('-W')
if not flag.startswith('-Wno-'):
return self.hasCompileFlag(flag)
flags = ['-Werror', flag]
old_use_warnings = self.use_warnings
self.useWarnings(False)
cmd = self.compileCmd('-', os.devnull, flags)
self.useWarnings(old_use_warnings)
# Remove '-v' because it will cause the command line invocation
# to be printed as part of the error output.
# TODO(EricWF): Are there other flags we need to worry about?
if '-v' in cmd:
cmd.remove('-v')
out, err, rc = libcxx.util.executeCommand(
cmd, input=libcxx.util.to_bytes('#error\n'))
assert rc != 0
if flag in err:
return False
return True
def addWarningFlagIfSupported(self, flag):
if self.hasWarningFlag(flag):
if flag not in self.warning_flags:
self.warning_flags += [flag]
return True
return False
| libcudacxx-main | libcxx/utils/libcxx/compiler.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from contextlib import contextmanager
import errno
import os
import platform
import signal
import subprocess
import sys
import tempfile
import threading
# FIXME: Most of these functions are cribbed from LIT
def to_bytes(str):
# Encode to UTF-8 to get binary data.
if isinstance(str, bytes):
return str
return str.encode('utf-8')
def to_string(bytes):
if isinstance(bytes, str):
return bytes
return to_bytes(bytes)
def convert_string(bytes):
try:
return to_string(bytes.decode('utf-8'))
except AttributeError: # 'str' object has no attribute 'decode'.
return str(bytes)
except UnicodeError:
return str(bytes)
def cleanFile(filename):
try:
os.remove(filename)
except OSError:
pass
@contextmanager
def guardedTempFilename(suffix='', prefix='', dir=None):
# Creates and yields a temporary filename within a with statement. The file
# is removed upon scope exit.
handle, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
os.close(handle)
yield name
cleanFile(name)
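# Editor's note (illustrative sketch): the yielded name is valid inside the
# with-block and the file is removed on exit. The suffix is an example only.
#
#   with guardedTempFilename(suffix='.o') as object_file:
#       print('writing to', object_file)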
@contextmanager
def guardedFilename(name):
# yields a filename within a with statement. The file is removed upon scope
# exit.
yield name
cleanFile(name)
@contextmanager
def nullContext(value):
# yields a variable within a with statement. No action is taken upon scope
# exit.
yield value
def makeReport(cmd, out, err, rc):
report = "Command: %s\n" % cmd
report += "Exit Code: %d\n" % rc
if out:
report += "Standard Output:\n--\n%s--\n" % out
if err:
report += "Standard Error:\n--\n%s--\n" % err
report += '\n'
return report
def capture(args, env=None):
"""capture(command) - Run the given command (or argv list) in a shell and
return the standard output. Raises a CalledProcessError if the command
exits with a non-zero status."""
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
out = convert_string(out)
err = convert_string(err)
if p.returncode != 0:
raise subprocess.CalledProcessError(cmd=args,
returncode=p.returncode,
output="{}\n{}".format(out, err))
return out
def which(command, paths = None):
"""which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified)."""
if paths is None:
paths = os.environ.get('PATH','')
# Check for absolute match first.
if os.path.isfile(command):
return command
# Would be nice if Python had a lib function for this.
if not paths:
paths = os.defpath
# Get suffixes to search.
# On Cygwin, 'PATHEXT' may exist but it should not be used.
if os.pathsep == ';':
pathext = os.environ.get('PATHEXT', '').split(';')
else:
pathext = ['']
# Search the paths...
for path in paths.split(os.pathsep):
for ext in pathext:
p = os.path.join(path, command + ext)
if os.path.exists(p) and not os.path.isdir(p):
return p
return None
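# Editor's note (illustrative sketch): which() returns the full path of the
# tool or None when it cannot be found. 'clang++' is only an example name.
#
#   clangxx = which('clang++')
#   if clangxx is None:
#       print('clang++ not found on PATH')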
def checkToolsPath(dir, tools):
for tool in tools:
if not os.path.exists(os.path.join(dir, tool)):
return False
return True
def whichTools(tools, paths):
for path in paths.split(os.pathsep):
if checkToolsPath(path, tools):
return path
return None
def mkdir_p(path):
"""mkdir_p(path) - Make the "path" directory, if it does not exist; this
will also make directories for any missing parent directories."""
if not path or os.path.exists(path):
return
parent = os.path.dirname(path)
if parent != path:
mkdir_p(parent)
try:
os.mkdir(path)
except OSError:
e = sys.exc_info()[1]
# Ignore EEXIST, which may occur during a race condition.
if e.errno != errno.EEXIST:
raise
class ExecuteCommandTimeoutException(Exception):
def __init__(self, msg, out, err, exitCode):
assert isinstance(msg, str)
assert isinstance(out, str)
assert isinstance(err, str)
assert isinstance(exitCode, int)
self.msg = msg
self.out = out
self.err = err
self.exitCode = exitCode
# Close extra file handles on UNIX (on Windows this cannot be done while
# also redirecting input).
kUseCloseFDs = not (platform.system() == 'Windows')
def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
"""
Execute command ``command`` (list of arguments or string)
with
* working directory ``cwd`` (str), use None to use the current
working directory
* environment ``env`` (dict), use None for none
* Input to the command ``input`` (str), use None to pass
no input.
* Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.
Returns a tuple (out, err, exitCode) where
* ``out`` (str) is the standard output of running the command
* ``err`` (str) is the standard error of running the command
* ``exitCode`` (int) is the exitCode of running the command
If the timeout is hit an ``ExecuteCommandTimeoutException``
is raised.
"""
if input is not None:
input = to_bytes(input)
p = subprocess.Popen(command, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env, close_fds=kUseCloseFDs)
timerObject = None
# FIXME: Because of the way nested function scopes work in Python 2.x we
# need to use a reference to a mutable object rather than a plain
# bool. In Python 3 we could use the "nonlocal" keyword but we need
# to support Python 2 as well.
hitTimeOut = [False]
try:
if timeout > 0:
def killProcess():
# We may be invoking a shell so we need to kill the
# process and all its children.
hitTimeOut[0] = True
killProcessAndChildren(p.pid)
timerObject = threading.Timer(timeout, killProcess)
timerObject.start()
out,err = p.communicate(input=input)
exitCode = p.wait()
finally:
if timerObject != None:
timerObject.cancel()
# Ensure the resulting output is always of string type.
out = convert_string(out)
err = convert_string(err)
if hitTimeOut[0]:
raise ExecuteCommandTimeoutException(
msg='Reached timeout of {} seconds'.format(timeout),
out=out,
err=err,
exitCode=exitCode
)
# Detect Ctrl-C in subprocess.
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
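# Editor's note (illustrative sketch): run a short command with a timeout and
# report failures; the command shown is hypothetical.
#
#   out, err, rc = executeCommand(['echo', 'hello'], timeout=10)
#   if rc != 0:
#       sys.stderr.write(makeReport(['echo', 'hello'], out, err, rc))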
def killProcessAndChildren(pid):
"""
This function kills a process with ``pid`` and all its
running children (recursively). It is currently implemented
using the psutil module which provides a simple platform
neutral implementation.
TODO: Reimplement this without using psutil so we can
remove our dependency on it.
"""
if platform.system() == 'AIX':
subprocess.call('kill -kill $(ps -o pid= -L{})'.format(pid), shell=True)
else:
import psutil
try:
psutilProc = psutil.Process(pid)
# Handle the different psutil API versions
try:
# psutil >= 2.x
children_iterator = psutilProc.children(recursive=True)
except AttributeError:
# psutil 1.x
children_iterator = psutilProc.get_children(recursive=True)
for child in children_iterator:
try:
child.kill()
except psutil.NoSuchProcess:
pass
psutilProc.kill()
except psutil.NoSuchProcess:
pass
def executeCommandVerbose(cmd, *args, **kwargs):
"""
Execute a command and print its output on failure.
"""
out, err, exitCode = executeCommand(cmd, *args, **kwargs)
if exitCode != 0:
report = makeReport(cmd, out, err, exitCode)
report += "\n\nFailed!"
sys.stderr.write('%s\n' % report)
return out, err, exitCode
| libcudacxx-main | libcxx/utils/libcxx/util.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""libcxx python utilities"""
__author__ = 'Eric Fiselier'
__email__ = '[email protected]'
__versioninfo__ = (0, 1, 0)
__version__ = ' '.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = []
| libcudacxx-main | libcxx/utils/libcxx/__init__.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import os
import inspect
def trace_function(function, log_calls, log_results, label=''):
def wrapper(*args, **kwargs):
kwarg_strs = ['{}={}'.format(k, v) for (k, v) in kwargs.items()]
arg_str = ', '.join([str(a) for a in args] + kwarg_strs)
call_str = '{}({})'.format(function.__name__, arg_str)
# Perform the call itself, logging before, after, and anything thrown.
try:
if log_calls:
print('{}: Calling {}'.format(label, call_str))
res = function(*args, **kwargs)
if log_results:
print('{}: {} -> {}'.format(label, call_str, res))
return res
except Exception as ex:
if log_results:
print('{}: {} raised {}'.format(label, call_str, type(ex)))
raise ex
return wrapper
def trace_object(obj, log_calls, log_results, label=''):
for name, member in inspect.getmembers(obj):
if inspect.ismethod(member):
# Skip meta-functions, decorate everything else
if not member.__name__.startswith('__'):
setattr(obj, name, trace_function(member, log_calls,
log_results, label))
return obj
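# Editor's note (illustrative sketch): wrap an object so every public method
# call and result is printed; 'executor' is a hypothetical stand-in.
#
#   executor = trace_object(executor, log_calls=True, log_results=True,
#                           label='EXEC')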
| libcudacxx-main | libcxx/utils/libcxx/test/tracing.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import locale
import os
import platform
import pkgutil
import pipes
import re
import shlex
import shutil
import sys
from libcxx.compiler import CXXCompiler
from libcxx.test.target_info import make_target_info
from libcxx.test.executor import *
from libcxx.test.tracing import *
import libcxx.util
def loadSiteConfig(lit_config, config, param_name, env_name):
# We haven't loaded the site specific configuration (the user is
# probably trying to run on a test file directly, and either the site
# configuration hasn't been created by the build system, or we are in an
# out-of-tree build situation).
site_cfg = lit_config.params.get(param_name,
os.environ.get(env_name))
if not site_cfg:
lit_config.warning('No site specific configuration file found!'
' Running the tests in the default configuration.')
elif not os.path.isfile(site_cfg):
lit_config.fatal(
"Specified site configuration file does not exist: '%s'" %
site_cfg)
else:
lit_config.note('using site specific configuration at %s' % site_cfg)
ld_fn = lit_config.load_config
# Null out the load_config function so that lit.site.cfg doesn't
# recursively load a config even if it tries.
# TODO: This is one hell of a hack. Fix it.
def prevent_reload_fn(*args, **kwargs):
pass
lit_config.load_config = prevent_reload_fn
ld_fn(config, site_cfg)
lit_config.load_config = ld_fn
# Extract the value of a numeric macro such as __cplusplus or a feature-test
# macro.
def intMacroValue(token):
return int(token.rstrip('LlUu'))
class Configuration(object):
# pylint: disable=redefined-outer-name
def __init__(self, lit_config, config):
self.lit_config = lit_config
self.config = config
self.is_windows = platform.system() == 'Windows'
self.cxx = None
self.cxx_is_clang_cl = None
self.cxx_stdlib_under_test = None
self.project_obj_root = None
self.libcxx_src_root = None
self.libcxx_obj_root = None
self.cxx_library_root = None
self.cxx_runtime_root = None
self.abi_library_root = None
self.link_shared = self.get_lit_bool('enable_shared', default=True)
self.debug_build = self.get_lit_bool('debug_build', default=False)
self.exec_env = dict(os.environ)
self.use_target = False
self.use_system_cxx_lib = False
self.use_clang_verify = False
self.long_tests = None
self.execute_external = False
def get_lit_conf(self, name, default=None):
val = self.lit_config.params.get(name, None)
if val is None:
val = getattr(self.config, name, None)
if val is None:
val = default
return val
def get_lit_bool(self, name, default=None, env_var=None):
def check_value(value, var_name):
if value is None:
return default
if isinstance(value, bool):
return value
if not isinstance(value, str):
raise TypeError('expected bool or string')
if value.lower() in ('1', 'true'):
return True
if value.lower() in ('', '0', 'false'):
return False
self.lit_config.fatal(
"parameter '{}' should be true or false".format(var_name))
conf_val = self.get_lit_conf(name)
if env_var is not None and env_var in os.environ and \
os.environ[env_var] is not None:
val = os.environ[env_var]
if conf_val is not None:
self.lit_config.warning(
'Environment variable %s=%s is overriding explicit '
'--param=%s=%s' % (env_var, val, name, conf_val))
return check_value(val, env_var)
return check_value(conf_val, name)
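# Editor's note (illustrative): values resolved by get_lit_bool may come from
# '--param=<name>=...' on the lit command line, from the lit.site.cfg
# attribute of the same name, or, when env_var is given, from the environment;
# '1'/'true' map to True and ''/'0'/'false' map to False.
# get_modules_enabled() below is one such caller.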
def get_modules_enabled(self):
return self.get_lit_bool('enable_modules',
default=False,
env_var='LIBCXX_ENABLE_MODULES')
def make_static_lib_name(self, name):
"""Return the full filename for the specified library name"""
if self.is_windows:
assert name == 'c++' # Only allow libc++ to use this function for now.
return 'lib' + name + '.lib'
else:
return 'lib' + name + '.a'
def configure(self):
self.configure_executor()
self.configure_use_system_cxx_lib()
self.configure_target_info()
self.configure_cxx()
self.configure_triple()
self.configure_deployment()
self.configure_src_root()
self.configure_obj_root()
self.configure_cxx_stdlib_under_test()
self.configure_cxx_library_root()
self.configure_use_clang_verify()
self.configure_use_thread_safety()
self.configure_no_execute()
self.configure_execute_external()
self.configure_ccache()
self.configure_compile_flags()
self.configure_filesystem_compile_flags()
self.configure_link_flags()
self.configure_env()
self.configure_color_diagnostics()
self.configure_debug_mode()
self.configure_warnings()
self.configure_sanitizer()
self.configure_coverage()
self.configure_modules()
self.configure_coroutines()
self.configure_substitutions()
self.configure_features()
def print_config_info(self):
# Print the final compile and link flags.
self.lit_config.note('Using compiler: %s' % self.cxx.path)
self.lit_config.note('Using flags: %s' % self.cxx.flags)
if self.cxx.use_modules:
self.lit_config.note('Using modules flags: %s' %
self.cxx.modules_flags)
self.lit_config.note('Using compile flags: %s'
% self.cxx.compile_flags)
if len(self.cxx.warning_flags):
self.lit_config.note('Using warnings: %s' % self.cxx.warning_flags)
self.lit_config.note('Using link flags: %s' % self.cxx.link_flags)
# Print as list to prevent "set([...])" from being printed.
self.lit_config.note('Using available_features: %s' %
list(self.config.available_features))
show_env_vars = {}
for k,v in self.exec_env.items():
if k not in os.environ or os.environ[k] != v:
show_env_vars[k] = v
self.lit_config.note('Adding environment variables: %r' % show_env_vars)
sys.stderr.flush() # Force flushing to avoid broken output on Windows
def get_test_format(self):
from libcxx.test.format import LibcxxTestFormat
return LibcxxTestFormat(
self.cxx,
self.use_clang_verify,
self.execute_external,
self.executor,
exec_env=self.exec_env)
def configure_executor(self):
exec_str = self.get_lit_conf('executor', "None")
te = eval(exec_str)
if te:
self.lit_config.note("Using executor: %r" % exec_str)
if self.lit_config.useValgrind:
# We have no way of knowing where in the chain the
# ValgrindExecutor is supposed to go. It is likely
# that the user wants it at the end, but we have no
# way of getting at that easily.
self.lit_config.fatal("Cannot infer how to create a Valgrind "
"executor.")
else:
te = LocalExecutor()
if self.lit_config.useValgrind:
te = ValgrindExecutor(self.lit_config.valgrindArgs, te)
self.executor = te
def configure_target_info(self):
self.target_info = make_target_info(self)
def configure_cxx(self):
# Gather various compiler parameters.
cxx = self.get_lit_conf('cxx_under_test')
cxx_first_arg = self.get_lit_conf('cxx_first_arg')
self.cxx_is_clang_cl = cxx is not None and \
os.path.basename(cxx) == 'clang-cl.exe'
# If no specific cxx_under_test was given, attempt to infer it as
# clang++.
if cxx is None or self.cxx_is_clang_cl:
search_paths = self.config.environment['PATH']
if cxx is not None and os.path.isabs(cxx):
search_paths = os.path.dirname(cxx)
clangxx = libcxx.util.which('clang++', search_paths)
if clangxx:
cxx = clangxx
self.lit_config.note(
"inferred cxx_under_test as: %r" % cxx)
elif self.cxx_is_clang_cl:
self.lit_config.fatal('Failed to find clang++ substitution for'
' clang-cl')
if not cxx:
self.lit_config.fatal('must specify user parameter cxx_under_test '
'(e.g., --param=cxx_under_test=clang++)')
self.cxx = CXXCompiler(cxx, cxx_first_arg) if not self.cxx_is_clang_cl else \
self._configure_clang_cl(cxx)
cxx_type = self.cxx.type
if cxx_type is not None:
assert self.cxx.version is not None
maj_v, min_v, patch_v = self.cxx.version
self.config.available_features.add(cxx_type)
self.config.available_features.add('%s-%s' % (cxx_type, maj_v))
self.config.available_features.add('%s-%s.%s' % (
cxx_type, maj_v, min_v))
self.config.available_features.add('%s-%s.%s.%s' % (
cxx_type, maj_v, min_v, patch_v))
self.lit_config.note("detected cxx.type as: {}".format(
self.cxx.type))
self.lit_config.note("detected cxx.version as: {}".format(
self.cxx.version))
self.lit_config.note("detected cxx.default_dialect as: {}".format(
self.cxx.default_dialect))
self.lit_config.note("detected cxx.is_nvrtc as: {}".format(
self.cxx.is_nvrtc))
self.cxx.compile_env = dict(os.environ)
# 'CCACHE_CPP2' prevents ccache from stripping comments while
# preprocessing. This is required to prevent stripping of '-verify'
# comments.
self.cxx.compile_env['CCACHE_CPP2'] = '1'
if self.cxx.type == 'nvcc' and not self.cxx.is_nvrtc:
nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler')
if len(nvcc_host_compiler.strip()) == 0:
if platform.system() == 'Darwin':
nvcc_host_compiler = 'clang'
elif platform.system() == 'Windows':
nvcc_host_compiler = 'cl.exe'
else:
nvcc_host_compiler = 'gcc'
self.host_cxx = CXXCompiler(nvcc_host_compiler, None)
self.host_cxx_type = self.host_cxx.type
if self.host_cxx_type is not None:
assert self.host_cxx.version is not None
maj_v, min_v, _ = self.host_cxx.version
self.config.available_features.add(self.host_cxx_type)
self.config.available_features.add('%s-%s' % (
self.host_cxx_type, maj_v))
self.config.available_features.add('%s-%s.%s' % (
self.host_cxx_type, maj_v, min_v))
self.lit_config.note("detected host_cxx.type as: {}".format(
self.host_cxx.type))
self.lit_config.note("detected host_cxx.version as: {}".format(
self.host_cxx.version))
self.lit_config.note("detected host_cxx.default_dialect as: {}".format(
self.host_cxx.default_dialect))
self.lit_config.note("detected host_cxx.is_nvrtc as: {}".format(
self.host_cxx.is_nvrtc))
if 'icc' in self.config.available_features:
self.cxx.link_flags += ['-lirc']
def _configure_clang_cl(self, clang_path):
def _split_env_var(var):
return [p.strip() for p in os.environ.get(var, '').split(';') if p.strip()]
def _prefixed_env_list(var, prefix):
from itertools import chain
return list(chain.from_iterable((prefix, path) for path in _split_env_var(var)))
assert self.cxx_is_clang_cl
flags = []
compile_flags = _prefixed_env_list('INCLUDE', '-isystem')
link_flags = _prefixed_env_list('LIB', '-L')
for path in _split_env_var('LIB'):
self.add_path(self.exec_env, path)
return CXXCompiler(clang_path, flags=flags,
compile_flags=compile_flags,
link_flags=link_flags)
def _dump_macros_verbose(self, *args, **kwargs):
macros_or_error = self.cxx.dumpMacros(*args, **kwargs)
if isinstance(macros_or_error, tuple):
cmd, out, err, rc = macros_or_error
report = libcxx.util.makeReport(cmd, out, err, rc)
report += "Compiler failed unexpectedly when dumping macros!"
self.lit_config.fatal(report)
return None
assert isinstance(macros_or_error, dict)
return macros_or_error
def configure_src_root(self):
self.libcxx_src_root = self.get_lit_conf(
'libcxx_src_root', os.path.dirname(self.config.test_source_root))
def configure_obj_root(self):
self.project_obj_root = self.get_lit_conf('project_obj_root')
self.libcxx_obj_root = self.get_lit_conf('libcxx_obj_root')
if not self.libcxx_obj_root and self.project_obj_root is not None:
possible_roots = [
os.path.join(self.project_obj_root, 'libcxx'),
os.path.join(self.project_obj_root, 'projects', 'libcxx'),
os.path.join(self.project_obj_root, 'runtimes', 'libcxx'),
]
for possible_root in possible_roots:
if os.path.isdir(possible_root):
self.libcxx_obj_root = possible_root
break
else:
self.libcxx_obj_root = self.project_obj_root
def configure_cxx_library_root(self):
self.cxx_library_root = self.get_lit_conf('cxx_library_root',
self.libcxx_obj_root)
self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root',
self.cxx_library_root)
def configure_use_system_cxx_lib(self):
# This test suite supports testing against either the system library or
# the locally built one; the former mode is useful for testing ABI
# compatibility between the current headers and a shipping dynamic
# library.
# Default to testing against the locally built libc++ library.
self.use_system_cxx_lib = self.get_lit_conf('use_system_cxx_lib')
if self.use_system_cxx_lib == 'true':
self.use_system_cxx_lib = True
elif self.use_system_cxx_lib == 'false':
self.use_system_cxx_lib = False
elif self.use_system_cxx_lib:
assert os.path.isdir(self.use_system_cxx_lib), "the specified use_system_cxx_lib parameter (%s) is not a valid directory" % self.use_system_cxx_lib
self.use_system_cxx_lib = os.path.abspath(self.use_system_cxx_lib)
self.lit_config.note(
"inferred use_system_cxx_lib as: %r" % self.use_system_cxx_lib)
def configure_cxx_stdlib_under_test(self):
self.cxx_stdlib_under_test = self.get_lit_conf(
'cxx_stdlib_under_test', 'libc++')
if self.cxx_stdlib_under_test not in \
['libc++', 'libstdc++', 'msvc', 'cxx_default']:
self.lit_config.fatal(
'unsupported value for "cxx_stdlib_under_test": %s'
% self.cxx_stdlib_under_test)
self.config.available_features.add(self.cxx_stdlib_under_test)
if self.cxx_stdlib_under_test == 'libstdc++':
self.config.available_features.add('libstdc++')
# Manually enable the experimental and filesystem tests for libstdc++
# if the options aren't present.
# FIXME this is a hack.
if self.get_lit_conf('enable_experimental') is None:
self.config.enable_experimental = 'true'
def configure_use_clang_verify(self):
'''If set, run clang with -verify on failing tests.'''
self.use_clang_verify = self.get_lit_bool('use_clang_verify')
if self.use_clang_verify is None:
# NOTE: We do not test for the -verify flag directly because
# -verify will always exit with non-zero on an empty file.
self.use_clang_verify = self.cxx.isVerifySupported()
self.lit_config.note(
"inferred use_clang_verify as: %r" % self.use_clang_verify)
if self.use_clang_verify:
self.config.available_features.add('verify-support')
def configure_use_thread_safety(self):
'''Enable clang thread-safety analysis (-Werror=thread-safety) when supported.'''
has_thread_safety = self.cxx.hasCompileFlag('-Werror=thread-safety')
if has_thread_safety:
self.cxx.compile_flags += ['-Werror=thread-safety']
self.config.available_features.add('thread-safety')
self.lit_config.note("enabling thread-safety annotations")
def configure_execute_external(self):
# Choose between lit's internal shell pipeline runner and a real shell.
# If LIT_USE_INTERNAL_SHELL is in the environment, we use that as the
# default value. Otherwise we ask the target_info.
use_lit_shell_default = os.environ.get('LIT_USE_INTERNAL_SHELL')
if use_lit_shell_default is not None:
use_lit_shell_default = use_lit_shell_default != '0'
else:
use_lit_shell_default = self.target_info.use_lit_shell_default()
# Check for the command line parameter using the default value if it is
# not present.
use_lit_shell = self.get_lit_bool('use_lit_shell',
use_lit_shell_default)
self.execute_external = not use_lit_shell
def configure_no_execute(self):
if type(self.executor) == NoopExecutor:
self.config.available_features.add('no_execute')
def configure_ccache(self):
use_ccache_default = os.environ.get('LIBCXX_USE_CCACHE') is not None
use_ccache = self.get_lit_bool('use_ccache', use_ccache_default)
if use_ccache:
self.cxx.use_ccache = True
self.lit_config.note('enabling ccache')
def add_deployment_feature(self, feature):
(arch, name, version) = self.config.deployment
self.config.available_features.add('%s=%s-%s' % (feature, arch, name))
self.config.available_features.add('%s=%s' % (feature, name))
self.config.available_features.add('%s=%s%s' % (feature, name, version))
def configure_features(self):
additional_features = self.get_lit_conf('additional_features')
if additional_features:
for f in additional_features.split(','):
self.config.available_features.add(f.strip())
self.target_info.add_locale_features(self.config.available_features)
target_platform = self.target_info.platform()
# Write an "available feature" that combines the triple when
# use_system_cxx_lib is enabled. This is so that we can easily write
# XFAIL markers for tests that are known to fail with versions of
# libc++ as were shipped with a particular triple.
if self.use_system_cxx_lib:
self.config.available_features.add('with_system_cxx_lib')
self.config.available_features.add(
'with_system_cxx_lib=%s' % self.config.target_triple)
# Add subcomponents individually.
target_components = self.config.target_triple.split('-')
for component in target_components:
self.config.available_features.add(
'with_system_cxx_lib=%s' % component)
# Add available features for more generic versions of the target
# triple attached to with_system_cxx_lib.
if self.use_deployment:
self.add_deployment_feature('with_system_cxx_lib')
# Configure the availability feature. Availability is only enabled
# with libc++, because other standard libraries do not provide
# availability markup.
if self.use_deployment and self.cxx_stdlib_under_test == 'libc++':
self.config.available_features.add('availability')
self.add_deployment_feature('availability')
if platform.system() == 'Darwin':
self.config.available_features.add('apple-darwin')
# Insert the platform name into the available features as a lower case.
self.config.available_features.add(target_platform)
# Simulator testing can take a really long time for some of these tests
# so add a feature check so we can REQUIRES: long_tests in them
self.long_tests = self.get_lit_bool('long_tests')
if self.long_tests is None:
# Default to running long tests.
self.long_tests = True
self.lit_config.note(
"inferred long_tests as: %r" % self.long_tests)
if self.long_tests:
self.config.available_features.add('long_tests')
if not self.get_lit_bool('enable_filesystem', default=True):
self.config.available_features.add('c++filesystem-disabled')
self.config.available_features.add('dylib-has-no-filesystem')
# Run a compile test for the -fsized-deallocation flag. This is needed
# in test/std/language.support/support.dynamic/new.delete
if self.cxx.hasCompileFlag('-fsized-deallocation'):
self.config.available_features.add('-fsized-deallocation')
if self.cxx.hasCompileFlag('-faligned-allocation'):
self.config.available_features.add('-faligned-allocation')
else:
# FIXME remove this once more than just clang-4.0 support
# C++17 aligned allocation.
self.config.available_features.add('no-aligned-allocation')
if self.cxx.hasCompileFlag('-fdelayed-template-parsing'):
self.config.available_features.add('fdelayed-template-parsing')
if self.get_lit_bool('has_libatomic', False):
self.config.available_features.add('libatomic')
if 'msvc' not in self.config.available_features:
macros = self._dump_macros_verbose()
if '__cpp_if_constexpr' not in macros:
self.config.available_features.add('libcpp-no-if-constexpr')
if '__cpp_structured_bindings' not in macros:
self.config.available_features.add('libcpp-no-structured-bindings')
if '__cpp_deduction_guides' not in macros or \
intMacroValue(macros['__cpp_deduction_guides']) < 201611:
self.config.available_features.add('libcpp-no-deduction-guides')
if self.is_windows:
self.config.available_features.add('windows')
if self.cxx_stdlib_under_test == 'libc++':
# LIBCXX-WINDOWS-FIXME is the feature name used to XFAIL the
# initial Windows failures until they can be properly diagnosed
# and fixed. This allows easier detection of new test failures
# and regressions. Note: New failures should not be suppressed
# using this feature. (Also see llvm.org/PR32730)
self.config.available_features.add('LIBCXX-WINDOWS-FIXME')
if 'msvc' not in self.config.available_features:
# Attempt to detect the glibc version by querying for __GLIBC__
# in 'features.h'.
macros = self.cxx.dumpMacros(flags=['-include', 'features.h'])
if isinstance(macros, dict) and '__GLIBC__' in macros:
maj_v, min_v = (macros['__GLIBC__'], macros['__GLIBC_MINOR__'])
self.config.available_features.add('glibc')
self.config.available_features.add('glibc-%s' % maj_v)
self.config.available_features.add('glibc-%s.%s' % (maj_v, min_v))
libcxx_gdb = self.get_lit_conf('libcxx_gdb')
if libcxx_gdb and 'NOTFOUND' not in libcxx_gdb:
self.config.available_features.add('libcxx_gdb')
self.cxx.libcxx_gdb = libcxx_gdb
# Support Objective-C++ only on MacOS and if the compiler supports it.
if self.target_info.platform() == "darwin" and \
self.target_info.is_host_macosx() and \
self.cxx.hasCompileFlag(["-x", "objective-c++", "-fobjc-arc"]):
self.config.available_features.add("objective-c++")
def configure_compile_flags(self):
self.configure_default_compile_flags()
# Configure extra flags
compile_flags_str = self.get_lit_conf('compile_flags', '')
self.cxx.compile_flags += shlex.split(compile_flags_str)
if self.is_windows:
# FIXME: Can we remove this?
self.cxx.compile_flags += ['-D_CRT_SECURE_NO_WARNINGS']
# Required so that tests using min/max don't fail on Windows,
# and so that those tests don't have to be changed to tolerate
# this insanity.
self.cxx.compile_flags += ['-DNOMINMAX']
if 'msvc' in self.config.available_features:
if self.cxx.type == 'nvcc':
self.cxx.compile_flags += ['-Xcompiler']
self.cxx.compile_flags += ['/bigobj']
additional_flags = self.get_lit_conf('test_compiler_flags')
if additional_flags:
self.cxx.compile_flags += shlex.split(additional_flags)
compute_archs = self.get_lit_conf('compute_archs')
if self.cxx.is_nvrtc is True:
self.config.available_features.add("nvrtc")
if self.cxx.type == 'nvcc':
self.cxx.compile_flags += ['--extended-lambda']
if compute_archs and self.cxx.type == 'nvcc':
pre_sm_32 = False
pre_sm_60 = False
pre_sm_70 = False
pre_sm_90 = False
compute_archs = [int(a) for a in sorted(shlex.split(compute_archs))]
for arch in compute_archs:
if arch < 32: pre_sm_32 = True
if arch < 60: pre_sm_60 = True
if arch < 70: pre_sm_70 = True
if arch < 90: pre_sm_90 = True
arch_flag = '-gencode=arch=compute_{0},code=sm_{0}'.format(arch)
self.cxx.compile_flags += [arch_flag]
enable_compute_future = self.get_lit_conf('enable_compute_future')
if enable_compute_future:
arch_flag = '-gencode=arch=compute_{0},code=compute_{0}'.format(arch)
self.cxx.compile_flags += [arch_flag]
if pre_sm_32:
self.config.available_features.add("pre-sm-32")
if pre_sm_60:
self.config.available_features.add("pre-sm-60")
if pre_sm_70:
self.config.available_features.add("pre-sm-70")
if pre_sm_90:
self.config.available_features.add("pre-sm-90")
def configure_default_compile_flags(self):
nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler')
if nvcc_host_compiler and self.cxx.type == 'nvcc':
self.cxx.compile_flags += ['-ccbin={0}'.format(nvcc_host_compiler)]
# Try to get the std version from the command line. Fall back to the
# default given in lit.site.cfg if none is given. If no default is
# present either, force c++11.
std = self.get_lit_conf('std')
if not std:
# Choose the newest possible language dialect if none is given.
possible_stds = ['c++2a', 'c++17', 'c++1z', 'c++14', 'c++11',
'c++03']
if self.cxx.type == 'gcc':
maj_v, _, _ = self.cxx.version
maj_v = int(maj_v)
if maj_v < 7:
possible_stds.remove('c++1z')
possible_stds.remove('c++17')
# FIXME: How many C++14 tests actually fail under GCC 5 and 6?
# Should we XFAIL them individually instead?
if maj_v < 6:
possible_stds.remove('c++14')
for s in possible_stds:
cxx = self.cxx
success = True
if self.cxx.type == 'nvcc':
# NVCC warns, but doesn't error, if the host compiler
# doesn't support the dialect. It's also possible that the
# host compiler supports the dialect, but NVCC doesn't.
# So, first we need to check if NVCC supports the dialect...
if not self.cxx.hasCompileFlag('-std=%s' % s):
# If it doesn't, give up on this dialect.
success = False
# ... then we need to check if host compiler supports the
# dialect.
cxx = self.host_cxx
if cxx.type == 'msvc':
if not cxx.hasCompileFlag('/std:%s' % s):
success = False
else:
if not cxx.hasCompileFlag('-std=%s' % s):
success = False
if success:
std = s
self.lit_config.note('inferred language dialect as: %s' % std)
break
if std:
# We found a dialect flag.
if self.cxx.type == 'msvc':
self.cxx.compile_flags += ['/std:{0}'.format(std)]
else:
self.cxx.compile_flags += ['-std={0}'.format(std)]
if not std:
# There is no dialect flag. This happens with older MSVC.
if self.cxx.type == 'nvcc':
std = self.host_cxx.default_dialect
else:
std = self.cxx.default_dialect
self.lit_config.note('using default language dialect: %s' % std)
std_feature = std.replace('gnu++', 'c++')
std_feature = std_feature.replace('1z', '17')
std_feature = std_feature.replace('2a', '20')
self.config.available_features.add(std_feature)
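# Editor's note (illustrative): the chained replacements above map a dialect
# such as 'gnu++1z' to the feature name 'c++17'.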
# Configure include paths
self.configure_compile_flags_header_includes()
self.target_info.add_cxx_compile_flags(self.cxx.compile_flags)
# Configure feature flags.
self.configure_compile_flags_exceptions()
self.configure_compile_flags_rtti()
self.configure_compile_flags_abi_version()
enable_32bit = self.get_lit_bool('enable_32bit', False)
if enable_32bit:
self.cxx.flags += ['-m32']
# Use verbose output for better errors
self.cxx.flags += ['-v']
sysroot = self.get_lit_conf('sysroot')
if sysroot:
self.cxx.flags += ['--sysroot=' + sysroot]
gcc_toolchain = self.get_lit_conf('gcc_toolchain')
if gcc_toolchain:
self.cxx.flags += ['--gcc-toolchain=' + gcc_toolchain]
# NOTE: the _DEBUG definition must precede the triple check because for
# the Windows build of libc++, the forced inclusion of a header requires
# that _DEBUG is defined. Incorrect ordering will result in -target
# being elided.
if self.is_windows and self.debug_build:
self.cxx.compile_flags += ['-D_DEBUG']
if self.use_target:
if not self.cxx.addFlagIfSupported(
['--target=' + self.config.target_triple]):
self.lit_config.warning('use_target is true but --target is '\
'not supported by the compiler')
if self.use_deployment:
arch, name, version = self.config.deployment
self.cxx.flags += ['-arch', arch]
self.cxx.flags += ['-m' + name + '-version-min=' + version]
# Add includes for support headers used in the tests.
support_path = os.path.join(self.libcxx_src_root, 'test/support')
self.cxx.compile_flags += ['-I' + support_path]
# Add includes for the PSTL headers
pstl_src_root = self.get_lit_conf('pstl_src_root')
pstl_obj_root = self.get_lit_conf('pstl_obj_root')
if pstl_src_root is not None and pstl_obj_root is not None:
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'include')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_obj_root, 'generated_headers')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'test')]
self.config.available_features.add('parallel-algorithms')
# FIXME(EricWF): variant_size.pass.cpp requires a slightly larger
# template depth with older Clang versions.
self.cxx.addFlagIfSupported('-ftemplate-depth=270')
def configure_compile_flags_header_includes(self):
support_path = os.path.join(self.libcxx_src_root, 'test', 'support')
self.configure_config_site_header()
if self.cxx_stdlib_under_test != 'libstdc++' and \
not self.is_windows:
self.cxx.compile_flags += [
'-include', os.path.join(support_path, 'nasty_macros.h')]
if self.cxx_stdlib_under_test == 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'msvc_stdlib_force_include.h')]
pass
if self.is_windows and self.debug_build and \
self.cxx_stdlib_under_test != 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'set_windows_crt_report_mode.h')
]
cxx_headers = self.get_lit_conf('cxx_headers')
if cxx_headers == '' or (cxx_headers is None
and self.cxx_stdlib_under_test != 'libc++'):
self.lit_config.note('using the system cxx headers')
return
if self.cxx.type != 'nvcc' and self.cxx.type != 'nvhpc':
self.cxx.compile_flags += ['-nostdinc++']
if cxx_headers is None:
cxx_headers = os.path.join(self.libcxx_src_root, 'include')
if not os.path.isdir(cxx_headers):
self.lit_config.fatal("cxx_headers='%s' is not a directory."
% cxx_headers)
self.cxx.compile_flags += ['-I' + cxx_headers]
if self.libcxx_obj_root is not None:
cxxabi_headers = os.path.join(self.libcxx_obj_root, 'include',
'c++build')
if os.path.isdir(cxxabi_headers):
self.cxx.compile_flags += ['-I' + cxxabi_headers]
def configure_config_site_header(self):
# Check for a possible __config_site in the build directory. We
# use this if it exists.
if self.libcxx_obj_root is None:
return
config_site_header = os.path.join(self.libcxx_obj_root, '__config_site')
if not os.path.isfile(config_site_header):
return
contained_macros = self.parse_config_site_and_add_features(
config_site_header)
self.lit_config.note('Using __config_site header %s with macros: %r'
% (config_site_header, contained_macros))
# FIXME: This must come after the call to
# 'parse_config_site_and_add_features(...)' in order for it to work.
self.cxx.compile_flags += ['-include', config_site_header]
def parse_config_site_and_add_features(self, header):
""" parse_config_site_and_add_features - Deduce and add the test
features that are implied by the #define's in the __config_site
header. Return a dictionary containing the macros found in the
'__config_site' header.
"""
# MSVC can't dump macros, so we just give up.
if 'msvc' in self.config.available_features:
return {}
# Parse the macro contents of __config_site by dumping the macros
# using 'c++ -dM -E' and filtering the predefines.
predefines = self._dump_macros_verbose()
macros = self._dump_macros_verbose(header)
feature_macros_keys = set(macros.keys()) - set(predefines.keys())
feature_macros = {}
for k in feature_macros_keys:
feature_macros[k] = macros[k]
# We expect the header guard to be one of the definitions
assert '_LIBCUDACXX_CONFIG_SITE' in feature_macros
del feature_macros['_LIBCUDACXX_CONFIG_SITE']
# The __config_site header should be non-empty. Otherwise it should
# have never been emitted by CMake.
assert len(feature_macros) > 0
# FIXME: This is a hack that should be fixed using module maps.
# If modules are enabled then we have to lift all of the definitions
# in __config_site onto the command line.
for m in feature_macros:
define = '-D%s' % m
if feature_macros[m]:
define += '=%s' % (feature_macros[m])
self.cxx.modules_flags += [define]
if self.cxx.hasCompileFlag('-Wno-macro-redefined'):
self.cxx.compile_flags += ['-Wno-macro-redefined']
# Transform each macro name into the feature name used in the tests.
# Ex. _LIBCUDACXX_HAS_NO_THREADS -> libcpp-has-no-threads
for m in feature_macros:
if m == '_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS' or \
m == '_LIBCUDACXX_HIDE_FROM_ABI_PER_TU_BY_DEFAULT':
continue
if m == '_LIBCUDACXX_ABI_VERSION':
self.config.available_features.add('libcpp-abi-version-v%s'
% feature_macros[m])
continue
if m == '_LIBCUDACXX_NO_VCRUNTIME':
self.config.available_features.add('libcpp-no-vcruntime')
continue
assert m.startswith('_LIBCUDACXX_HAS_') or m.startswith('_LIBCUDACXX_ABI_')
m = m.lower()[1:].replace('_', '-')
self.config.available_features.add(m)
return feature_macros
def configure_compile_flags_exceptions(self):
enable_exceptions = self.get_lit_bool('enable_exceptions', True)
if not enable_exceptions:
self.config.available_features.add('libcpp-no-exceptions')
if 'nvhpc' in self.config.available_features:
# NVHPC reports all expressions as `noexcept(true)` with its
# "no exceptions" mode. Override the setting from CMake as
# a temporary workaround for that.
pass
# TODO: I don't know how to shut off exceptions with MSVC.
elif 'msvc' not in self.config.available_features:
if self.cxx.type == 'nvcc':
self.cxx.compile_flags += ['-Xcompiler']
self.cxx.compile_flags += ['-fno-exceptions']
def configure_compile_flags_rtti(self):
enable_rtti = self.get_lit_bool('enable_rtti', True)
if not enable_rtti:
self.config.available_features.add('libcpp-no-rtti')
if self.cxx.type == 'nvcc':
self.cxx.compile_flags += ['-Xcompiler']
if 'nvhpc' in self.config.available_features:
self.cxx.compile_flags += ['--no_rtti']
elif 'msvc' in self.config.available_features:
self.cxx.compile_flags += ['/GR-']
else:
self.cxx.compile_flags += ['-fno-rtti']
self.cxx.compile_flags += ['-D_LIBCUDACXX_NO_RTTI']
def configure_compile_flags_abi_version(self):
abi_version = self.get_lit_conf('abi_version', '').strip()
abi_unstable = self.get_lit_bool('abi_unstable')
# Only add the ABI version when it is non-default.
# FIXME(EricWF): Get the ABI version from the "__config_site".
if abi_version and abi_version != '1':
self.cxx.compile_flags += ['-D_LIBCUDACXX_ABI_VERSION=' + abi_version]
if abi_unstable:
self.config.available_features.add('libcpp-abi-unstable')
self.cxx.compile_flags += ['-D_LIBCUDACXX_ABI_UNSTABLE']
def configure_filesystem_compile_flags(self):
if not self.get_lit_bool('enable_filesystem', default=True):
return
static_env = os.path.join(self.libcxx_src_root, 'test', 'std',
'input.output', 'filesystems', 'Inputs', 'static_test_env')
static_env = os.path.realpath(static_env)
assert os.path.isdir(static_env)
self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_STATIC_TEST_ROOT="%s"' % static_env]
dynamic_env = os.path.join(self.config.test_exec_root,
'filesystem', 'Output', 'dynamic_env')
dynamic_env = os.path.realpath(dynamic_env)
if not os.path.isdir(dynamic_env):
os.makedirs(dynamic_env)
self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT="%s"' % dynamic_env]
self.exec_env['LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT'] = ("%s" % dynamic_env)
dynamic_helper = os.path.join(self.libcxx_src_root, 'test', 'support',
'filesystem_dynamic_test_helper.py')
assert os.path.isfile(dynamic_helper)
self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_DYNAMIC_TEST_HELPER="%s %s"'
% (sys.executable, dynamic_helper)]
def configure_link_flags(self):
nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler')
if nvcc_host_compiler and self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-ccbin={0}'.format(nvcc_host_compiler)]
# Configure library path
self.configure_link_flags_cxx_library_path()
self.configure_link_flags_abi_library_path()
# Configure libraries
if self.cxx_stdlib_under_test == 'libc++':
if self.get_lit_conf('name') != 'libcu++':
if 'nvhpc' not in self.config.available_features or not self.cxx.is_nvrtc:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler']
if self.cxx.type != 'nvhpc':
self.cxx.link_flags += ['-nodefaultlibs']
# FIXME: Handle MSVCRT as part of the ABI library handling.
if self.is_windows and 'msvc' not in self.config.available_features:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler']
self.cxx.link_flags += ['-nostdlib']
self.configure_link_flags_cxx_library()
self.configure_link_flags_abi_library()
self.configure_extra_library_flags()
elif self.cxx_stdlib_under_test == 'libstdc++':
self.config.available_features.add('c++experimental')
self.cxx.link_flags += ['-lstdc++fs', '-lm', '-pthread']
elif self.cxx_stdlib_under_test == 'msvc':
# FIXME: Correctly setup debug/release flags here.
pass
elif self.cxx_stdlib_under_test == 'cxx_default':
self.cxx.link_flags += ['-pthread']
else:
self.lit_config.fatal('invalid stdlib under test')
link_flags_str = self.get_lit_conf('link_flags', '')
self.cxx.link_flags += shlex.split(link_flags_str)
def configure_link_flags_cxx_library_path(self):
if not self.use_system_cxx_lib:
if self.cxx_library_root:
self.cxx.link_flags += ['-L' + self.cxx_library_root]
if self.is_windows and self.link_shared:
self.add_path(self.cxx.compile_env, self.cxx_library_root)
if self.cxx_runtime_root:
if not self.is_windows:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler',
'"-Wl,-rpath,' + self.cxx_runtime_root + '"']
else:
self.cxx.link_flags += ['-Wl,-rpath,' +
self.cxx_runtime_root]
elif self.is_windows and self.link_shared:
self.add_path(self.exec_env, self.cxx_runtime_root)
elif os.path.isdir(str(self.use_system_cxx_lib)):
self.cxx.link_flags += ['-L' + self.use_system_cxx_lib]
if not self.is_windows:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler',
'"-Wl,-rpath,' + self.cxx_runtime_root + '"']
else:
self.cxx.link_flags += ['-Wl,-rpath,' +
self.use_system_cxx_lib]
if self.is_windows and self.link_shared:
self.add_path(self.cxx.compile_env, self.use_system_cxx_lib)
additional_flags = self.get_lit_conf('test_linker_flags')
if additional_flags:
self.cxx.link_flags += shlex.split(additional_flags)
def configure_link_flags_abi_library_path(self):
# Configure ABI library paths.
self.abi_library_root = self.get_lit_conf('abi_library_path')
if self.abi_library_root:
self.cxx.link_flags += ['-L' + self.abi_library_root]
if not self.is_windows:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler',
'"-Wl,-rpath,' + self.cxx_runtime_root + '"']
else:
self.cxx.link_flags += ['-Wl,-rpath,' +
self.abi_library_root]
else:
self.add_path(self.exec_env, self.abi_library_root)
def configure_link_flags_cxx_library(self):
libcxx_experimental = self.get_lit_bool('enable_experimental', default=False)
if libcxx_experimental:
self.config.available_features.add('c++experimental')
self.cxx.link_flags += ['-lc++experimental']
if self.link_shared:
self.cxx.link_flags += ['-lc++']
elif self.cxx.type != 'nvcc':
cxx_library_root = self.get_lit_conf('cxx_library_root')
if cxx_library_root:
libname = self.make_static_lib_name('c++')
abs_path = os.path.join(cxx_library_root, libname)
assert os.path.exists(abs_path), \
"static libc++ library does not exist"
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++']
def configure_link_flags_abi_library(self):
cxx_abi = self.get_lit_conf('cxx_abi', 'libcxxabi')
if cxx_abi == 'libstdc++':
self.cxx.link_flags += ['-lstdc++']
elif cxx_abi == 'libsupc++':
self.cxx.link_flags += ['-lsupc++']
elif cxx_abi == 'libcxxabi':
# If the C++ library requires explicitly linking to libc++abi, or
# if we're testing libc++abi itself (the test configs are shared),
# then link it.
testing_libcxxabi = self.get_lit_conf('name', '') == 'libc++abi'
if self.target_info.allow_cxxabi_link() or testing_libcxxabi:
libcxxabi_shared = self.get_lit_bool('libcxxabi_shared', default=True)
if libcxxabi_shared:
self.cxx.link_flags += ['-lc++abi']
else:
cxxabi_library_root = self.get_lit_conf('abi_library_path')
if cxxabi_library_root:
libname = self.make_static_lib_name('c++abi')
abs_path = os.path.join(cxxabi_library_root, libname)
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++abi']
elif cxx_abi == 'libcxxrt':
self.cxx.link_flags += ['-lcxxrt']
elif cxx_abi == 'vcruntime':
debug_suffix = 'd' if self.debug_build else ''
self.cxx.link_flags += ['-l%s%s' % (lib, debug_suffix) for lib in
['vcruntime', 'ucrt', 'msvcrt']]
elif cxx_abi == 'none' or cxx_abi == 'default':
if self.is_windows:
debug_suffix = 'd' if self.debug_build else ''
self.cxx.link_flags += ['-lmsvcrt%s' % debug_suffix]
else:
self.lit_config.fatal(
'C++ ABI setting %s unsupported for tests' % cxx_abi)
def configure_extra_library_flags(self):
if self.get_lit_bool('cxx_ext_threads', default=False):
self.cxx.link_flags += ['-lc++external_threads']
self.target_info.add_cxx_link_flags(self.cxx.link_flags)
def configure_color_diagnostics(self):
use_color = self.get_lit_conf('color_diagnostics')
if use_color is None:
use_color = os.environ.get('LIBCXX_COLOR_DIAGNOSTICS')
if use_color is None:
return
if use_color != '':
self.lit_config.fatal('Invalid value for color_diagnostics "%s".'
% use_color)
color_flag = '-fdiagnostics-color=always'
# Check if the compiler supports the color diagnostics flag. Issue a
# warning if it does not since color diagnostics have been requested.
if not self.cxx.hasCompileFlag(color_flag):
self.lit_config.warning(
'color diagnostics have been requested but are not supported '
'by the compiler')
else:
self.cxx.flags += [color_flag]
def configure_debug_mode(self):
debug_level = self.get_lit_conf('debug_level', None)
if not debug_level:
return
if debug_level not in ['0', '1']:
self.lit_config.fatal('Invalid value for debug_level "%s".'
% debug_level)
self.cxx.compile_flags += ['-D_LIBCUDACXX_DEBUG=%s' % debug_level]
def configure_warnings(self):
default_enable_warnings = 'clang' in self.config.available_features or \
'msvc' in self.config.available_features
enable_warnings = self.get_lit_bool('enable_warnings',
default_enable_warnings)
self.cxx.useWarnings(enable_warnings)
if 'nvcc' in self.config.available_features:
self.cxx.warning_flags += [ '-Xcudafe', '--display_error_number' ]
if 'msvc' in self.config.available_features:
self.cxx.warning_flags += [ '-Xcompiler', '/W4', '-Xcompiler', '/WX' ]
# warning C4100: 'quack': unreferenced formal parameter
self.cxx.warning_flags += [ '-Xcompiler', '-wd4100' ]
# warning C4127: conditional expression is constant
self.cxx.warning_flags += [ '-Xcompiler', '-wd4127' ]
# warning C4180: qualifier applied to function type has no meaning; ignored
self.cxx.warning_flags += [ '-Xcompiler', '-wd4180' ]
# warning C4309: 'moo': truncation of constant value
self.cxx.warning_flags += [ '-Xcompiler', '-wd4309' ]
else:
# TODO: Re-enable soon.
#self.cxx.warning_flags += [ '-Xcompiler', '-Wall', '-Xcompiler', '-Werror' ]
pass
else:
self.cxx.warning_flags += [
'-D_LIBCUDACXX_DISABLE_PRAGMA_GCC_SYSTEM_HEADER',
'-Wall', '-Wextra', '-Werror'
]
if self.cxx.hasWarningFlag('-Wuser-defined-warnings'):
self.cxx.warning_flags += ['-Wuser-defined-warnings']
self.config.available_features.add('diagnose-if-support')
self.cxx.addWarningFlagIfSupported('-Wshadow')
self.cxx.addWarningFlagIfSupported('-Wno-unused-command-line-argument')
self.cxx.addWarningFlagIfSupported('-Wno-attributes')
self.cxx.addWarningFlagIfSupported('-Wno-pessimizing-move')
self.cxx.addWarningFlagIfSupported('-Wno-c++11-extensions')
self.cxx.addWarningFlagIfSupported('-Wno-user-defined-literals')
self.cxx.addWarningFlagIfSupported('-Wno-noexcept-type')
self.cxx.addWarningFlagIfSupported('-Wno-aligned-allocation-unavailable')
# These warnings should be enabled in order to support the MSVC
# team using the test suite; they enable the warnings below and
# expect the test suite to be clean.
self.cxx.addWarningFlagIfSupported('-Wsign-compare')
self.cxx.addWarningFlagIfSupported('-Wunused-variable')
self.cxx.addWarningFlagIfSupported('-Wunused-parameter')
self.cxx.addWarningFlagIfSupported('-Wunreachable-code')
std = self.get_lit_conf('std', None)
if std in ['c++98', 'c++03']:
if 'nvcc' not in self.config.available_features:
# The '#define static_assert' provided by libc++ in C++03 mode
# causes an unused local typedef whenever it is used.
self.cxx.addWarningFlagIfSupported('-Wno-unused-local-typedef')
def configure_sanitizer(self):
san = self.get_lit_conf('use_sanitizer', '').strip()
if san:
self.target_info.add_sanitizer_features(san, self.config.available_features)
# Search for llvm-symbolizer along the compiler path first
# and then along the PATH env variable.
symbolizer_search_paths = os.environ.get('PATH', '')
cxx_path = libcxx.util.which(self.cxx.path)
if cxx_path is not None:
symbolizer_search_paths = (
os.path.dirname(cxx_path) +
os.pathsep + symbolizer_search_paths)
llvm_symbolizer = libcxx.util.which('llvm-symbolizer',
symbolizer_search_paths)
def add_ubsan():
self.cxx.flags += ['-fsanitize=undefined',
'-fno-sanitize=float-divide-by-zero',
'-fno-sanitize-recover=all']
self.exec_env['UBSAN_OPTIONS'] = 'print_stacktrace=1'
self.config.available_features.add('ubsan')
# Setup the sanitizer compile flags
self.cxx.flags += ['-g', '-fno-omit-frame-pointer']
if san == 'Address' or san == 'Address;Undefined' or san == 'Undefined;Address':
self.cxx.flags += ['-fsanitize=address']
if llvm_symbolizer is not None:
self.exec_env['ASAN_SYMBOLIZER_PATH'] = llvm_symbolizer
# FIXME: Turn ODR violation back on after PR28391 is resolved
# https://bugs.llvm.org/show_bug.cgi?id=28391
self.exec_env['ASAN_OPTIONS'] = 'detect_odr_violation=0'
self.config.available_features.add('asan')
self.config.available_features.add('sanitizer-new-delete')
self.cxx.compile_flags += ['-O1']
if san == 'Address;Undefined' or san == 'Undefined;Address':
add_ubsan()
elif san == 'Memory' or san == 'MemoryWithOrigins':
self.cxx.flags += ['-fsanitize=memory']
if san == 'MemoryWithOrigins':
self.cxx.compile_flags += [
'-fsanitize-memory-track-origins']
if llvm_symbolizer is not None:
self.exec_env['MSAN_SYMBOLIZER_PATH'] = llvm_symbolizer
self.config.available_features.add('msan')
self.config.available_features.add('sanitizer-new-delete')
self.cxx.compile_flags += ['-O1']
elif san == 'Undefined':
add_ubsan()
self.cxx.compile_flags += ['-O2']
elif san == 'Thread':
self.cxx.flags += ['-fsanitize=thread']
self.config.available_features.add('tsan')
self.config.available_features.add('sanitizer-new-delete')
else:
self.lit_config.fatal('unsupported value for '
'use_sanitizer: {0}'.format(san))
san_lib = self.get_lit_conf('sanitizer_library')
if san_lib:
if self.cxx.type == 'nvcc':
self.cxx.link_flags += ['-Xcompiler',
'"-Wl,-rpath,' + os.path.dirname(san_lib) + '"']
else:
self.cxx.link_flags += ['-Wl,-rpath,' +
os.path.dirname(san_lib)]
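    # Illustrative values for the use_sanitizer parameter handled above (the
    # exact lit invocations are assumptions):
    #   --param=use_sanitizer=Address             -> ASan (-fsanitize=address)
    #   --param=use_sanitizer='Address;Undefined' -> ASan + UBSan
    #   --param=use_sanitizer=MemoryWithOrigins   -> MSan with origin tracking
    #   --param=use_sanitizer=Thread              -> TSan (-fsanitize=thread)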
def configure_coverage(self):
self.generate_coverage = self.get_lit_bool('generate_coverage', False)
if self.generate_coverage:
self.cxx.flags += ['-g', '--coverage']
self.cxx.compile_flags += ['-O0']
def configure_coroutines(self):
if self.cxx.hasCompileFlag('-fcoroutines-ts'):
macros = self._dump_macros_verbose(flags=['-fcoroutines-ts'])
if '__cpp_coroutines' not in macros:
self.lit_config.warning('-fcoroutines-ts is supported but '
'__cpp_coroutines is not defined')
# Consider coroutines supported only when the feature test macro
# reflects a recent value.
if intMacroValue(macros['__cpp_coroutines']) >= 201703:
self.config.available_features.add('fcoroutines-ts')
def configure_modules(self):
modules_flags = ['-fmodules']
if platform.system() != 'Darwin':
modules_flags += ['-Xclang', '-fmodules-local-submodule-visibility']
supports_modules = self.cxx.hasCompileFlag(modules_flags)
enable_modules = self.get_modules_enabled()
if enable_modules and not supports_modules:
self.lit_config.fatal(
'-fmodules is enabled but not supported by the compiler')
if not supports_modules:
return
self.config.available_features.add('modules-support')
module_cache = os.path.join(self.config.test_exec_root,
'modules.cache')
module_cache = os.path.realpath(module_cache)
if os.path.isdir(module_cache):
shutil.rmtree(module_cache)
os.makedirs(module_cache)
self.cxx.modules_flags += modules_flags + \
['-fmodules-cache-path=' + module_cache]
if enable_modules:
self.config.available_features.add('-fmodules')
self.cxx.useModules()
def configure_substitutions(self):
sub = self.config.substitutions
cxx_path = pipes.quote(self.cxx.path)
# Configure compiler substitutions
sub.append(('%cxx', cxx_path))
sub.append(('%libcxx_src_root', self.libcxx_src_root))
# Configure flags substitutions
flags_str = ' '.join([pipes.quote(f) for f in self.cxx.flags])
compile_flags_str = ' '.join([pipes.quote(f) for f in self.cxx.compile_flags])
link_flags_str = ' '.join([pipes.quote(f) for f in self.cxx.link_flags])
all_flags = '%s %s %s' % (flags_str, compile_flags_str, link_flags_str)
sub.append(('%flags', flags_str))
sub.append(('%compile_flags', compile_flags_str))
sub.append(('%link_flags', link_flags_str))
sub.append(('%all_flags', all_flags))
if self.cxx.isVerifySupported():
verify_str = ' ' + ' '.join(self.cxx.verify_flags) + ' '
sub.append(('%verify', verify_str))
# Add compile and link shortcuts
compile_str = (cxx_path + ' -o %t.o %s -c ' + flags_str
+ ' ' + compile_flags_str)
link_str = (cxx_path + ' -o %t.exe %t.o ' + flags_str + ' '
+ link_flags_str)
assert type(link_str) is str
build_str = cxx_path + ' -o %t.exe %s ' + all_flags
if self.cxx.use_modules:
sub.append(('%compile_module', compile_str))
sub.append(('%build_module', build_str))
elif self.cxx.modules_flags is not None:
modules_str = ' '.join(self.cxx.modules_flags) + ' '
sub.append(('%compile_module', compile_str + ' ' + modules_str))
sub.append(('%build_module', build_str + ' ' + modules_str))
sub.append(('%compile', compile_str))
sub.append(('%link', link_str))
sub.append(('%build', build_str))
# Configure exec prefix substitutions.
# Configure run env substitution.
sub.append(('%run', '%t.exe'))
# Configure not program substitutions
not_py = os.path.join(self.libcxx_src_root, 'utils', 'not.py')
not_str = '%s %s ' % (pipes.quote(sys.executable), pipes.quote(not_py))
sub.append(('not ', not_str))
if self.get_lit_conf('libcxx_gdb'):
sub.append(('%libcxx_gdb', self.get_lit_conf('libcxx_gdb')))
sub.append(['%syntaxonly', '-fsyntax-only' if self.cxx.type != 'nvhpc' else ''])
sub.append(['%noexceptions', '-fno-exceptions' if self.cxx.type != 'nvhpc' else ''])
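    # Illustrative expansion of the substitutions defined above (the compiler
    # path and flags are made-up examples): a test line such as
    #   RUN: %build
    # would expand to roughly
    #   /usr/bin/clang++ -o %t.exe %s <flags> <compile_flags> <link_flags>
    # and '%run' simply becomes '%t.exe'.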
def can_use_deployment(self):
# Check if the host is on an Apple platform using clang.
if not self.target_info.platform() == "darwin":
return False
if not self.target_info.is_host_macosx():
return False
if not self.cxx.type.endswith('clang'):
return False
return True
def configure_triple(self):
# Get or infer the target triple.
target_triple = self.get_lit_conf('target_triple')
self.use_target = self.get_lit_bool('use_target', False)
        if self.use_target and not target_triple:
            self.lit_config.warning('use_target is true but no triple is specified')
# Use deployment if possible.
self.use_deployment = not self.use_target and self.can_use_deployment()
if self.use_deployment:
return
# Save the triple (and warn on Apple platforms).
self.config.target_triple = target_triple
if self.use_target and 'apple' in target_triple:
self.lit_config.warning('consider using arch and platform instead'
' of target_triple on Apple platforms')
# If no target triple was given, try to infer it from the compiler
# under test.
if not self.config.target_triple:
target_triple = (self.cxx if self.cxx.type != 'nvcc' else
self.host_cxx).getTriple()
# Drop sub-major version components from the triple, because the
# current XFAIL handling expects exact matches for feature checks.
# Example: x86_64-apple-darwin14.0.0 -> x86_64-apple-darwin14
# The 5th group handles triples greater than 3 parts
# (ex x86_64-pc-linux-gnu).
target_triple = re.sub(r'([^-]+)-([^-]+)-([^.]+)([^-]*)(.*)',
r'\1-\2-\3\5', target_triple)
# linux-gnu is needed in the triple to properly identify linuxes
# that use GLIBC. Handle redhat and opensuse triples as special
# cases and append the missing `-gnu` portion.
if (target_triple.endswith('redhat-linux') or
target_triple.endswith('suse-linux')):
target_triple += '-gnu'
self.config.target_triple = target_triple
self.lit_config.note(
"inferred target_triple as: %r" % self.config.target_triple)
def configure_deployment(self):
        assert self.use_deployment is not None
        assert self.use_target is not None
if not self.use_deployment:
# Warn about ignored parameters.
if self.get_lit_conf('arch'):
self.lit_config.warning('ignoring arch, using target_triple')
if self.get_lit_conf('platform'):
self.lit_config.warning('ignoring platform, using target_triple')
return
assert not self.use_target
assert self.target_info.is_host_macosx()
# Always specify deployment explicitly on Apple platforms, since
# otherwise a platform is picked up from the SDK. If the SDK version
# doesn't match the system version, tests that use the system library
# may fail spuriously.
arch = self.get_lit_conf('arch')
if not arch:
arch = (self.cxx if self.cxx.type != 'nvcc' else
self.host_cxx).getTriple().split('-', 1)[0]
self.lit_config.note("inferred arch as: %r" % arch)
inferred_platform, name, version = self.target_info.get_platform()
if inferred_platform:
self.lit_config.note("inferred platform as: %r" % (name + version))
self.config.deployment = (arch, name, version)
# Set the target triple for use by lit.
self.config.target_triple = arch + '-apple-' + name + version
self.lit_config.note(
"computed target_triple as: %r" % self.config.target_triple)
# If we're testing a system libc++ as opposed to the upstream LLVM one,
# take the version of the system libc++ into account to compute which
# features are enabled/disabled. Otherwise, disable availability markup,
# which is not relevant for non-shipped flavors of libc++.
if self.use_system_cxx_lib:
# Dylib support for shared_mutex was added in macosx10.12.
if name == 'macosx' and version in ('10.%s' % v for v in range(7, 12)):
self.config.available_features.add('dylib-has-no-shared_mutex')
self.lit_config.note("shared_mutex is not supported by the deployment target")
# Throwing bad_optional_access, bad_variant_access and bad_any_cast is
# supported starting in macosx10.14.
if name == 'macosx' and version in ('10.%s' % v for v in range(7, 14)):
self.config.available_features.add('dylib-has-no-bad_optional_access')
self.lit_config.note("throwing bad_optional_access is not supported by the deployment target")
self.config.available_features.add('dylib-has-no-bad_variant_access')
self.lit_config.note("throwing bad_variant_access is not supported by the deployment target")
self.config.available_features.add('dylib-has-no-bad_any_cast')
self.lit_config.note("throwing bad_any_cast is not supported by the deployment target")
            # Filesystem is supported on Apple platforms starting with macosx10.15.
if name == 'macosx' and version in ('10.%s' % v for v in range(7, 15)):
self.config.available_features.add('dylib-has-no-filesystem')
self.lit_config.note("the deployment target does not support <filesystem>")
else:
self.cxx.flags += ['-D_LIBCUDACXX_DISABLE_AVAILABILITY']
def configure_env(self):
self.target_info.configure_env(self.exec_env)
def add_path(self, dest_env, new_path):
if 'PATH' not in dest_env:
dest_env['PATH'] = new_path
else:
split_char = ';' if self.is_windows else ':'
dest_env['PATH'] = '%s%s%s' % (new_path, split_char,
dest_env['PATH'])
| libcudacxx-main | libcxx/utils/libcxx/test/config.py |
libcudacxx-main | libcxx/utils/libcxx/test/__init__.py |
|
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import copy
import errno
import os
import time
import random
import lit.Test # pylint: disable=import-error
import lit.TestRunner # pylint: disable=import-error
from lit.TestRunner import ParserKind, IntegratedTestKeywordParser \
# pylint: disable=import-error
from libcxx.test.executor import LocalExecutor as LocalExecutor
import libcxx.util
class LibcxxTestFormat(object):
"""
    Custom test format handler for use with the test format used by libc++.
    Tests fall into three categories:
FOO.pass.cpp - Executable test which should compile, run, and exit with
code 0.
FOO.fail.cpp - Negative test case which is expected to fail compilation.
FOO.sh.cpp - A test that uses LIT's ShTest format.
"""
def __init__(self, cxx, use_verify_for_fail, execute_external,
executor, exec_env):
self.cxx = copy.deepcopy(cxx)
self.use_verify_for_fail = use_verify_for_fail
self.execute_external = execute_external
self.executor = executor
self.exec_env = dict(exec_env)
@staticmethod
def _make_custom_parsers():
return [
IntegratedTestKeywordParser('FLAKY_TEST.', ParserKind.TAG,
initial_value=False),
IntegratedTestKeywordParser('MODULES_DEFINES:', ParserKind.LIST,
initial_value=[])
]
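    # Illustrative test annotations recognized by the parsers above (the test
    # content shown is hypothetical):
    #   // FLAKY_TEST.
    #   // MODULES_DEFINES: _LIBCUDACXX_DEBUG=1
    # The first marks a test as flaky, so it is run up to five times before
    # being reported as failing; the second adds -D definitions when the test
    # is built with -fmodules support enabled.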
@staticmethod
def _get_parser(key, parsers):
for p in parsers:
if p.keyword == key:
return p
        assert False, "parser not found"
# TODO: Move this into lit's FileBasedTest
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for filename in os.listdir(source_path):
# Ignore dot files and excluded tests.
if filename.startswith('.') or filename in localConfig.excludes:
continue
filepath = os.path.join(source_path, filename)
if not os.path.isdir(filepath):
if any([filename.endswith(ext)
for ext in localConfig.suffixes]):
yield lit.Test.Test(testSuite, path_in_suite + (filename,),
localConfig)
def execute(self, test, lit_config):
while True:
try:
return self._execute(test, lit_config)
except OSError as oe:
if oe.errno != errno.ETXTBSY:
raise
time.sleep(0.1)
def _execute(self, test, lit_config):
name = test.path_in_suite[-1]
name_root, name_ext = os.path.splitext(name)
is_libcxx_test = test.path_in_suite[0] == 'libcxx'
is_sh_test = name_root.endswith('.sh')
is_pass_test = name.endswith('.pass.cpp') or name.endswith('.pass.mm')
is_fail_test = name.endswith('.fail.cpp') or name.endswith('.fail.mm')
is_objcxx_test = name.endswith('.mm')
is_objcxx_arc_test = name.endswith('.arc.pass.mm') or \
name.endswith('.arc.fail.mm')
assert is_sh_test or name_ext == '.cpp' or name_ext == '.mm', \
'non-cpp file must be sh test'
if test.config.unsupported:
return (lit.Test.UNSUPPORTED,
"A lit.local.cfg marked this unsupported")
if is_objcxx_test and not \
'objective-c++' in test.config.available_features:
return (lit.Test.UNSUPPORTED, "Objective-C++ is not supported")
parsers = self._make_custom_parsers()
script = lit.TestRunner.parseIntegratedTestScript(
test, additional_parsers=parsers, require_script=is_sh_test)
# Check if a result for the test was returned. If so return that
# result.
if isinstance(script, lit.Test.Result):
return script
if lit_config.noExecute:
return lit.Test.Result(lit.Test.PASS)
# Check that we don't have run lines on tests that don't support them.
if not is_sh_test and len(script) != 0:
lit_config.fatal('Unsupported RUN line found in test %s' % name)
tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir,
tmpBase)
script = lit.TestRunner.applySubstitutions(script, substitutions)
test_cxx = copy.deepcopy(self.cxx)
if is_fail_test:
test_cxx.useCCache(False)
test_cxx.useWarnings(False)
extra_modules_defines = self._get_parser('MODULES_DEFINES:',
parsers).getValue()
if '-fmodules' in test.config.available_features:
test_cxx.compile_flags += [('-D%s' % mdef.strip()) for
mdef in extra_modules_defines]
test_cxx.addWarningFlagIfSupported('-Wno-macro-redefined')
# FIXME: libc++ debug tests #define _LIBCUDACXX_ASSERT to override it
# If we see this we need to build the test against uniquely built
# modules.
if is_libcxx_test:
with open(test.getSourcePath(), 'rb') as f:
contents = f.read()
if b'#define _LIBCUDACXX_ASSERT' in contents:
test_cxx.useModules(False)
if is_objcxx_test:
test_cxx.source_lang = 'objective-c++'
if is_objcxx_arc_test:
test_cxx.compile_flags += ['-fobjc-arc']
else:
test_cxx.compile_flags += ['-fno-objc-arc']
test_cxx.link_flags += ['-framework', 'Foundation']
# Dispatch the test based on its suffix.
if is_sh_test:
if not isinstance(self.executor, LocalExecutor):
                # We can't run ShTest tests with a non-local executor yet.
# For now, bail on trying to run them
return lit.Test.UNSUPPORTED, 'ShTest format not yet supported'
test.config.environment = dict(self.exec_env)
return lit.TestRunner._runShTest(test, lit_config,
self.execute_external, script,
tmpBase)
elif is_fail_test:
return self._evaluate_fail_test(test, test_cxx, parsers)
elif is_pass_test:
return self._evaluate_pass_test(test, tmpBase, lit_config,
test_cxx, parsers)
else:
# No other test type is supported
assert False
def _clean(self, exec_path): # pylint: disable=no-self-use
libcxx.util.cleanFile(exec_path)
def _evaluate_pass_test(self, test, tmpBase, lit_config,
test_cxx, parsers):
execDir = os.path.dirname(test.getExecPath())
source_path = test.getSourcePath()
exec_path = tmpBase + '.exe'
object_path = tmpBase + '.o'
# Create the output directory if it does not already exist.
libcxx.util.mkdir_p(os.path.dirname(tmpBase))
try:
# Compile the test
cmd, out, err, rc = test_cxx.compileLinkTwoSteps(
source_path, out=exec_path, object_file=object_path,
cwd=execDir)
compile_cmd = cmd
if rc != 0:
report = libcxx.util.makeReport(cmd, out, err, rc)
report += "Compilation failed unexpectedly!"
return lit.Test.Result(lit.Test.FAIL, report)
# Run the test
local_cwd = os.path.dirname(source_path)
env = None
if self.exec_env:
env = self.exec_env
# TODO: Only list actually needed files in file_deps.
# Right now we just mark all of the .dat files in the same
# directory as dependencies, but it's likely less than that. We
# should add a `// FILE-DEP: foo.dat` to each test to track this.
data_files = [os.path.join(local_cwd, f)
for f in os.listdir(local_cwd) if f.endswith('.dat')]
is_flaky = self._get_parser('FLAKY_TEST.', parsers).getValue()
max_retry = 5 if is_flaky else 1
for retry_count in range(max_retry):
cmd, out, err, rc = self.executor.run(exec_path, [exec_path],
local_cwd, data_files,
env)
report = "Compiled With: '%s'\n" % ' '.join(compile_cmd)
report += libcxx.util.makeReport(cmd, out, err, rc)
if rc == 0:
res = lit.Test.PASS if retry_count == 0 else lit.Test.FLAKYPASS
return lit.Test.Result(res, report)
elif rc != 0 and retry_count + 1 == max_retry:
report += "Compiled test failed unexpectedly!"
return lit.Test.Result(lit.Test.FAIL, report)
assert False # Unreachable
finally:
            # Note that cleanup of exec_path happens in `_clean()`. If you
            # override this, cleanup is your responsibility.
libcxx.util.cleanFile(object_path)
self._clean(exec_path)
def _evaluate_fail_test(self, test, test_cxx, parsers):
source_path = test.getSourcePath()
# FIXME: lift this detection into LLVM/LIT.
with open(source_path, 'rb') as f:
contents = f.read()
verify_tags = [b'expected-note', b'expected-remark',
b'expected-warning', b'expected-error',
b'expected-no-diagnostics']
use_verify = self.use_verify_for_fail and \
any([tag in contents for tag in verify_tags])
# FIXME(EricWF): GCC 5 does not evaluate static assertions that
        # are dependent on a template parameter when '-fsyntax-only' is passed.
# This is fixed in GCC 6. However for now we only pass "-fsyntax-only"
# when using Clang.
if test_cxx.type != 'gcc' and test_cxx.type != 'nvcc':
test_cxx.flags += ['-fsyntax-only']
if use_verify:
test_cxx.useVerify()
test_cxx.useWarnings()
if '-Wuser-defined-warnings' in test_cxx.warning_flags:
test_cxx.warning_flags += ['-Wno-error=user-defined-warnings']
else:
# We still need to enable certain warnings on .fail.cpp test when
# -verify isn't enabled. Such as -Werror=unused-result. However,
# we don't want it enabled too liberally, which might incorrectly
# allow unrelated failure tests to 'pass'.
#
# Therefore, we check if the test was expected to fail because of
# nodiscard before enabling it
test_str_list = [b'ignoring return value', b'nodiscard',
b'NODISCARD']
if any(test_str in contents for test_str in test_str_list):
test_cxx.flags += ['-Werror=unused-result']
cmd, out, err, rc = test_cxx.compile(source_path, out=os.devnull)
check_rc = lambda rc: rc == 0 if use_verify else rc != 0
report = libcxx.util.makeReport(cmd, out, err, rc)
if check_rc(rc):
return lit.Test.Result(lit.Test.PASS, report)
else:
report += ('Expected compilation to fail!\n' if not use_verify else
'Expected compilation using verify to pass!\n')
return lit.Test.Result(lit.Test.FAIL, report)
| libcudacxx-main | libcxx/utils/libcxx/test/format.py |
from __future__ import absolute_import
import os
import subprocess
import sys
import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat
kIsWindows = sys.platform in ['win32', 'cygwin']
class GoogleBenchmark(TestFormat):
def __init__(self, test_sub_dirs, test_suffix, benchmark_args=[]):
self.benchmark_args = list(benchmark_args)
self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(';')
# On Windows, assume tests will also end in '.exe'.
exe_suffix = str(test_suffix)
if kIsWindows:
exe_suffix += '.exe'
# Also check for .py files for testing purposes.
self.test_suffixes = {exe_suffix, test_suffix + '.py'}
def getBenchmarkTests(self, path, litConfig, localConfig):
"""getBenchmarkTests(path) - [name]
        Return the tests available in the benchmark executable.
Args:
path: String path to a gtest executable
litConfig: LitConfig instance
localConfig: TestingConfig instance"""
# TODO: allow splitting tests according to the "benchmark family" so
# the output for a single family of tests all belongs to the same test
# target.
list_test_cmd = [path, '--benchmark_list_tests']
try:
output = subprocess.check_output(list_test_cmd,
env=localConfig.environment)
except subprocess.CalledProcessError as exc:
litConfig.warning(
"unable to discover google-benchmarks in %r: %s. Process output: %s"
% (path, sys.exc_info()[1], exc.output))
            return
nested_tests = []
for ln in output.splitlines(False): # Don't keep newlines.
ln = lit.util.to_string(ln)
if not ln.strip():
continue
index = 0
            while ln[index*2:index*2+2] == '  ':
index += 1
while len(nested_tests) > index:
nested_tests.pop()
ln = ln[index*2:]
if ln.endswith('.'):
nested_tests.append(ln)
elif any([name.startswith('DISABLED_')
for name in nested_tests + [ln]]):
# Gtest will internally skip these tests. No need to launch a
# child process for it.
continue
else:
yield ''.join(nested_tests) + ln
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for subdir in self.test_sub_dirs:
dir_path = os.path.join(source_path, subdir)
if not os.path.isdir(dir_path):
continue
for fn in lit.util.listdir_files(dir_path,
suffixes=self.test_suffixes):
# Discover the tests in this executable.
execpath = os.path.join(source_path, subdir, fn)
testnames = self.getBenchmarkTests(execpath, litConfig, localConfig)
for testname in testnames:
testPath = path_in_suite + (subdir, fn, testname)
yield lit.Test.Test(testSuite, testPath, localConfig,
file_path=execpath)
def execute(self, test, litConfig):
testPath,testName = os.path.split(test.getSourcePath())
while not os.path.exists(testPath):
# Handle GTest parametrized and typed tests, whose name includes
# some '/'s.
testPath, namePrefix = os.path.split(testPath)
testName = namePrefix + '/' + testName
cmd = [testPath, '--benchmark_filter=%s$' % testName ] + self.benchmark_args
if litConfig.noExecute:
return lit.Test.PASS, ''
try:
out, err, exitCode = lit.util.executeCommand(
cmd, env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
except lit.util.ExecuteCommandTimeoutException:
return (lit.Test.TIMEOUT,
'Reached timeout of {} seconds'.format(
litConfig.maxIndividualTestTime)
)
if exitCode:
return lit.Test.FAIL, ('exit code: %d\n' % exitCode) + out + err
passing_test_line = testName
if passing_test_line not in out:
msg = ('Unable to find %r in google benchmark output:\n\n%s%s' %
(passing_test_line, out, err))
return lit.Test.UNRESOLVED, msg
return lit.Test.PASS, err + out
| libcudacxx-main | libcxx/utils/libcxx/test/googlebenchmark.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import platform
import os
from libcxx.test import tracing
from libcxx.util import executeCommand
class Executor(object):
def run(self, exe_path, cmd, local_cwd, file_deps=None, env=None):
"""Execute a command.
Be very careful not to change shared state in this function.
Executor objects are shared between python processes in `lit -jN`.
Args:
exe_path: str: Local path to the executable to be run
cmd: [str]: subprocess.call style command
local_cwd: str: Local path to the working directory
file_deps: [str]: Files required by the test
env: {str: str}: Environment variables to execute under
Returns:
cmd, out, err, exitCode
"""
raise NotImplementedError
class LocalExecutor(Executor):
def __init__(self):
super(LocalExecutor, self).__init__()
self.is_windows = platform.system() == 'Windows'
def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
cmd = cmd or [exe_path]
if work_dir == '.':
work_dir = os.getcwd()
out, err, rc = executeCommand(cmd, cwd=work_dir, env=env)
return (cmd, out, err, rc)
class NoopExecutor(Executor):
def __init__(self):
super(NoopExecutor, self).__init__()
def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
return (cmd, '', '', 0)
class PrefixExecutor(Executor):
"""Prefix an executor with some other command wrapper.
Most useful for setting ulimits on commands, or running an emulator like
qemu and valgrind.
"""
def __init__(self, commandPrefix, chain):
super(PrefixExecutor, self).__init__()
self.commandPrefix = commandPrefix
self.chain = chain
def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
cmd = cmd or [exe_path]
return self.chain.run(exe_path, self.commandPrefix + cmd, work_dir,
file_deps, env=env)
class PostfixExecutor(Executor):
"""Postfix an executor with some args."""
def __init__(self, commandPostfix, chain):
super(PostfixExecutor, self).__init__()
self.commandPostfix = commandPostfix
self.chain = chain
def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
cmd = cmd or [exe_path]
        return self.chain.run(exe_path, cmd + self.commandPostfix, work_dir,
                              file_deps, env=env)
class TimeoutExecutor(PrefixExecutor):
"""Execute another action under a timeout.
Deprecated. http://reviews.llvm.org/D6584 adds timeouts to LIT.
"""
def __init__(self, duration, chain):
super(TimeoutExecutor, self).__init__(
['timeout', duration], chain)
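# Illustrative composition (the values are assumptions): executors are designed
# to nest, so a test binary can be run under an emulator with a time limit by
# wrapping the local executor, e.g.
#   executor = TimeoutExecutor('120s', PrefixExecutor(['qemu-aarch64'],
#                                                     LocalExecutor()))
#   cmd, out, err, rc = executor.run('/tmp/test.exe')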
class RemoteExecutor(Executor):
def __init__(self):
self.local_run = executeCommand
def remote_temp_dir(self):
return self._remote_temp(True)
def remote_temp_file(self):
return self._remote_temp(False)
def _remote_temp(self, is_dir):
raise NotImplementedError()
def copy_in(self, local_srcs, remote_dsts):
# This could be wrapped up in a tar->scp->untar for performance
# if there are lots of files to be copied/moved
for src, dst in zip(local_srcs, remote_dsts):
self._copy_in_file(src, dst)
def _copy_in_file(self, src, dst):
raise NotImplementedError()
def delete_remote(self, remote):
try:
self._execute_command_remote(['rm', '-rf', remote])
except OSError:
# TODO: Log failure to delete?
pass
def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
target_exe_path = None
target_cwd = None
try:
target_cwd = self.remote_temp_dir()
target_exe_path = os.path.join(target_cwd, 'libcxx_test.exe')
if cmd:
# Replace exe_path with target_exe_path.
cmd = [c if c != exe_path else target_exe_path for c in cmd]
else:
cmd = [target_exe_path]
srcs = [exe_path]
dsts = [target_exe_path]
if file_deps is not None:
dev_paths = [os.path.join(target_cwd, os.path.basename(f))
for f in file_deps]
srcs.extend(file_deps)
dsts.extend(dev_paths)
self.copy_in(srcs, dsts)
# TODO(jroelofs): capture the copy_in and delete_remote commands,
# and conjugate them with '&&'s around the first tuple element
# returned here:
return self._execute_command_remote(cmd, target_cwd, env)
finally:
if target_cwd:
self.delete_remote(target_cwd)
def _execute_command_remote(self, cmd, remote_work_dir='.', env=None):
raise NotImplementedError()
class SSHExecutor(RemoteExecutor):
def __init__(self, host, username=None):
super(SSHExecutor, self).__init__()
self.user_prefix = username + '@' if username else ''
self.host = host
self.scp_command = 'scp'
self.ssh_command = 'ssh'
# TODO(jroelofs): switch this on some -super-verbose-debug config flag
if False:
self.local_run = tracing.trace_function(
self.local_run, log_calls=True, log_results=True,
label='ssh_local')
def _remote_temp(self, is_dir):
# TODO: detect what the target system is, and use the correct
# mktemp command for it. (linux and darwin differ here, and I'm
# sure windows has another way to do it)
# Not sure how to do suffix on osx yet
dir_arg = '-d' if is_dir else ''
cmd = 'mktemp -q {} /tmp/libcxx.XXXXXXXXXX'.format(dir_arg)
_, temp_path, err, exitCode = self._execute_command_remote([cmd])
temp_path = temp_path.strip()
if exitCode != 0:
raise RuntimeError(err)
return temp_path
def _copy_in_file(self, src, dst):
scp = self.scp_command
remote = self.host
remote = self.user_prefix + remote
cmd = [scp, '-p', src, remote + ':' + dst]
self.local_run(cmd)
def _execute_command_remote(self, cmd, remote_work_dir='.', env=None):
remote = self.user_prefix + self.host
ssh_cmd = [self.ssh_command, '-oBatchMode=yes', remote]
if env:
env_cmd = ['env'] + ['%s="%s"' % (k, v) for k, v in env.items()]
else:
env_cmd = []
remote_cmd = ' '.join(env_cmd + cmd)
if remote_work_dir != '.':
remote_cmd = 'cd ' + remote_work_dir + ' && ' + remote_cmd
out, err, rc = self.local_run(ssh_cmd + [remote_cmd])
return (remote_cmd, out, err, rc)
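# Illustrative remote run (the host name and file names are hypothetical):
#   executor = SSHExecutor('test-board.local', username='jenkins')
#   cmd, out, err, rc = executor.run('/tmp/test.exe', file_deps=['input.dat'])
# The executable and its data files are copied into a fresh mktemp directory on
# the target, the command is run there, and the directory is deleted afterwards.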
| libcudacxx-main | libcxx/utils/libcxx/test/executor.py |
#===----------------------------------------------------------------------===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===//
import importlib
import locale
import os
import platform
import re
import subprocess
import sys
from libcxx.util import executeCommand
class DefaultTargetInfo(object):
def __init__(self, full_config):
self.full_config = full_config
def platform(self):
return sys.platform.lower().strip()
def add_locale_features(self, features):
self.full_config.lit_config.warning(
"No locales entry for target_system: %s" % self.platform())
def add_cxx_compile_flags(self, flags): pass
def add_cxx_link_flags(self, flags): pass
def configure_env(self, env): pass
def allow_cxxabi_link(self): return True
def add_sanitizer_features(self, sanitizer_type, features): pass
def use_lit_shell_default(self): return False
def test_locale(loc):
assert loc is not None
default_locale = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, loc)
return True
except locale.Error:
return False
finally:
locale.setlocale(locale.LC_ALL, default_locale)
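# Illustrative check (the locale name is just an example):
#   test_locale('fr_FR.UTF-8')
# returns True only if the host can actually switch to that locale, and it
# always restores the previous LC_ALL setting before returning.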
def add_common_locales(features, lit_config, is_windows=False):
# A list of locales needed by the test-suite.
# The list uses the canonical name for the locale used in the test-suite
    # TODO: On Linux, ISO8859 *may* need to be hyphenated.
locales = [
('en_US.UTF-8', 'English_United States.1252'),
('fr_FR.UTF-8', 'French_France.1252'),
('ru_RU.UTF-8', 'Russian_Russia.1251'),
('zh_CN.UTF-8', 'Chinese_China.936'),
('fr_CA.ISO8859-1', 'French_Canada.1252'),
('cs_CZ.ISO8859-2', 'Czech_Czech Republic.1250')
]
for loc_id, windows_loc_name in locales:
loc_name = windows_loc_name if is_windows else loc_id
if test_locale(loc_name):
features.add('locale.{0}'.format(loc_id))
else:
lit_config.warning('The locale {0} is not supported by '
'your platform. Some tests will be '
'unsupported.'.format(loc_name))
class DarwinLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(DarwinLocalTI, self).__init__(full_config)
def is_host_macosx(self):
name = subprocess.check_output(['sw_vers', '-productName']).strip()
return name == "Mac OS X"
def get_macosx_version(self):
assert self.is_host_macosx()
version = subprocess.check_output(
['sw_vers', '-productVersion']).strip()
version = re.sub(r'([0-9]+\.[0-9]+)(\..*)?', r'\1', version)
return version
def get_sdk_version(self, name):
assert self.is_host_macosx()
cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
        out = None
        try:
            out = subprocess.check_output(cmd).strip()
        except OSError:
            pass
if not out:
self.full_config.lit_config.fatal(
"cannot infer sdk version with: %r" % cmd)
return re.sub(r'.*/[^0-9]+([0-9.]+)\.sdk', r'\1', out)
def get_platform(self):
platform = self.full_config.get_lit_conf('platform')
if platform:
platform = re.sub(r'([^0-9]+)([0-9\.]*)', r'\1-\2', platform)
name, version = tuple(platform.split('-', 1))
else:
name = 'macosx'
version = None
if version:
return (False, name, version)
# Infer the version, either from the SDK or the system itself. For
# macosx, ignore the SDK version; what matters is what's at
# /usr/lib/libc++.dylib.
if name == 'macosx':
version = self.get_macosx_version()
else:
version = self.get_sdk_version(name)
return (True, name, version)
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config)
def add_cxx_compile_flags(self, flags):
if self.full_config.use_deployment:
_, name, _ = self.full_config.config.deployment
cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
else:
cmd = ['xcrun', '--show-sdk-path']
out, err, exit_code = executeCommand(cmd)
if exit_code != 0:
self.full_config.lit_config.warning("Could not determine macOS SDK path! stderr was " + err)
if exit_code == 0 and out:
sdk_path = out.strip()
self.full_config.lit_config.note('using SDKROOT: %r' % sdk_path)
assert isinstance(sdk_path, str)
flags += ["-isysroot", sdk_path]
def add_cxx_link_flags(self, flags):
flags += ['-lSystem']
def configure_env(self, env):
library_paths = []
# Configure the library path for libc++
if self.full_config.cxx_runtime_root:
library_paths += [self.full_config.cxx_runtime_root]
elif self.full_config.use_system_cxx_lib:
if (os.path.isdir(str(self.full_config.use_system_cxx_lib))):
library_paths += [self.full_config.use_system_cxx_lib]
# Configure the abi library path
if self.full_config.abi_library_root:
library_paths += [self.full_config.abi_library_root]
if library_paths:
env['DYLD_LIBRARY_PATH'] = ':'.join(library_paths)
def allow_cxxabi_link(self):
# Don't link libc++abi explicitly on OS X because the symbols
# should be available in libc++ directly.
return False
class FreeBSDLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(FreeBSDLocalTI, self).__init__(full_config)
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config)
def add_cxx_link_flags(self, flags):
flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lcxxrt']
class NetBSDLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(NetBSDLocalTI, self).__init__(full_config)
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config)
def add_cxx_link_flags(self, flags):
flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lc++abi',
'-lunwind']
class LinuxLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(LinuxLocalTI, self).__init__(full_config)
def platform(self):
return 'linux'
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config)
def add_cxx_compile_flags(self, flags):
flags += ['-D__STDC_FORMAT_MACROS',
'-D__STDC_LIMIT_MACROS',
'-D__STDC_CONSTANT_MACROS']
def add_cxx_link_flags(self, flags):
enable_threads = ('libcpp-has-no-threads' not in
self.full_config.config.available_features)
llvm_unwinder = self.full_config.get_lit_bool('llvm_unwinder', False)
shared_libcxx = self.full_config.get_lit_bool('enable_shared', True)
flags += ['-lm']
if not llvm_unwinder:
flags += ['-lgcc_s', '-lgcc']
if enable_threads:
flags += ['-lpthread']
if not shared_libcxx:
flags += ['-lrt']
flags += ['-lc']
if llvm_unwinder:
flags += ['-lunwind', '-ldl']
else:
flags += ['-lgcc_s']
builtins_lib = self.full_config.get_lit_conf('builtins_library')
if builtins_lib:
flags += [builtins_lib]
else:
flags += ['-lgcc']
use_libatomic = self.full_config.get_lit_bool('use_libatomic', False)
if use_libatomic:
flags += ['-latomic']
san = self.full_config.get_lit_conf('use_sanitizer', '').strip()
if san:
# The libraries and their order are taken from the
# linkSanitizerRuntimeDeps function in
# clang/lib/Driver/Tools.cpp
flags += ['-lpthread', '-lrt', '-lm', '-ldl']
class WindowsLocalTI(DefaultTargetInfo):
def __init__(self, full_config):
super(WindowsLocalTI, self).__init__(full_config)
def add_locale_features(self, features):
add_common_locales(features, self.full_config.lit_config,
is_windows=True)
def use_lit_shell_default(self):
# Default to the internal shell on Windows, as bash on Windows is
# usually very slow.
return True
def make_target_info(full_config):
default = "libcxx.test.target_info.LocalTI"
info_str = full_config.get_lit_conf('target_info', default)
if info_str != default:
mod_path, _, info = info_str.rpartition('.')
mod = importlib.import_module(mod_path)
target_info = getattr(mod, info)(full_config)
full_config.lit_config.note("inferred target_info as: %r" % info_str)
return target_info
target_system = platform.system()
if target_system == 'Darwin': return DarwinLocalTI(full_config)
if target_system == 'FreeBSD': return FreeBSDLocalTI(full_config)
if target_system == 'NetBSD': return NetBSDLocalTI(full_config)
if target_system == 'Linux': return LinuxLocalTI(full_config)
if target_system == 'Windows': return WindowsLocalTI(full_config)
return DefaultTargetInfo(full_config)
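# Illustrative override (the module path is hypothetical): passing
#   --param=target_info=my_project.test.MyTargetInfo
# to lit makes make_target_info() import and instantiate that class instead of
# picking one of the platform-specific TargetInfo classes above.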
| libcudacxx-main | libcxx/utils/libcxx/test/target_info.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import ast
import distutils.spawn
import sys
import re
import libcxx.util
from pprint import pformat
def read_syms_from_list(slist):
"""
Read a list of symbols from a list of strings.
Each string is one symbol.
"""
return [ast.literal_eval(l) for l in slist]
def read_syms_from_file(filename):
"""
Read a list of symbols in from a file.
"""
with open(filename, 'r') as f:
data = f.read()
return read_syms_from_list(data.splitlines())
def read_blacklist(filename):
with open(filename, 'r') as f:
data = f.read()
lines = [l.strip() for l in data.splitlines() if l.strip()]
lines = [l for l in lines if not l.startswith('#')]
return lines
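# Illustrative symbol-list file contents (the entries are made up): each line
# is a Python literal that ast.literal_eval can parse, e.g.
#   {'name': '_Znwm', 'type': 'FUNC', 'is_defined': True}
#   {'name': '_ZTIi', 'type': 'OBJECT', 'size': 16, 'is_defined': True}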
def write_syms(sym_list, out=None, names_only=False, filter=None):
"""
Write a list of symbols to the file named by out.
"""
out_str = ''
out_list = sym_list
out_list.sort(key=lambda x: x['name'])
if filter is not None:
out_list = filter(out_list)
if names_only:
out_list = [sym['name'] for sym in out_list]
for sym in out_list:
# Use pformat for consistent ordering of keys.
out_str += pformat(sym, width=100000) + '\n'
if out is None:
sys.stdout.write(out_str)
else:
with open(out, 'w') as f:
f.write(out_str)
_cppfilt_exe = distutils.spawn.find_executable('c++filt')
def demangle_symbol(symbol):
if _cppfilt_exe is None:
return symbol
out, _, exit_code = libcxx.util.executeCommandVerbose(
[_cppfilt_exe], input=symbol)
if exit_code != 0:
return symbol
return out
def is_elf(filename):
with open(filename, 'rb') as f:
magic_bytes = f.read(4)
return magic_bytes == b'\x7fELF'
def is_mach_o(filename):
with open(filename, 'rb') as f:
magic_bytes = f.read(4)
    return magic_bytes in [
        b'\xfe\xed\xfa\xce',  # MH_MAGIC
        b'\xce\xfa\xed\xfe',  # MH_CIGAM
        b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
        b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
        b'\xca\xfe\xba\xbe',  # FAT_MAGIC
        b'\xbe\xba\xfe\xca'   # FAT_CIGAM
    ]
def is_library_file(filename):
if sys.platform == 'darwin':
return is_mach_o(filename)
else:
return is_elf(filename)
def extract_or_load(filename):
import libcxx.sym_check.extract
if is_library_file(filename):
return libcxx.sym_check.extract.extract_symbols(filename)
return read_syms_from_file(filename)
def adjust_mangled_name(name):
if not name.startswith('__Z'):
return name
return name[1:]
new_delete_std_symbols = [
'_Znam',
'_Znwm',
'_ZdaPv',
'_ZdaPvm',
'_ZdlPv',
'_ZdlPvm'
]
cxxabi_symbols = [
'___dynamic_cast',
'___gxx_personality_v0',
'_ZTIDi',
'_ZTIDn',
'_ZTIDs',
'_ZTIPDi',
'_ZTIPDn',
'_ZTIPDs',
'_ZTIPKDi',
'_ZTIPKDn',
'_ZTIPKDs',
'_ZTIPKa',
'_ZTIPKb',
'_ZTIPKc',
'_ZTIPKd',
'_ZTIPKe',
'_ZTIPKf',
'_ZTIPKh',
'_ZTIPKi',
'_ZTIPKj',
'_ZTIPKl',
'_ZTIPKm',
'_ZTIPKs',
'_ZTIPKt',
'_ZTIPKv',
'_ZTIPKw',
'_ZTIPKx',
'_ZTIPKy',
'_ZTIPa',
'_ZTIPb',
'_ZTIPc',
'_ZTIPd',
'_ZTIPe',
'_ZTIPf',
'_ZTIPh',
'_ZTIPi',
'_ZTIPj',
'_ZTIPl',
'_ZTIPm',
'_ZTIPs',
'_ZTIPt',
'_ZTIPv',
'_ZTIPw',
'_ZTIPx',
'_ZTIPy',
'_ZTIa',
'_ZTIb',
'_ZTIc',
'_ZTId',
'_ZTIe',
'_ZTIf',
'_ZTIh',
'_ZTIi',
'_ZTIj',
'_ZTIl',
'_ZTIm',
'_ZTIs',
'_ZTIt',
'_ZTIv',
'_ZTIw',
'_ZTIx',
'_ZTIy',
'_ZTSDi',
'_ZTSDn',
'_ZTSDs',
'_ZTSPDi',
'_ZTSPDn',
'_ZTSPDs',
'_ZTSPKDi',
'_ZTSPKDn',
'_ZTSPKDs',
'_ZTSPKa',
'_ZTSPKb',
'_ZTSPKc',
'_ZTSPKd',
'_ZTSPKe',
'_ZTSPKf',
'_ZTSPKh',
'_ZTSPKi',
'_ZTSPKj',
'_ZTSPKl',
'_ZTSPKm',
'_ZTSPKs',
'_ZTSPKt',
'_ZTSPKv',
'_ZTSPKw',
'_ZTSPKx',
'_ZTSPKy',
'_ZTSPa',
'_ZTSPb',
'_ZTSPc',
'_ZTSPd',
'_ZTSPe',
'_ZTSPf',
'_ZTSPh',
'_ZTSPi',
'_ZTSPj',
'_ZTSPl',
'_ZTSPm',
'_ZTSPs',
'_ZTSPt',
'_ZTSPv',
'_ZTSPw',
'_ZTSPx',
'_ZTSPy',
'_ZTSa',
'_ZTSb',
'_ZTSc',
'_ZTSd',
'_ZTSe',
'_ZTSf',
'_ZTSh',
'_ZTSi',
'_ZTSj',
'_ZTSl',
'_ZTSm',
'_ZTSs',
'_ZTSt',
'_ZTSv',
'_ZTSw',
'_ZTSx',
'_ZTSy'
]
def is_stdlib_symbol_name(name, sym):
name = adjust_mangled_name(name)
if re.search("@GLIBC|@GCC", name):
# Only when symbol is defined do we consider it ours
return sym['is_defined']
if re.search('(St[0-9])|(__cxa)|(__cxxabi)', name):
return True
if name in new_delete_std_symbols:
return True
if name in cxxabi_symbols:
return True
if name.startswith('_Z'):
return True
return False
def filter_stdlib_symbols(syms):
stdlib_symbols = []
other_symbols = []
for s in syms:
canon_name = adjust_mangled_name(s['name'])
if not is_stdlib_symbol_name(canon_name, s):
other_symbols += [s]
else:
stdlib_symbols += [s]
return stdlib_symbols, other_symbols
| libcudacxx-main | libcxx/utils/libcxx/sym_check/util.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""libcxx abi symbol checker"""
__author__ = 'Eric Fiselier'
__email__ = '[email protected]'
__versioninfo__ = (0, 1, 0)
__version__ = ' '.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = ['diff', 'extract', 'util']
| libcudacxx-main | libcxx/utils/libcxx/sym_check/__init__.py |
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
diff - A set of functions for diff-ing two symbol lists.
"""
from libcxx.sym_check import util
def _symbol_difference(lhs, rhs):
lhs_names = set(((n['name'], n['type']) for n in lhs))
rhs_names = set(((n['name'], n['type']) for n in rhs))
diff_names = lhs_names - rhs_names
return [n for n in lhs if (n['name'], n['type']) in diff_names]
def _find_by_key(sym_list, k):
for sym in sym_list:
if sym['name'] == k:
return sym
return None
def added_symbols(old, new):
return _symbol_difference(new, old)
def removed_symbols(old, new):
return _symbol_difference(old, new)
def changed_symbols(old, new):
changed = []
for old_sym in old:
if old_sym in new:
continue
new_sym = _find_by_key(new, old_sym['name'])
if (new_sym is not None and not new_sym in old
and old_sym != new_sym):
changed += [(old_sym, new_sym)]
return changed
def diff(old, new):
added = added_symbols(old, new)
removed = removed_symbols(old, new)
changed = changed_symbols(old, new)
return added, removed, changed
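# Illustrative use (the symbol dicts are made up): given two extracted symbol
# lists, diff() partitions the differences and report_diff() renders them.
#   old = [{'name': '_Znwm', 'type': 'FUNC', 'is_defined': True}]
#   new = [{'name': '_Znam', 'type': 'FUNC', 'is_defined': True}]
#   added, removed, changed = diff(old, new)
#   report, abi_break, is_different = report_diff(added, removed, changed)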
def report_diff(added_syms, removed_syms, changed_syms, names_only=False,
demangle=True):
def maybe_demangle(name):
return util.demangle_symbol(name) if demangle else name
report = ''
for sym in added_syms:
report += 'Symbol added: %s\n' % maybe_demangle(sym['name'])
if not names_only:
report += ' %s\n\n' % sym
if added_syms and names_only:
report += '\n'
for sym in removed_syms:
report += 'SYMBOL REMOVED: %s\n' % maybe_demangle(sym['name'])
if not names_only:
report += ' %s\n\n' % sym
if removed_syms and names_only:
report += '\n'
if not names_only:
for sym_pair in changed_syms:
old_sym, new_sym = sym_pair
old_str = '\n OLD SYMBOL: %s' % old_sym
new_str = '\n NEW SYMBOL: %s' % new_sym
report += ('SYMBOL CHANGED: %s%s%s\n\n' %
(maybe_demangle(old_sym['name']),
old_str, new_str))
added = bool(len(added_syms) != 0)
abi_break = bool(len(removed_syms))
if not names_only:
abi_break = abi_break or len(changed_syms)
if added or abi_break:
report += 'Summary\n'
report += ' Added: %d\n' % len(added_syms)
report += ' Removed: %d\n' % len(removed_syms)
if not names_only:
report += ' Changed: %d\n' % len(changed_syms)
if not abi_break:
report += 'Symbols added.'
else:
report += 'ABI BREAKAGE: SYMBOLS ADDED OR REMOVED!'
else:
report += 'Symbols match.'
is_different = abi_break or bool(len(added_syms)) \
or bool(len(changed_syms))
return report, abi_break, is_different
| libcudacxx-main | libcxx/utils/libcxx/sym_check/diff.py |
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
extract - A set of function that extract symbol lists from shared libraries.
"""
import distutils.spawn
import os.path
import sys
import re
import libcxx.util
from libcxx.sym_check import util
extract_ignore_names = ['_init', '_fini']
class NMExtractor(object):
"""
NMExtractor - Extract symbol lists from libraries using nm.
"""
@staticmethod
def find_tool():
"""
Search for the nm executable and return the path.
"""
return distutils.spawn.find_executable('nm')
def __init__(self, static_lib):
"""
Initialize the nm executable and flags that will be used to extract
symbols from shared libraries.
"""
self.nm_exe = self.find_tool()
if self.nm_exe is None:
# ERROR no NM found
print("ERROR: Could not find nm")
sys.exit(1)
self.static_lib = static_lib
self.flags = ['-P', '-g']
def extract(self, lib):
"""
Extract symbols from a library and return the results as a dict of
parsed symbols.
"""
cmd = [self.nm_exe] + self.flags + [lib]
out, _, exit_code = libcxx.util.executeCommandVerbose(cmd)
if exit_code != 0:
raise RuntimeError('Failed to run %s on %s' % (self.nm_exe, lib))
fmt_syms = (self._extract_sym(l)
for l in out.splitlines() if l.strip())
# Cast symbol to string.
final_syms = (repr(s) for s in fmt_syms if self._want_sym(s))
# Make unique and sort strings.
tmp_list = list(sorted(set(final_syms)))
# Cast string back to symbol.
return util.read_syms_from_list(tmp_list)
def _extract_sym(self, sym_str):
bits = sym_str.split()
# Everything we want has at least two columns.
if len(bits) < 2:
return None
new_sym = {
'name': bits[0],
'type': bits[1],
'is_defined': (bits[1].lower() != 'u')
}
new_sym['name'] = new_sym['name'].replace('@@', '@')
new_sym = self._transform_sym_type(new_sym)
# NM types which we want to save the size for.
if new_sym['type'] == 'OBJECT' and len(bits) > 3:
new_sym['size'] = int(bits[3], 16)
return new_sym
@staticmethod
def _want_sym(sym):
"""
        Check that sym is a valid symbol that we want to keep.
"""
if sym is None or len(sym) < 2:
return False
if sym['name'] in extract_ignore_names:
return False
bad_types = ['t', 'b', 'r', 'd', 'w']
return (sym['type'] not in bad_types
and sym['name'] not in ['__bss_start', '_end', '_edata'])
@staticmethod
def _transform_sym_type(sym):
"""
Map the nm single letter output for type to either FUNC or OBJECT.
If the type is not recognized it is left unchanged.
"""
func_types = ['T', 'W']
obj_types = ['B', 'D', 'R', 'V', 'S']
if sym['type'] in func_types:
sym['type'] = 'FUNC'
elif sym['type'] in obj_types:
sym['type'] = 'OBJECT'
return sym
class ReadElfExtractor(object):
"""
ReadElfExtractor - Extract symbol lists from libraries using readelf.
"""
@staticmethod
def find_tool():
"""
Search for the readelf executable and return the path.
"""
return distutils.spawn.find_executable('readelf')
def __init__(self, static_lib):
"""
Initialize the readelf executable and flags that will be used to
extract symbols from shared libraries.
"""
self.tool = self.find_tool()
if self.tool is None:
            # ERROR: no readelf found
print("ERROR: Could not find readelf")
sys.exit(1)
# TODO: Support readelf for reading symbols from archives
        assert not static_lib, "ReadElf does not yet support static libs"
self.flags = ['--wide', '--symbols']
def extract(self, lib):
"""
Extract symbols from a library and return the results as a dict of
parsed symbols.
"""
cmd = [self.tool] + self.flags + [lib]
out, _, exit_code = libcxx.util.executeCommandVerbose(cmd)
if exit_code != 0:
            raise RuntimeError('Failed to run %s on %s' % (self.tool, lib))
dyn_syms = self.get_dynsym_table(out)
return self.process_syms(dyn_syms)
def process_syms(self, sym_list):
new_syms = []
for s in sym_list:
parts = s.split()
if not parts:
continue
assert len(parts) == 7 or len(parts) == 8 or len(parts) == 9
if len(parts) == 7:
continue
new_sym = {
'name': parts[7],
'size': int(parts[2]),
'type': parts[3],
'is_defined': (parts[6] != 'UND')
}
assert new_sym['type'] in ['OBJECT', 'FUNC', 'NOTYPE', 'TLS']
if new_sym['name'] in extract_ignore_names:
continue
if new_sym['type'] == 'NOTYPE':
continue
if new_sym['type'] == 'FUNC':
del new_sym['size']
new_syms += [new_sym]
return new_syms
def get_dynsym_table(self, out):
lines = out.splitlines()
start = -1
end = -1
for i in range(len(lines)):
if lines[i].startswith("Symbol table '.dynsym'"):
start = i + 2
if start != -1 and end == -1 and not lines[i].strip():
end = i + 1
assert start != -1
if end == -1:
end = len(lines)
return lines[start:end]
def extract_symbols(lib_file, static_lib=None):
"""
Extract and return a list of symbols extracted from a static or dynamic
    library. The symbols are extracted using nm or readelf. They are then
    filtered and formatted. Finally the symbols are made unique.
"""
if static_lib is None:
_, ext = os.path.splitext(lib_file)
static_lib = True if ext in ['.a'] else False
if ReadElfExtractor.find_tool() and not static_lib:
extractor = ReadElfExtractor(static_lib=static_lib)
else:
extractor = NMExtractor(static_lib=static_lib)
return extractor.extract(lib_file)
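# Illustrative use (the library path is hypothetical):
#   syms = extract_symbols('/usr/lib/libc++.so.1')
#   defined = [s['name'] for s in syms if s['is_defined']]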
| libcudacxx-main | libcxx/utils/libcxx/sym_check/extract.py |
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
match - A set of functions for matching symbols in a list to a list of regexs
"""
import re
def find_and_report_matching(symbol_list, regex_list):
report = ''
found_count = 0
for regex_str in regex_list:
report += 'Matching regex "%s":\n' % regex_str
matching_list = find_matching_symbols(symbol_list, regex_str)
if not matching_list:
report += ' No matches found\n\n'
continue
# else
found_count += len(matching_list)
for m in matching_list:
report += ' MATCHES: %s\n' % m['name']
report += '\n'
return found_count, report
def find_matching_symbols(symbol_list, regex_str):
regex = re.compile(regex_str)
matching_list = []
for s in symbol_list:
if regex.match(s['name']):
matching_list += [s]
return matching_list
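# Illustrative use (the regexes are made up): report every extracted symbol
# whose mangled name matches one of the given patterns.
#   count, report = find_and_report_matching(syms, [r'^_ZNSt3__1', r'__cxa'])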
| libcudacxx-main | libcxx/utils/libcxx/sym_check/match.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""GDB pretty-printers for libc++.
These should work for objects compiled when _LIBCUDACXX_ABI_UNSTABLE is defined
and when it is undefined.
"""
from __future__ import print_function
import re
import gdb
# One under-documented feature of the gdb pretty-printer API
# is that clients can call any other member of the API
# before they call to_string.
# Therefore all self.FIELDs must be set in the pretty-printer's
# __init__ function.
_void_pointer_type = gdb.lookup_type("void").pointer()
_long_int_type = gdb.lookup_type("unsigned long long")
_libcpp_big_endian = False
def addr_as_long(addr):
return int(addr.cast(_long_int_type))
# The size of a pointer in bytes.
_pointer_size = _void_pointer_type.sizeof
def _remove_cxx_namespace(typename):
"""Removed libc++ specific namespace from the type.
Arguments:
typename(string): A type, such as std::__u::something.
Returns:
A string without the libc++ specific part, such as std::something.
"""
return re.sub("std::__.*?::", "std::", typename)
def _remove_generics(typename):
"""Remove generics part of the type. Assumes typename is not empty.
Arguments:
typename(string): A type such as std::my_collection<element>.
Returns:
The prefix up to the generic part, such as std::my_collection.
"""
match = re.match("^([^<]+)", typename)
return match.group(1)
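# Illustrative behaviour of the helpers above (the type names are examples):
#   _remove_cxx_namespace('std::__u::vector<int>') -> 'std::vector<int>'
#   _remove_generics('std::map<int, int>')         -> 'std::map'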
# Some common substitutions on the types to reduce visual clutter (A user who
# wants to see the actual details can always use print/r).
_common_substitutions = [
("std::basic_string<char, std::char_traits<char>, std::allocator<char> >",
"std::string"),
]
def _prettify_typename(gdb_type):
"""Returns a pretty name for the type, or None if no name can be found.
Arguments:
gdb_type(gdb.Type): A type object.
Returns:
A string, without type_defs, libc++ namespaces, and common substitutions
applied.
"""
type_without_typedefs = gdb_type.strip_typedefs()
typename = type_without_typedefs.name or type_without_typedefs.tag or \
str(type_without_typedefs)
result = _remove_cxx_namespace(typename)
for find_str, subst_str in _common_substitutions:
result = re.sub(find_str, subst_str, result)
return result
def _typename_for_nth_generic_argument(gdb_type, n):
"""Returns a pretty string for the nth argument of the given type.
Arguments:
gdb_type(gdb.Type): A type object, such as the one for std::map<int, int>
n: The (zero indexed) index of the argument to return.
Returns:
A string for the nth argument, such a "std::string"
"""
element_type = gdb_type.template_argument(n)
return _prettify_typename(element_type)
def _typename_with_n_generic_arguments(gdb_type, n):
"""Return a string for the type with the first n (1, ...) generic args."""
base_type = _remove_generics(_prettify_typename(gdb_type))
arg_list = [base_type]
template = "%s<"
for i in range(n):
arg_list.append(_typename_for_nth_generic_argument(gdb_type, i))
template += "%s, "
result = (template[:-2] + ">") % tuple(arg_list)
return result
def _typename_with_first_generic_argument(gdb_type):
return _typename_with_n_generic_arguments(gdb_type, 1)
class StdTuplePrinter(object):
"""Print a std::tuple."""
class _Children(object):
"""Class to iterate over the tuple's children."""
def __init__(self, val):
self.val = val
self.child_iter = iter(self.val["__base_"].type.fields())
self.count = 0
def __iter__(self):
return self
def next(self):
# child_iter raises StopIteration when appropriate.
field_name = self.child_iter.next()
child = self.val["__base_"][field_name]["__value_"]
self.count += 1
return ("[%d]" % self.count, child)
def __init__(self, val):
self.val = val
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if not self.val.type.fields():
return "empty %s" % typename
return "%s containing" % typename
def children(self):
if not self.val.type.fields():
return iter(())
return self._Children(self.val)
def _get_base_subobject(child_class_value, index=0):
"""Returns the object's value in the form of the parent class at index.
This function effectively casts the child_class_value to the base_class's
type, but the type-to-cast to is stored in the field at index, and once
we know the field, we can just return the data.
Args:
child_class_value: the value to cast
index: the parent class index
Raises:
Exception: field at index was not a base-class field.
"""
field = child_class_value.type.fields()[index]
if not field.is_base_class:
raise Exception("Not a base-class field.")
return child_class_value[field]
def _value_of_pair_first(value):
"""Convenience for _get_base_subobject, for the common case."""
return _get_base_subobject(value, 0)["__value_"]
class StdStringPrinter(object):
"""Print a std::string."""
def _get_short_size(self, short_field, short_size):
"""Short size depends on both endianness and a compile-time define."""
# If the padding field is present after all this indirection, then string
# was compiled with _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT defined.
field = short_field.type.fields()[1].type.fields()[0]
libcpp_abi_alternate_string_layout = field.name and "__padding" in field.name
# This logical structure closely follows the original code (which is clearer
# in C++). Keep them parallel to make them easier to compare.
if libcpp_abi_alternate_string_layout:
if _libcpp_big_endian:
return short_size >> 1
else:
return short_size
elif _libcpp_big_endian:
return short_size
else:
return short_size >> 1
def __init__(self, val):
self.val = val
def to_string(self):
"""Build a python string from the data whether stored inline or separately."""
value_field = _value_of_pair_first(self.val["__r_"])
short_field = value_field["__s"]
short_size = short_field["__size_"]
if short_size == 0:
return ""
short_mask = self.val["__short_mask"]
        # It is counterintuitive to compare the size and short_mask to see if
        # the string is long, but that's the way the implementation does it.
        # Note that __is_long() doesn't use get_short_size in C++.
is_long = short_size & short_mask
if is_long:
long_field = value_field["__l"]
data = long_field["__data_"]
size = long_field["__size_"]
else:
data = short_field["__data_"]
size = self._get_short_size(short_field, short_size)
if hasattr(data, "lazy_string"):
return data.lazy_string(length=size)
return data.string(length=size)
def display_hint(self):
return "string"
class StdUniquePtrPrinter(object):
"""Print a std::unique_ptr."""
def __init__(self, val):
self.val = val
self.addr = _value_of_pair_first(self.val["__ptr_"])
self.pointee_type = self.val.type.template_argument(0)
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if not self.addr:
return "%s is nullptr" % typename
return ("%s<%s> containing" %
(typename,
_remove_generics(_prettify_typename(self.pointee_type))))
def __iter__(self):
if self.addr:
yield "__ptr_", self.addr.cast(self.pointee_type.pointer())
def children(self):
return self
class StdSharedPointerPrinter(object):
"""Print a std::shared_ptr."""
def __init__(self, val):
self.val = val
self.addr = self.val["__ptr_"]
def to_string(self):
"""Returns self as a string."""
typename = _remove_generics(_prettify_typename(self.val.type))
pointee_type = _remove_generics(
_prettify_typename(self.val.type.template_argument(0)))
if not self.addr:
return "%s is nullptr" % typename
refcount = self.val["__cntrl_"]
if refcount != 0:
usecount = refcount["__shared_owners_"] + 1
weakcount = refcount["__shared_weak_owners_"]
if usecount == 0:
state = "expired, weak %d" % weakcount
else:
state = "count %d, weak %d" % (usecount, weakcount)
return "%s<%s> %s containing" % (typename, pointee_type, state)
def __iter__(self):
if self.addr:
yield "__ptr_", self.addr
def children(self):
return self
class StdVectorPrinter(object):
"""Print a std::vector."""
class _VectorBoolIterator(object):
"""Class to iterate over the bool vector's children."""
def __init__(self, begin, size, bits_per_word):
self.item = begin
self.size = size
self.bits_per_word = bits_per_word
self.count = 0
self.offset = 0
def __iter__(self):
return self
def next(self):
"""Retrieve the next element."""
self.count += 1
if self.count > self.size:
raise StopIteration
entry = self.item.dereference()
if entry & (1 << self.offset):
outbit = 1
else:
outbit = 0
self.offset += 1
if self.offset >= self.bits_per_word:
self.item += 1
self.offset = 0
return ("[%d]" % self.count, outbit)
class _VectorIterator(object):
"""Class to iterate over the non-bool vector's children."""
def __init__(self, begin, end):
self.item = begin
self.end = end
self.count = 0
def __iter__(self):
return self
def next(self):
self.count += 1
if self.item == self.end:
raise StopIteration
entry = self.item.dereference()
self.item += 1
return ("[%d]" % self.count, entry)
def __init__(self, val):
"""Set val, length, capacity, and iterator for bool and normal vectors."""
self.val = val
self.typename = _remove_generics(_prettify_typename(val.type))
begin = self.val["__begin_"]
if self.val.type.template_argument(0).code == gdb.TYPE_CODE_BOOL:
self.typename += "<bool>"
self.length = self.val["__size_"]
bits_per_word = self.val["__bits_per_word"]
self.capacity = _value_of_pair_first(
self.val["__cap_alloc_"]) * bits_per_word
self.iterator = self._VectorBoolIterator(
begin, self.length, bits_per_word)
else:
end = self.val["__end_"]
self.length = end - begin
self.capacity = _get_base_subobject(
self.val["__end_cap_"])["__value_"] - begin
self.iterator = self._VectorIterator(begin, end)
def to_string(self):
return ("%s of length %d, capacity %d" %
(self.typename, self.length, self.capacity))
def children(self):
return self.iterator
def display_hint(self):
return "array"
class StdBitsetPrinter(object):
"""Print a std::bitset."""
def __init__(self, val):
self.val = val
self.n_words = int(self.val["__n_words"])
self.bits_per_word = int(self.val["__bits_per_word"])
if self.n_words == 1:
self.values = [int(self.val["__first_"])]
else:
self.values = [int(self.val["__first_"][index])
for index in range(self.n_words)]
def to_string(self):
typename = _prettify_typename(self.val.type)
return "%s" % typename
def _byte_it(self, value):
index = -1
while value:
index += 1
will_yield = value % 2
            value //= 2
if will_yield:
yield index
def _list_it(self):
for word_index in range(self.n_words):
current = self.values[word_index]
if current:
for n in self._byte_it(current):
yield ("[%d]" % (word_index * self.bits_per_word + n), 1)
def __iter__(self):
return self._list_it()
def children(self):
return self
class StdDequePrinter(object):
"""Print a std::deque."""
def __init__(self, val):
self.val = val
self.size = int(_value_of_pair_first(val["__size_"]))
self.start_ptr = self.val["__map_"]["__begin_"]
self.first_block_start_index = int(self.val["__start_"])
self.node_type = self.start_ptr.type
self.block_size = self._calculate_block_size(
val.type.template_argument(0))
def _calculate_block_size(self, element_type):
"""Calculates the number of elements in a full block."""
size = element_type.sizeof
# Copied from struct __deque_block_size implementation of libcxx.
        return 4096 // size if size < 256 else 16
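    # Worked example (illustrative, derived from the formula above): with
    # 8-byte elements a full block holds 4096 // 8 == 512 elements, while any
    # element type of 256 bytes or more falls back to 16 elements per block.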
def _bucket_it(self, start_addr, start_index, end_index):
for i in range(start_index, end_index):
yield i, (start_addr.dereference() + i).dereference()
def _list_it(self):
"""Primary iteration worker."""
num_emitted = 0
current_addr = self.start_ptr
start_index = self.first_block_start_index
while num_emitted < self.size:
end_index = min(start_index + self.size -
num_emitted, self.block_size)
for _, elem in self._bucket_it(current_addr, start_index, end_index):
yield "", elem
num_emitted += end_index - start_index
current_addr = gdb.Value(addr_as_long(current_addr) + _pointer_size) \
.cast(self.node_type)
start_index = 0
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if self.size:
return "%s with %d elements" % (typename, self.size)
return "%s is empty" % typename
def __iter__(self):
return self._list_it()
def children(self):
return self
def display_hint(self):
return "array"
class StdListPrinter(object):
"""Print a std::list."""
def __init__(self, val):
self.val = val
size_alloc_field = self.val["__size_alloc_"]
self.size = int(_value_of_pair_first(size_alloc_field))
dummy_node = self.val["__end_"]
self.nodetype = gdb.lookup_type(
re.sub("__list_node_base", "__list_node",
str(dummy_node.type.strip_typedefs()))).pointer()
self.first_node = dummy_node["__next_"]
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if self.size:
return "%s with %d elements" % (typename, self.size)
return "%s is empty" % typename
def _list_iter(self):
current_node = self.first_node
for _ in range(self.size):
yield "", current_node.cast(self.nodetype).dereference()["__value_"]
current_node = current_node.dereference()["__next_"]
def __iter__(self):
return self._list_iter()
def children(self):
return self if self.nodetype else iter(())
def display_hint(self):
return "array"
class StdQueueOrStackPrinter(object):
"""Print a std::queue or std::stack."""
def __init__(self, val):
self.val = val
self.underlying = val["c"]
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
return "%s wrapping" % typename
def children(self):
return iter([("", self.underlying)])
def display_hint(self):
return "array"
class StdPriorityQueuePrinter(object):
"""Print a std::priority_queue."""
def __init__(self, val):
self.val = val
self.underlying = val["c"]
def to_string(self):
# TODO(tamur): It would be nice to print the top element. The technical
# difficulty is that, the implementation refers to the underlying
# container, which is a generic class. libstdcxx pretty printers do not
# print the top element.
typename = _remove_generics(_prettify_typename(self.val.type))
return "%s wrapping" % typename
def children(self):
return iter([("", self.underlying)])
def display_hint(self):
return "array"
class RBTreeUtils(object):
"""Utility class for std::(multi)map, and std::(multi)set and iterators."""
def __init__(self, cast_type, root):
self.cast_type = cast_type
self.root = root
def left_child(self, node):
result = node.cast(self.cast_type).dereference()["__left_"]
return result
def right_child(self, node):
result = node.cast(self.cast_type).dereference()["__right_"]
return result
def parent(self, node):
"""Return the parent of node, if it exists."""
# If this is the root, then from the algorithm's point of view, it has no
# parent.
if node == self.root:
return None
# We don't have enough information to tell if this is the end_node (which
# doesn't have a __parent_ field), or the root (which doesn't have a parent
# from the algorithm's point of view), so cast_type may not be correct for
# this particular node. Use heuristics.
        # The end_node's left child is the root. Note that when printing iterators
# in isolation, the root is unknown.
if self.left_child(node) == self.root:
return None
parent = node.cast(self.cast_type).dereference()["__parent_"]
# If the value at the offset of __parent_ doesn't look like a valid pointer,
# then assume that node is the end_node (and therefore has no parent).
# End_node type has a pointer embedded, so should have pointer alignment.
if addr_as_long(parent) % _void_pointer_type.alignof:
return None
# This is ugly, but the only other option is to dereference an invalid
# pointer. 0x8000 is fairly arbitrary, but has had good results in
# practice. If there was a way to tell if a pointer is invalid without
# actually dereferencing it and spewing error messages, that would be ideal.
if parent < 0x8000:
return None
return parent
def is_left_child(self, node):
parent = self.parent(node)
return parent is not None and self.left_child(parent) == node
def is_right_child(self, node):
parent = self.parent(node)
return parent is not None and self.right_child(parent) == node
class AbstractRBTreePrinter(object):
"""Abstract super class for std::(multi)map, and std::(multi)set."""
def __init__(self, val):
self.val = val
tree = self.val["__tree_"]
self.size = int(_value_of_pair_first(tree["__pair3_"]))
dummy_root = tree["__pair1_"]
root = _value_of_pair_first(dummy_root)["__left_"]
cast_type = self._init_cast_type(val.type)
self.util = RBTreeUtils(cast_type, root)
def _get_key_value(self, node):
"""Subclasses should override to return a list of values to yield."""
raise NotImplementedError
def _traverse(self):
"""Traverses the binary search tree in order."""
current = self.util.root
skip_left_child = False
while True:
if not skip_left_child and self.util.left_child(current):
current = self.util.left_child(current)
continue
skip_left_child = False
for key_value in self._get_key_value(current):
yield "", key_value
right_child = self.util.right_child(current)
if right_child:
current = right_child
continue
while self.util.is_right_child(current):
current = self.util.parent(current)
if self.util.is_left_child(current):
current = self.util.parent(current)
skip_left_child = True
continue
break
def __iter__(self):
return self._traverse()
def children(self):
return self if self.util.cast_type and self.size > 0 else iter(())
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if self.size:
return "%s with %d elements" % (typename, self.size)
return "%s is empty" % typename
class StdMapPrinter(AbstractRBTreePrinter):
"""Print a std::map or std::multimap."""
def _init_cast_type(self, val_type):
map_it_type = gdb.lookup_type(
str(val_type) + "::iterator").strip_typedefs()
tree_it_type = map_it_type.template_argument(0)
node_ptr_type = tree_it_type.template_argument(1)
return node_ptr_type
def display_hint(self):
return "map"
def _get_key_value(self, node):
key_value = node.cast(self.util.cast_type).dereference()[
"__value_"]["__cc"]
return [key_value["first"], key_value["second"]]
class StdSetPrinter(AbstractRBTreePrinter):
"""Print a std::set."""
def _init_cast_type(self, val_type):
set_it_type = gdb.lookup_type(
str(val_type) + "::iterator").strip_typedefs()
node_ptr_type = set_it_type.template_argument(1)
return node_ptr_type
def display_hint(self):
return "array"
def _get_key_value(self, node):
key_value = node.cast(self.util.cast_type).dereference()["__value_"]
return [key_value]
class AbstractRBTreeIteratorPrinter(object):
"""Abstract super class for std::(multi)map, and std::(multi)set iterator."""
def _initialize(self, val, typename):
self.typename = typename
self.val = val
self.addr = self.val["__ptr_"]
cast_type = self.val.type.template_argument(1)
self.util = RBTreeUtils(cast_type, None)
if self.addr:
self.node = self.addr.cast(cast_type).dereference()
def _is_valid_node(self):
if not self.util.parent(self.addr):
return False
return self.util.is_left_child(self.addr) or \
self.util.is_right_child(self.addr)
def to_string(self):
if not self.addr:
return "%s is nullptr" % self.typename
return "%s " % self.typename
def _get_node_value(self, node):
raise NotImplementedError
def __iter__(self):
addr_str = "[%s]" % str(self.addr)
if not self._is_valid_node():
yield addr_str, " end()"
else:
yield addr_str, self._get_node_value(self.node)
def children(self):
return self if self.addr else iter(())
class MapIteratorPrinter(AbstractRBTreeIteratorPrinter):
"""Print a std::(multi)map iterator."""
def __init__(self, val):
self._initialize(val["__i_"],
_remove_generics(_prettify_typename(val.type)))
def _get_node_value(self, node):
return node["__value_"]["__cc"]
class SetIteratorPrinter(AbstractRBTreeIteratorPrinter):
"""Print a std::(multi)set iterator."""
def __init__(self, val):
self._initialize(val, _remove_generics(_prettify_typename(val.type)))
def _get_node_value(self, node):
return node["__value_"]
class StdFposPrinter(object):
"""Print a std::fpos or std::streampos."""
def __init__(self, val):
self.val = val
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
offset = self.val["__off_"]
state = self.val["__st_"]
count = state["__count"]
value = state["__value"]["__wch"]
return "%s with stream offset:%s with state: {count:%s value:%s}" % (
typename, offset, count, value)
class AbstractUnorderedCollectionPrinter(object):
"""Abstract super class for std::unordered_(multi)[set|map]."""
def __init__(self, val):
self.val = val
self.table = val["__table_"]
self.sentinel = self.table["__p1_"]
self.size = int(_value_of_pair_first(self.table["__p2_"]))
node_base_type = self.sentinel.type.template_argument(0)
self.cast_type = node_base_type.template_argument(0)
def _list_it(self, sentinel_ptr):
next_ptr = _value_of_pair_first(sentinel_ptr)["__next_"]
while str(next_ptr.cast(_void_pointer_type)) != "0x0":
next_val = next_ptr.cast(self.cast_type).dereference()
for key_value in self._get_key_value(next_val):
yield "", key_value
next_ptr = next_val["__next_"]
def to_string(self):
typename = _remove_generics(_prettify_typename(self.val.type))
if self.size:
return "%s with %d elements" % (typename, self.size)
return "%s is empty" % typename
def _get_key_value(self, node):
"""Subclasses should override to return a list of values to yield."""
raise NotImplementedError
def children(self):
return self if self.cast_type and self.size > 0 else iter(())
def __iter__(self):
return self._list_it(self.sentinel)
class StdUnorderedSetPrinter(AbstractUnorderedCollectionPrinter):
"""Print a std::unordered_(multi)set."""
def _get_key_value(self, node):
return [node["__value_"]]
def display_hint(self):
return "array"
class StdUnorderedMapPrinter(AbstractUnorderedCollectionPrinter):
"""Print a std::unordered_(multi)map."""
def _get_key_value(self, node):
key_value = node["__value_"]["__cc"]
return [key_value["first"], key_value["second"]]
def display_hint(self):
return "map"
class AbstractHashMapIteratorPrinter(object):
"""Abstract class for unordered collection iterators."""
def _initialize(self, val, addr):
self.val = val
self.typename = _remove_generics(_prettify_typename(self.val.type))
self.addr = addr
if self.addr:
self.node = self.addr.cast(self.cast_type).dereference()
def _get_key_value(self):
"""Subclasses should override to return a list of values to yield."""
raise NotImplementedError
def to_string(self):
if not self.addr:
return "%s = end()" % self.typename
return "%s " % self.typename
def children(self):
return self if self.addr else iter(())
def __iter__(self):
for key_value in self._get_key_value():
yield "", key_value
class StdUnorderedSetIteratorPrinter(AbstractHashMapIteratorPrinter):
"""Print a std::(multi)set iterator."""
def __init__(self, val):
self.cast_type = val.type.template_argument(0)
self._initialize(val, val["__node_"])
def _get_key_value(self):
return [self.node["__value_"]]
def display_hint(self):
return "array"
class StdUnorderedMapIteratorPrinter(AbstractHashMapIteratorPrinter):
"""Print a std::(multi)map iterator."""
def __init__(self, val):
self.cast_type = val.type.template_argument(0).template_argument(0)
self._initialize(val, val["__i_"]["__node_"])
def _get_key_value(self):
key_value = self.node["__value_"]["__cc"]
return [key_value["first"], key_value["second"]]
def display_hint(self):
return "map"
def _remove_std_prefix(typename):
match = re.match("^std::(.+)", typename)
return match.group(1) if match is not None else ""
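# Illustrative sketch (not part of the original printers): _remove_std_prefix
# is a pure string helper, so its behavior can be shown without gdb.
def _demo_remove_std_prefix():
    """Hypothetical helper, for illustration only."""
    assert _remove_std_prefix("std::vector") == "vector"
    assert _remove_std_prefix("boost::any") == ""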
class LibcxxPrettyPrinter(object):
"""PrettyPrinter object so gdb-commands like 'info pretty-printers' work."""
def __init__(self, name):
super(LibcxxPrettyPrinter, self).__init__()
self.name = name
self.enabled = True
self.lookup = {
"basic_string": StdStringPrinter,
"string": StdStringPrinter,
"tuple": StdTuplePrinter,
"unique_ptr": StdUniquePtrPrinter,
"shared_ptr": StdSharedPointerPrinter,
"weak_ptr": StdSharedPointerPrinter,
"bitset": StdBitsetPrinter,
"deque": StdDequePrinter,
"list": StdListPrinter,
"queue": StdQueueOrStackPrinter,
"stack": StdQueueOrStackPrinter,
"priority_queue": StdPriorityQueuePrinter,
"map": StdMapPrinter,
"multimap": StdMapPrinter,
"set": StdSetPrinter,
"multiset": StdSetPrinter,
"vector": StdVectorPrinter,
"__map_iterator": MapIteratorPrinter,
"__map_const_iterator": MapIteratorPrinter,
"__tree_iterator": SetIteratorPrinter,
"__tree_const_iterator": SetIteratorPrinter,
"fpos": StdFposPrinter,
"unordered_set": StdUnorderedSetPrinter,
"unordered_multiset": StdUnorderedSetPrinter,
"unordered_map": StdUnorderedMapPrinter,
"unordered_multimap": StdUnorderedMapPrinter,
"__hash_map_iterator": StdUnorderedMapIteratorPrinter,
"__hash_map_const_iterator": StdUnorderedMapIteratorPrinter,
"__hash_iterator": StdUnorderedSetIteratorPrinter,
"__hash_const_iterator": StdUnorderedSetIteratorPrinter,
}
self.subprinters = []
for name, subprinter in self.lookup.items():
# Subprinters and names are used only for the rarely used command "info
# pretty" (and related), so the name of the first data structure it prints
# is a reasonable choice.
if subprinter not in self.subprinters:
subprinter.name = name
self.subprinters.append(subprinter)
def __call__(self, val):
"""Return the pretty printer for a val, if the type is supported."""
# Do not handle any type that is not a struct/class.
if val.type.strip_typedefs().code != gdb.TYPE_CODE_STRUCT:
return None
# Don't attempt types known to be inside libstdcxx.
typename = val.type.name or val.type.tag or str(val.type)
match = re.match("^std::(__.*?)::", typename)
if match is None or match.group(1) in ["__cxx1998",
"__debug",
"__7",
"__g"]:
return None
# Handle any using declarations or other typedefs.
typename = _prettify_typename(val.type)
if not typename:
return None
without_generics = _remove_generics(typename)
lookup_name = _remove_std_prefix(without_generics)
if lookup_name in self.lookup:
return self.lookup[lookup_name](val)
return None
_libcxx_printer_name = "libcxx_pretty_printer"
# These are called for every binary object file, which could be thousands in
# certain pathological cases. Limit our pretty printers to the progspace.
def _register_libcxx_printers(event):
progspace = event.new_objfile.progspace
# It would be ideal to get the endianness at print time, but
# gdb.execute clears gdb's internal wrap buffer, removing any values
# already generated as part of a larger data structure, and there is
    # no python api to get the endianness. Mixed-endianness debugging is
    # rare enough that this workaround should be adequate.
    global _libcpp_big_endian
    _libcpp_big_endian = "big endian" in gdb.execute("show endian",
                                                     to_string=True)
if not getattr(progspace, _libcxx_printer_name, False):
print("Loading libc++ pretty-printers.")
gdb.printing.register_pretty_printer(
progspace, LibcxxPrettyPrinter(_libcxx_printer_name))
setattr(progspace, _libcxx_printer_name, True)
def _unregister_libcxx_printers(event):
progspace = event.progspace
if getattr(progspace, _libcxx_printer_name, False):
for printer in progspace.pretty_printers:
if getattr(printer, "name", "none") == _libcxx_printer_name:
progspace.pretty_printers.remove(printer)
setattr(progspace, _libcxx_printer_name, False)
break
def register_libcxx_printer_loader():
"""Register event handlers to load libc++ pretty-printers."""
gdb.events.new_objfile.connect(_register_libcxx_printers)
gdb.events.clear_objfiles.connect(_unregister_libcxx_printers)
| libcudacxx-main | libcxx/utils/gdb/libcxx/printers.py |
#! /usr/bin/env python
# encoding: utf-8
import argparse
import errno
import logging
import os
import platform
import re
import sys
import subprocess
import tempfile
try:
import winreg
except ImportError:
import _winreg as winreg
try:
import urllib.request as request
except ImportError:
import urllib as request
try:
import urllib.parse as parse
except ImportError:
import urlparse as parse
class EmptyLogger(object):
'''
Provides an implementation that performs no logging
'''
def debug(self, *k, **kw):
pass
def info(self, *k, **kw):
pass
def warn(self, *k, **kw):
pass
def error(self, *k, **kw):
pass
def critical(self, *k, **kw):
pass
def setLevel(self, *k, **kw):
pass
urls = (
'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
'repository.txt',
'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
'repository.txt'
)
'''
A list of mingw-build repositories
'''
def repository(urls = urls, log = EmptyLogger()):
'''
    Downloads the mingw-builds repository files and parses them
'''
log.info('getting mingw-builds repository')
versions = {}
re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
re_sub = r'http://downloads.sourceforge.net/project/\1'
for url in urls:
log.debug(' - requesting: %s', url)
socket = request.urlopen(url)
repo = socket.read()
if not isinstance(repo, str):
repo = repo.decode();
socket.close()
for entry in repo.split('\n')[:-1]:
value = entry.split('|')
version = tuple([int(n) for n in value[0].strip().split('.')])
version = versions.setdefault(version, {})
arch = value[1].strip()
if arch == 'x32':
arch = 'i686'
elif arch == 'x64':
arch = 'x86_64'
arch = version.setdefault(arch, {})
threading = arch.setdefault(value[2].strip(), {})
exceptions = threading.setdefault(value[3].strip(), {})
revision = exceptions.setdefault(int(value[4].strip()[3:]),
re_sourceforge.sub(re_sub, value[5].strip()))
return versions
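# Illustrative note (not part of the original script): each line of the
# repository file is '|'-separated; a hypothetical entry such as
#   4.9.2|x64|posix|seh|rev0|http://sourceforge.net/projects/example/files/x.7z
# would be parsed into versions[(4, 9, 2)]['x86_64']['posix']['seh'][0] = <url>,
# with the sourceforge project URL rewritten to its direct download form.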
def find_in_path(file, path=None):
'''
Attempts to find an executable in the path
'''
if platform.system() == 'Windows':
file += '.exe'
if path is None:
path = os.environ.get('PATH', '')
if type(path) is type(''):
path = path.split(os.pathsep)
return list(filter(os.path.exists,
map(lambda dir, file=file: os.path.join(dir, file), path)))
def find_7zip(log = EmptyLogger()):
'''
Attempts to find 7zip for unpacking the mingw-build archives
'''
log.info('finding 7zip')
path = find_in_path('7z')
if not path:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
path, _ = winreg.QueryValueEx(key, 'Path')
path = [os.path.join(path, '7z.exe')]
log.debug('found \'%s\'', path[0])
return path[0]
def unpack(archive, location, log = EmptyLogger()):
'''
Unpacks a mingw-builds archive
'''
sevenzip = find_7zip(log)
log.info('unpacking %s', os.path.basename(archive))
cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
log.debug(' - %r', cmd)
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout = devnull)
def download(url, location, log = EmptyLogger()):
'''
Downloads and unpacks a mingw-builds archive
'''
log.info('downloading MinGW')
log.debug(' - url: %s', url)
log.debug(' - location: %s', location)
re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')
stream = request.urlopen(url)
try:
content = stream.getheader('Content-Disposition') or ''
except AttributeError:
content = stream.headers.getheader('Content-Disposition') or ''
matches = re_content.match(content)
if matches:
filename = matches.group(2)
else:
parsed = parse.urlparse(stream.geturl())
filename = os.path.basename(parsed.path)
try:
os.makedirs(location)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(location):
pass
else:
raise
archive = os.path.join(location, filename)
with open(archive, 'wb') as out:
while True:
buf = stream.read(1024)
if not buf:
break
out.write(buf)
unpack(archive, location, log = log)
os.remove(archive)
possible = os.path.join(location, 'mingw64')
if not os.path.exists(possible):
possible = os.path.join(location, 'mingw32')
if not os.path.exists(possible):
raise ValueError('Failed to find unpacked MinGW: ' + possible)
return possible
def root(location = None, arch = None, version = None, threading = None,
exceptions = None, revision = None, log = EmptyLogger()):
'''
Returns the root folder of a specific version of the mingw-builds variant
of gcc. Will download the compiler if needed
'''
# Get the repository if we don't have all the information
if not (arch and version and threading and exceptions and revision):
versions = repository(log = log)
# Determine some defaults
version = version or max(versions.keys())
if not arch:
arch = platform.machine().lower()
if arch == 'x86':
arch = 'i686'
elif arch == 'amd64':
arch = 'x86_64'
if not threading:
keys = versions[version][arch].keys()
if 'posix' in keys:
threading = 'posix'
elif 'win32' in keys:
threading = 'win32'
else:
threading = keys[0]
if not exceptions:
keys = versions[version][arch][threading].keys()
if 'seh' in keys:
exceptions = 'seh'
elif 'sjlj' in keys:
exceptions = 'sjlj'
else:
exceptions = keys[0]
if revision == None:
revision = max(versions[version][arch][threading][exceptions].keys())
if not location:
location = os.path.join(tempfile.gettempdir(), 'mingw-builds')
# Get the download url
url = versions[version][arch][threading][exceptions][revision]
# Tell the user whatzzup
log.info('finding MinGW %s', '.'.join(str(v) for v in version))
log.debug(' - arch: %s', arch)
log.debug(' - threading: %s', threading)
log.debug(' - exceptions: %s', exceptions)
log.debug(' - revision: %s', revision)
log.debug(' - url: %s', url)
# Store each specific revision differently
slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
slug = slug.format(
version = '.'.join(str(v) for v in version),
arch = arch,
threading = threading,
exceptions = exceptions,
revision = revision
)
if arch == 'x86_64':
root_dir = os.path.join(location, slug, 'mingw64')
elif arch == 'i686':
root_dir = os.path.join(location, slug, 'mingw32')
else:
raise ValueError('Unknown MinGW arch: ' + arch)
# Download if needed
if not os.path.exists(root_dir):
downloaded = download(url, os.path.join(location, slug), log = log)
if downloaded != root_dir:
raise ValueError('The location of mingw did not match\n%s\n%s'
% (downloaded, root_dir))
return root_dir
def str2ver(string):
'''
Converts a version string into a tuple
'''
try:
version = tuple(int(v) for v in string.split('.'))
if len(version) is not 3:
raise ValueError()
except ValueError:
raise argparse.ArgumentTypeError(
'please provide a three digit version string')
return version
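# Illustrative sketch (not part of the original script): str2ver accepts only
# three-component dotted versions and returns them as a tuple of ints.
def _demo_str2ver():
    """Hypothetical helper, for illustration only."""
    assert str2ver('4.9.2') == (4, 9, 2)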
def main():
'''
Invoked when the script is run directly by the python interpreter
'''
parser = argparse.ArgumentParser(
description = 'Downloads a specific version of MinGW',
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--location',
help = 'the location to download the compiler to',
default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'],
help = 'the target MinGW architecture string')
parser.add_argument('--version', type = str2ver,
help = 'the version of GCC to download')
parser.add_argument('--threading', choices = ['posix', 'win32'],
help = 'the threading type of the compiler')
parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'],
help = 'the method to throw exceptions')
parser.add_argument('--revision', type=int,
help = 'the revision of the MinGW release')
group = parser.add_mutually_exclusive_group()
group.add_argument('-v', '--verbose', action='store_true',
help='increase the script output verbosity')
group.add_argument('-q', '--quiet', action='store_true',
help='only print errors and warning')
args = parser.parse_args()
# Create the logger
logger = logging.getLogger('mingw')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
if args.quiet:
logger.setLevel(logging.WARN)
if args.verbose:
logger.setLevel(logging.DEBUG)
# Get MinGW
root_dir = root(location = args.location, arch = args.arch,
version = args.version, threading = args.threading,
exceptions = args.exceptions, revision = args.revision,
log = logger)
sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin'))
if __name__ == '__main__':
try:
main()
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
sys.exit(1)
except OSError as e:
sys.stderr.write('OS error: %s\n' % e)
sys.exit(1)
except KeyboardInterrupt as e:
sys.stderr.write('Killed\n')
sys.exit(1)
| libcudacxx-main | libcxx/utils/google-benchmark/mingw.py |
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Werror',
'-pedantic-errors',
'-std=c++0x',
'-fno-strict-aliasing',
'-O3',
'-DNDEBUG',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
'-I', 'include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cc' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
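# Illustrative sketch (not part of the original configuration): relative
# include paths are resolved against the given working directory (the example
# assumes a POSIX-style path separator).
def _DemoMakeRelativePathsInFlagsAbsolute():
  """Hypothetical helper, for illustration only."""
  flags_out = MakeRelativePathsInFlagsAbsolute(
      [ '-I', 'include', '-isystem', '/usr/include' ], '/project' )
  assert flags_out == [ '-I', '/project/include', '-isystem', '/usr/include' ]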
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| libcudacxx-main | libcxx/utils/google-benchmark/.ycm_extra_conf.py |
#!/usr/bin/env python
"""
strip_asm.py - Cleanup ASM output for the specified file
"""
from argparse import ArgumentParser
import sys
import os
import re
def find_used_labels(asm):
found = set()
label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
for l in asm.splitlines():
m = label_re.match(l)
if m:
found.add('.L%s' % m.group(1))
return found
def normalize_labels(asm):
decls = set()
label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if m:
decls.add(m.group(0))
if len(decls) == 0:
return asm
needs_dot = next(iter(decls))[0] != '.'
if not needs_dot:
return asm
for ld in decls:
asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm)
return asm
def transform_labels(asm):
asm = normalize_labels(asm)
used_decls = find_used_labels(asm)
new_asm = ''
label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if not m or m.group(0) in used_decls:
new_asm += l
new_asm += '\n'
return new_asm
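# Illustrative sketch (not part of the original script): labels are first
# normalized to a leading '.', then label declarations that no jump
# instruction references are dropped, so only the jump target survives below.
def _demo_transform_labels():
    """Hypothetical helper, for illustration only."""
    asm_in = "foo:\n jmp .L1\n.L1:\n ret\n.L2:\n nop\n"
    asm_out = transform_labels(asm_in)
    assert ".L1:" in asm_out and ".L2:" not in asm_out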
def is_identifier(tk):
if len(tk) == 0:
return False
first = tk[0]
if not first.isalpha() and first != '_':
return False
for i in range(1, len(tk)):
c = tk[i]
if not c.isalnum() and c != '_':
return False
return True
def process_identifiers(l):
"""
process_identifiers - process all identifiers and modify them to have
consistent names across all platforms; specifically across ELF and MachO.
    For example, MachO inserts an additional underscore at the beginning of
names. This function removes that.
"""
parts = re.split(r'([a-zA-Z0-9_]+)', l)
new_line = ''
for tk in parts:
if is_identifier(tk):
if tk.startswith('__Z'):
tk = tk[1:]
elif tk.startswith('_') and len(tk) > 1 and \
tk[1].isalpha() and tk[1] != 'Z':
tk = tk[1:]
new_line += tk
return new_line
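# Illustrative sketch (not part of the original script): a hypothetical Mach-O
# style symbol loses its extra leading underscore, while other text is kept.
def _demo_process_identifiers():
    """Hypothetical helper, for illustration only."""
    assert process_identifiers("__ZN3fooEv:") == "_ZN3fooEv:"
    assert process_identifiers("_main:") == "main:"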
def process_asm(asm):
"""
Strip the ASM of unwanted directives and lines
"""
new_contents = ''
asm = transform_labels(asm)
# TODO: Add more things we want to remove
    discard_regexes = [
        re.compile(r"\s+\..*$"),  # directive
        re.compile(r"\s*#(NO_APP|APP)$"),  # inline ASM
        re.compile(r"\s*#.*$"),  # comment line
        re.compile(r"\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"),  # global directive
        re.compile(r"\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"),
    ]
keep_regexes = [
]
fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:")
for l in asm.splitlines():
# Remove Mach-O attribute
l = l.replace('@GOTPCREL', '')
add_line = True
for reg in discard_regexes:
if reg.match(l) is not None:
add_line = False
break
for reg in keep_regexes:
if reg.match(l) is not None:
add_line = True
break
if add_line:
if fn_label_def.match(l) and len(new_contents) != 0:
new_contents += '\n'
l = process_identifiers(l)
new_contents += l
new_contents += '\n'
return new_contents
def main():
parser = ArgumentParser(
description='generate a stripped assembly file')
parser.add_argument(
'input', metavar='input', type=str, nargs=1,
help='An input assembly file')
parser.add_argument(
'out', metavar='output', type=str, nargs=1,
help='The output file')
args, unknown_args = parser.parse_known_args()
input = args.input[0]
output = args.out[0]
if not os.path.isfile(input):
print(("ERROR: input file '%s' does not exist") % input)
sys.exit(1)
contents = None
with open(input, 'r') as f:
contents = f.read()
new_contents = process_asm(contents)
with open(output, 'w') as f:
f.write(new_contents)
if __name__ == '__main__':
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| libcudacxx-main | libcxx/utils/google-benchmark/tools/strip_asm.py |
#!/usr/bin/env python
import unittest
"""
compare.py - versatile benchmark output compare tool
"""
import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *
def check_inputs(in1, in2, flags):
"""
Perform checking on the user provided inputs and diagnose any abnormalities
"""
in1_kind, in1_err = classify_input_file(in1)
in2_kind, in2_err = classify_input_file(in2)
output_file = find_benchmark_flag('--benchmark_out=', flags)
output_type = find_benchmark_flag('--benchmark_out_format=', flags)
if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
print(("WARNING: '--benchmark_out=%s' will be passed to both "
"benchmarks causing it to be overwritten") % output_file)
if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
print("WARNING: passing optional flags has no effect since both "
"inputs are JSON")
if output_type is not None and output_type != 'json':
print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
" is not supported.") % output_type)
sys.exit(1)
def create_parser():
parser = ArgumentParser(
description='versatile benchmark output compare tool')
parser.add_argument(
'-a',
'--display_aggregates_only',
dest='display_aggregates_only',
action="store_true",
help="If there are repetitions, by default, we display everything - the"
" actual runs, and the aggregates computed. Sometimes, it is "
"desirable to only view the aggregates. E.g. when there are a lot "
"of repetitions. Do note that only the display is affected. "
"Internally, all the actual runs are still used, e.g. for U test.")
utest = parser.add_argument_group()
utest.add_argument(
'--no-utest',
dest='utest',
default=True,
action="store_false",
help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
alpha_default = 0.05
utest.add_argument(
"--alpha",
dest='utest_alpha',
default=alpha_default,
type=float,
help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") %
alpha_default)
subparsers = parser.add_subparsers(
help='This tool has multiple modes of operation:',
dest='mode')
parser_a = subparsers.add_parser(
'benchmarks',
help='The most simple use-case, compare all the output of these two benchmarks')
baseline = parser_a.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
contender = parser_a.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
parser_a.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_b = subparsers.add_parser(
'filters', help='Compare filter one with the filter two of benchmark')
baseline = parser_b.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test',
metavar='test',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
help='The first filter, that will be used as baseline')
contender = parser_b.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
help='The second filter, that will be compared against the baseline')
parser_b.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_c = subparsers.add_parser(
'benchmarksfiltered',
help='Compare filter one of first benchmark with filter two of the second benchmark')
baseline = parser_c.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
help='The first filter, that will be used as baseline')
contender = parser_c.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
help='The second benchmark executable or JSON output file, that will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
help='The second filter, that will be compared against the baseline')
parser_c.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
return parser
def main():
# Parse the command line flags
parser = create_parser()
args, unknown_args = parser.parse_known_args()
if args.mode is None:
parser.print_help()
exit(1)
assert not unknown_args
benchmark_options = args.benchmark_options
if args.mode == 'benchmarks':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = ''
filter_contender = ''
# NOTE: if test_baseline == test_contender, you are analyzing the stdev
description = 'Comparing %s to %s' % (test_baseline, test_contender)
elif args.mode == 'filters':
test_baseline = args.test[0].name
test_contender = args.test[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if filter_baseline == filter_contender, you are analyzing the
# stdev
description = 'Comparing %s to %s (from %s)' % (
filter_baseline, filter_contender, args.test[0].name)
elif args.mode == 'benchmarksfiltered':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if test_baseline == test_contender and
# filter_baseline == filter_contender, you are analyzing the stdev
description = 'Comparing %s (from %s) to %s (from %s)' % (
filter_baseline, test_baseline, filter_contender, test_contender)
else:
# should never happen
print("Unrecognized mode of operation: '%s'" % args.mode)
parser.print_help()
exit(1)
check_inputs(test_baseline, test_contender, benchmark_options)
if args.display_aggregates_only:
benchmark_options += ['--benchmark_display_aggregates_only=true']
options_baseline = []
options_contender = []
if filter_baseline and filter_contender:
options_baseline = ['--benchmark_filter=%s' % filter_baseline]
options_contender = ['--benchmark_filter=%s' % filter_contender]
# Run the benchmarks and report the results
json1 = json1_orig = gbench.util.run_or_load_benchmark(
test_baseline, benchmark_options + options_baseline)
json2 = json2_orig = gbench.util.run_or_load_benchmark(
test_contender, benchmark_options + options_contender)
# Now, filter the benchmarks so that the difference report can work
if filter_baseline and filter_contender:
replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
json1 = gbench.report.filter_benchmark(
json1_orig, filter_baseline, replacement)
json2 = gbench.report.filter_benchmark(
json2_orig, filter_contender, replacement)
# Diff and output
output_lines = gbench.report.generate_difference_report(
json1, json2, args.display_aggregates_only,
args.utest, args.utest_alpha)
print(description)
for ln in output_lines:
print(ln)
class TestParser(unittest.TestCase):
def setUp(self):
self.parser = create_parser()
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'gbench',
'Inputs')
self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
self.testInput1 = os.path.join(testInputs, 'test1_run2.json')
def test_benchmarks_basic(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_without_utest(self):
parsed = self.parser.parse_args(
['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertFalse(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.05)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_display_aggregates_only(self):
parsed = self.parser.parse_args(
['-a', 'benchmarks', self.testInput0, self.testInput1])
self.assertTrue(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_with_utest_alpha(self):
parsed = self.parser.parse_args(
['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.314)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_without_utest_with_utest_alpha(self):
parsed = self.parser.parse_args(
['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertFalse(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.314)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, 'd'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['d'])
def test_benchmarks_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_basic(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertFalse(parsed.benchmark_options)
def test_filters_with_remainder(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', '--', 'f'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['f'])
def test_benchmarksfiltered_basic(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertFalse(parsed.benchmark_options)
def test_benchmarksfiltered_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'f')
def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'g')
if __name__ == '__main__':
# unittest.main()
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| libcudacxx-main | libcxx/utils/google-benchmark/tools/compare.py |
"""util.py - General utilities for running, loading, and processing benchmarks
"""
import json
import os
import tempfile
import subprocess
import sys
# Input file type enumeration
IT_Invalid = 0
IT_JSON = 1
IT_Executable = 2
_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
def is_executable_file(filename):
"""
Return 'True' if 'filename' names a valid file which is likely
an executable. A file is considered an executable if it starts with the
magic bytes for a EXE, Mach O, or ELF file.
"""
if not os.path.isfile(filename):
return False
with open(filename, mode='rb') as f:
magic_bytes = f.read(_num_magic_bytes)
if sys.platform == 'darwin':
return magic_bytes in [
b'\xfe\xed\xfa\xce', # MH_MAGIC
b'\xce\xfa\xed\xfe', # MH_CIGAM
b'\xfe\xed\xfa\xcf', # MH_MAGIC_64
b'\xcf\xfa\xed\xfe', # MH_CIGAM_64
b'\xca\xfe\xba\xbe', # FAT_MAGIC
b'\xbe\xba\xfe\xca' # FAT_CIGAM
]
elif sys.platform.startswith('win'):
return magic_bytes == b'MZ'
else:
return magic_bytes == b'\x7FELF'
def is_json_file(filename):
"""
Returns 'True' if 'filename' names a valid JSON output file.
'False' otherwise.
"""
try:
with open(filename, 'r') as f:
json.load(f)
return True
except BaseException:
pass
return False
def classify_input_file(filename):
"""
Return a tuple (type, msg) where 'type' specifies the classified type
    of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human-readable
    string representing the error.
"""
ftype = IT_Invalid
err_msg = None
if not os.path.exists(filename):
err_msg = "'%s' does not exist" % filename
elif not os.path.isfile(filename):
err_msg = "'%s' does not name a file" % filename
elif is_executable_file(filename):
ftype = IT_Executable
elif is_json_file(filename):
ftype = IT_JSON
else:
err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
return ftype, err_msg
def check_input_file(filename):
"""
Classify the file named by 'filename' and return the classification.
If the file is classified as 'IT_Invalid' print an error message and exit
the program.
"""
ftype, msg = classify_input_file(filename)
if ftype == IT_Invalid:
print("Invalid input file: %s" % msg)
sys.exit(1)
return ftype
def find_benchmark_flag(prefix, benchmark_flags):
"""
Search the specified list of flags for a flag matching `<prefix><arg>` and
if it is found return the arg it specifies. If specified more than once the
last value is returned. If the flag is not found None is returned.
"""
assert prefix.startswith('--') and prefix.endswith('=')
result = None
for f in benchmark_flags:
if f.startswith(prefix):
result = f[len(prefix):]
return result
def remove_benchmark_flags(prefix, benchmark_flags):
"""
Return a new list containing the specified benchmark_flags except those
with the specified prefix.
"""
assert prefix.startswith('--') and prefix.endswith('=')
return [f for f in benchmark_flags if not f.startswith(prefix)]
def load_benchmark_results(fname):
"""
Read benchmark output from a file and return the JSON object.
REQUIRES: 'fname' names a file containing JSON benchmark output.
"""
with open(fname, 'r') as f:
return json.load(f)
def run_benchmark(exe_name, benchmark_flags):
"""
Run a benchmark specified by 'exe_name' with the specified
'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
real time console output.
RETURNS: A JSON object representing the benchmark output
"""
output_name = find_benchmark_flag('--benchmark_out=',
benchmark_flags)
is_temp_output = False
if output_name is None:
is_temp_output = True
thandle, output_name = tempfile.mkstemp()
os.close(thandle)
benchmark_flags = list(benchmark_flags) + \
['--benchmark_out=%s' % output_name]
cmd = [exe_name] + benchmark_flags
print("RUNNING: %s" % ' '.join(cmd))
exitCode = subprocess.call(cmd)
if exitCode != 0:
print('TEST FAILED...')
sys.exit(exitCode)
json_res = load_benchmark_results(output_name)
if is_temp_output:
os.unlink(output_name)
return json_res
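# Minimal usage sketch (hypothetical executable name and filter):
#   json_res = run_benchmark('./bm_algorithm', ['--benchmark_filter=BM_Sort'])
# The subprocess streams its console output live while the JSON report is
# captured through a (temporary) --benchmark_out file and returned.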
def run_or_load_benchmark(filename, benchmark_flags):
"""
Get the results for a specified benchmark. If 'filename' specifies
an executable benchmark then the results are generated by running the
benchmark. Otherwise 'filename' must name a valid JSON output file,
which is loaded and the result returned.
"""
ftype = check_input_file(filename)
if ftype == IT_JSON:
return load_benchmark_results(filename)
elif ftype == IT_Executable:
return run_benchmark(filename, benchmark_flags)
else:
assert False # This branch is unreachable
| libcudacxx-main | libcxx/utils/google-benchmark/tools/gbench/util.py |
"""Google Benchmark tooling"""
__author__ = 'Eric Fiselier'
__email__ = '[email protected]'
__versioninfo__ = (0, 5, 0)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = []
| libcudacxx-main | libcxx/utils/google-benchmark/tools/gbench/__init__.py |
import unittest
"""report.py - Utilities for reporting statistics about benchmark results
"""
import os
import re
import copy
from scipy.stats import mannwhitneyu
class BenchmarkColor(object):
def __init__(self, name, code):
self.name = name
self.code = code
def __repr__(self):
return '%s%r' % (self.__class__.__name__,
(self.name, self.code))
def __format__(self, format):
return self.code
# Benchmark Colors Enumeration
BC_NONE = BenchmarkColor('NONE', '')
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m')
BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
UTEST_MIN_REPETITIONS = 2
UTEST_OPTIMAL_REPETITIONS = 9  # Lowest reasonable number; more is better.
UTEST_COL_NAME = "_pvalue"
def color_format(use_color, fmt_str, *args, **kwargs):
"""
Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
is False then all color codes in 'args' and 'kwargs' are replaced with
the empty string.
"""
assert use_color is True or use_color is False
if not use_color:
args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for arg in args]
kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for key, arg in kwargs.items()}
return fmt_str.format(*args, **kwargs)
def find_longest_name(benchmark_list):
"""
Return the length of the longest benchmark name in a given list of
benchmark JSON objects
"""
longest_name = 1
for bc in benchmark_list:
if len(bc['name']) > longest_name:
longest_name = len(bc['name'])
return longest_name
def calculate_change(old_val, new_val):
"""
Return a float representing the decimal change between old_val and new_val.
"""
if old_val == 0 and new_val == 0:
return 0.0
if old_val == 0:
return float(new_val - old_val) / (float(old_val + new_val) / 2)
return float(new_val - old_val) / abs(old_val)
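# Worked examples (illustrative, not part of the original file):
#   calculate_change(100, 110) ->  0.10  (10% regression)
#   calculate_change(100, 90)  -> -0.10  (10% improvement)
#   calculate_change(0, 10)    ->  2.00  (measured against the midpoint, which
#                                         avoids dividing by zero)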
def filter_benchmark(json_orig, family, replacement=""):
"""
Apply a filter to the json, and only leave the 'family' of benchmarks.
"""
regex = re.compile(family)
filtered = {}
filtered['benchmarks'] = []
for be in json_orig['benchmarks']:
if not regex.search(be['name']):
continue
filteredbench = copy.deepcopy(be) # Do NOT modify the old name!
filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
filtered['benchmarks'].append(filteredbench)
return filtered
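# Illustrative usage (mirrors the unit tests further below):
#   json1 = filter_benchmark(json, "BM_Z.ro", ".")
# keeps only benchmarks whose names match the regex and rewrites the matched
# portion of each name to '.', leaving the input JSON object unmodified.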
def get_unique_benchmark_names(json):
"""
While *keeping* the order, give all the unique 'names' used for benchmarks.
"""
seen = set()
uniqued = [x['name'] for x in json['benchmarks']
if x['name'] not in seen and
(seen.add(x['name']) or True)]
return uniqued
def intersect(list1, list2):
"""
Given two lists, get a new list consisting of the elements only contained
in *both of the input lists*, while preserving the ordering.
"""
return [x for x in list1 if x in list2]
def partition_benchmarks(json1, json2):
"""
While preserving the ordering, find benchmarks with the same names in
both of the inputs, and group them.
(i.e. partition/filter into groups with common name)
"""
json1_unique_names = get_unique_benchmark_names(json1)
json2_unique_names = get_unique_benchmark_names(json2)
names = intersect(json1_unique_names, json2_unique_names)
partitions = []
for name in names:
# Pick the time unit from the first entry of the lhs benchmark.
        time_unit = next(x['time_unit']
                         for x in json1['benchmarks'] if x['name'] == name)
# Filter by name and time unit.
lhs = [x for x in json1['benchmarks'] if x['name'] == name and
x['time_unit'] == time_unit]
rhs = [x for x in json2['benchmarks'] if x['name'] == name and
x['time_unit'] == time_unit]
partitions.append([lhs, rhs])
return partitions
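# Illustrative result (assumption): for two runs that both contain 'BM_One'
# and 'BM_Two', the returned value has the shape
#   [[<BM_One entries of json1>, <BM_One entries of json2>],
#    [<BM_Two entries of json1>, <BM_Two entries of json2>]]
# where the repetition counts on the two sides may differ.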
def extract_field(partition, field_name):
# The count of elements may be different. We want *all* of them.
lhs = [x[field_name] for x in partition[0]]
rhs = [x[field_name] for x in partition[1]]
return [lhs, rhs]
def print_utest(partition, utest_alpha, first_col_width, use_color=True):
timings_time = extract_field(partition, 'real_time')
timings_cpu = extract_field(partition, 'cpu_time')
min_rep_cnt = min(len(timings_time[0]),
len(timings_time[1]),
len(timings_cpu[0]),
len(timings_cpu[1]))
    # Does *everything* have at least UTEST_MIN_REPETITIONS repetitions?
if min_rep_cnt < UTEST_MIN_REPETITIONS:
return []
def get_utest_color(pval):
return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
time_pvalue = mannwhitneyu(
timings_time[0], timings_time[1], alternative='two-sided').pvalue
cpu_pvalue = mannwhitneyu(
timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue
dsc = "U Test, Repetitions: {} vs {}".format(
len(timings_cpu[0]), len(timings_cpu[1]))
dsc_color = BC_OKGREEN
if min_rep_cnt < UTEST_OPTIMAL_REPETITIONS:
dsc_color = BC_WARNING
dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
UTEST_OPTIMAL_REPETITIONS)
special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
last_name = partition[0][0]['name']
return [color_format(use_color,
special_str,
BC_HEADER,
"{}{}".format(last_name, UTEST_COL_NAME),
first_col_width,
get_utest_color(time_pvalue), time_pvalue,
get_utest_color(cpu_pvalue), cpu_pvalue,
dsc_color, dsc,
endc=BC_ENDC)]
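# Illustrative output (shape taken from the unit tests below; spacing is
# approximate): one extra row per benchmark family such as
#   "BM_Two_pvalue    0.6985    0.6985   U Test, Repetitions: 2 vs 2. ..."
# or an empty list when fewer than UTEST_MIN_REPETITIONS repetitions exist.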
def generate_difference_report(
json1,
json2,
display_aggregates_only=False,
utest=False,
utest_alpha=0.05,
use_color=True):
"""
Calculate and report the difference between each test of two benchmarks
runs specified as 'json1' and 'json2'.
"""
assert utest is True or utest is False
first_col_width = find_longest_name(json1['benchmarks'])
def find_test(name):
for b in json2['benchmarks']:
if b['name'] == name:
return b
return None
first_col_width = max(
first_col_width,
len('Benchmark'))
first_col_width += len(UTEST_COL_NAME)
first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
'Benchmark', 12 + first_col_width)
output_strs = [first_line, '-' * len(first_line)]
partitions = partition_benchmarks(json1, json2)
for partition in partitions:
        # Careful, we may have different repetition counts.
for i in range(min(len(partition[0]), len(partition[1]))):
bn = partition[0][i]
other_bench = partition[1][i]
# *If* we were asked to only display aggregates,
# and if it is non-aggregate, then skip it.
if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench:
assert bn['run_type'] == other_bench['run_type']
if bn['run_type'] != 'aggregate':
continue
fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
def get_color(res):
if res > 0.05:
return BC_FAIL
elif res > -0.07:
return BC_WHITE
else:
return BC_CYAN
tres = calculate_change(bn['real_time'], other_bench['real_time'])
cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
output_strs += [color_format(use_color,
fmt_str,
BC_HEADER,
bn['name'],
first_col_width,
get_color(tres),
tres,
get_color(cpures),
cpures,
bn['real_time'],
other_bench['real_time'],
bn['cpu_time'],
other_bench['cpu_time'],
endc=BC_ENDC)]
# After processing the whole partition, if requested, do the U test.
if utest:
output_strs += print_utest(partition,
utest_alpha=utest_alpha,
first_col_width=first_col_width,
use_color=use_color)
return output_strs
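# Illustrative call (assumption; compare.py is the usual driver):
#   lines = generate_difference_report(json1, json2, use_color=False)
# returns a list of strings -- a two-line header followed by one row per
# benchmark pair, plus optional U-test rows -- ready to be joined with '\n'.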
###############################################################################
# Unit tests
class TestGetUniqueBenchmarkNames(unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput = os.path.join(testInputs, 'test3_run0.json')
with open(testOutput, 'r') as f:
json = json.load(f)
return json
def test_basic(self):
expect_lines = [
'BM_One',
'BM_Two',
'short', # These two are not sorted
'medium', # These two are not sorted
]
json = self.load_results()
output_lines = get_unique_benchmark_names(json)
print("\n")
print("\n".join(output_lines))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
self.assertEqual(expect_lines[i], output_lines[i])
class TestReportDifference(unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput1 = os.path.join(testInputs, 'test1_run1.json')
testOutput2 = os.path.join(testInputs, 'test1_run2.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2
def test_basic(self):
expect_lines = [
['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
['BM_100xSlower', '+99.0000', '+99.0000',
'100', '10000', '100', '10000'],
['BM_100xFaster', '-0.9900', '-0.9900',
'10000', '100', '10000', '100'],
['BM_10PercentCPUToTime', '+0.1000',
'-0.1000', '100', '110', '100', '90'],
['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
]
json1, json2 = self.load_results()
output_lines_with_header = generate_difference_report(
json1, json2, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
class TestReportDifferenceBetweenFamilies(unittest.TestCase):
def load_result(self):
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput = os.path.join(testInputs, 'test2_run.json')
with open(testOutput, 'r') as f:
json = json.load(f)
return json
def test_basic(self):
expect_lines = [
['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
]
json = self.load_result()
json1 = filter_benchmark(json, "BM_Z.ro", ".")
json2 = filter_benchmark(json, "BM_O.e", ".")
output_lines_with_header = generate_difference_report(
json1, json2, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
class TestReportDifferenceWithUTest(unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput1 = os.path.join(testInputs, 'test3_run0.json')
testOutput2 = os.path.join(testInputs, 'test3_run1.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2
def test_utest(self):
expect_lines = []
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
['BM_Two_pvalue',
'0.6985',
'0.6985',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'2.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
['short_pvalue',
'0.7671',
'0.1489',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'3.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
]
json1, json2 = self.load_results()
output_lines_with_header = generate_difference_report(
json1, json2, utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'Inputs')
testOutput1 = os.path.join(testInputs, 'test3_run0.json')
testOutput2 = os.path.join(testInputs, 'test3_run1.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2
def test_utest(self):
expect_lines = []
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
['BM_Two_pvalue',
'0.6985',
'0.6985',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'2.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
['short_pvalue',
'0.7671',
'0.1489',
'U',
'Test,',
'Repetitions:',
'2',
'vs',
'3.',
'WARNING:',
'Results',
'unreliable!',
'9+',
'repetitions',
'recommended.'],
]
json1, json2 = self.load_results()
output_lines_with_header = generate_difference_report(
json1, json2, display_aggregates_only=True,
utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
if __name__ == '__main__':
unittest.main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| libcudacxx-main | libcxx/utils/google-benchmark/tools/gbench/report.py |
# -*- coding: utf-8 -*-
#
# libc++ documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libc++'
copyright = u'2011-2018, LLVM Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '10.0'
# The full version, including alpha/beta/rc tags.
release = '10.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libcxxdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'libcxx.tex', u'libcxx Documentation',
u'LLVM project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('contents', 'libc++', u'libc++ Documentation',
[u'LLVM project'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('contents', 'libc++', u'libc++ Documentation',
u'LLVM project', 'libc++', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# FIXME: Define intersphinx configuration.
intersphinx_mapping = {}
# -- Options for extensions ----------------------------------------------------
# Enable this if you want TODOs to show up in the generated documentation.
todo_include_todos = True
| libcudacxx-main | libcxx/docs/conf.py |
# -*- Python -*- vim: set ft=python ts=4 sw=4 expandtab tw=79:
# Configuration file for the 'lit' test runner.
import os
import site
site.addsitedir(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'utils'))
from libcxx.test.googlebenchmark import GoogleBenchmark
# Tell pylint that we know config and lit_config exist somewhere.
if 'PYLINT_IMPORT' in os.environ:
config = object()
lit_config = object()
# name: The name of this test suite.
config.name = 'libc++ benchmarks'
config.suffixes = []
config.test_exec_root = os.path.join(config.libcxx_obj_root, 'benchmarks')
config.test_source_root = config.test_exec_root
config.test_format = GoogleBenchmark(test_sub_dirs='.',
test_suffix='.libcxx.out',
                                     benchmark_args=config.benchmark_args)
| libcudacxx-main | libcxx/benchmarks/lit.cfg.py |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""Commands used to automate testing gdb pretty printers.
This script is part of a larger framework to test gdb pretty printers. It
runs the program, detects test cases, checks them, and prints results.
See gdb_pretty_printer_test.sh.cpp for how to write a test case.
"""
from __future__ import print_function
import re
import gdb
test_failures = 0
class CheckResult(gdb.Command):
def __init__(self):
super(CheckResult, self).__init__(
"print_and_compare", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
try:
# Stack frame is:
# 0. StopForDebugger
# 1. ComparePrettyPrintToChars or ComparePrettyPrintToRegex
# 2. TestCase
compare_frame = gdb.newest_frame().older()
testcase_frame = compare_frame.older()
test_loc = testcase_frame.find_sal()
# Use interactive commands in the correct context to get the pretty
# printed version
value_str = self._get_value_string(compare_frame, testcase_frame)
# Ignore the convenience variable name and newline
value = value_str[value_str.find("= ") + 2:-1]
gdb.newest_frame().select()
expectation_val = compare_frame.read_var("expectation")
if "PrettyPrintToRegex" in compare_frame.name():
check_literal = expectation_val.string()
test_fails = not re.match(check_literal, value)
else:
check_literal_string = expectation_val.string(encoding="utf-8")
check_literal = str(check_literal_string.encode("utf-8"))
test_fails = value != check_literal
if test_fails:
global test_failures
print("FAIL: " + test_loc.symtab.filename +
":" + str(test_loc.line))
print("GDB printed:")
print(" " + value)
print("Value should match:")
print(" " + check_literal)
test_failures += 1
else:
print("PASS: " + test_loc.symtab.filename +
":" + str(test_loc.line))
except RuntimeError as e:
# At this point, lots of different things could be wrong, so don't try to
            # recover or figure it out. Don't exit either, because then it's
            # impossible to debug the framework itself.
print("FAIL: Something is wrong in the test framework.")
print(str(e))
test_failures += 1
def _get_value_string(self, compare_frame, testcase_frame):
compare_frame.select()
if "ComparePrettyPrint" in compare_frame.name():
return gdb.execute("p value", to_string=True)
value_str = str(compare_frame.read_var("value"))
clean_expression_str = value_str.strip("'\"")
testcase_frame.select()
return gdb.execute("p " + clean_expression_str, to_string=True)
def exit_handler(event=None):
global test_failures
if test_failures:
print("FAILED %d cases" % test_failures)
exit(test_failures)
# Start code executed at load time
# Disable terminal paging
gdb.execute("set height 0")
gdb.execute("set python print-stack full")
test_failures = 0
CheckResult()
test_bp = gdb.Breakpoint("StopForDebugger")
test_bp.enabled = True
test_bp.silent = True
test_bp.commands = "print_and_compare\ncontinue"
# "run" won't return if the program exits; ensure the script regains control.
gdb.events.exited.connect(exit_handler)
gdb.execute("run")
# If the program didn't exit, something went wrong, but we don't
# know what. Fail on exit.
test_failures += 1
exit_handler(None)
| libcudacxx-main | .upstream-tests/test/pretty_printers/gdb_pretty_printer_test.py |
import sys
import os
import socket
import stat
# Ensure that this is being run on a specific platform
assert sys.platform.startswith('linux') or sys.platform.startswith('darwin') \
or sys.platform.startswith('cygwin') or sys.platform.startswith('freebsd') \
or sys.platform.startswith('netbsd')
def env_path():
ep = os.environ.get('LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT')
assert ep is not None
ep = os.path.realpath(ep)
assert os.path.isdir(ep)
return ep
env_path_global = env_path()
# Make sure we don't try to write outside of env_path.
# All paths used should be sanitized.
def sanitize(p):
p = os.path.realpath(p)
if os.path.commonprefix([env_path_global, p]):
return p
assert False
"""
Some of the tests restrict permissions to induce failures.
Before we delete the test environment, we have to walk it and restore the
permissions.
"""
def clean_recursive(root_p):
if not os.path.islink(root_p):
os.chmod(root_p, 0o777)
for ent in os.listdir(root_p):
p = os.path.join(root_p, ent)
if os.path.islink(p) or not os.path.isdir(p):
os.remove(p)
else:
assert os.path.isdir(p)
clean_recursive(p)
os.rmdir(p)
def init_test_directory(root_p):
root_p = sanitize(root_p)
assert not os.path.exists(root_p)
os.makedirs(root_p)
def destroy_test_directory(root_p):
root_p = sanitize(root_p)
clean_recursive(root_p)
os.rmdir(root_p)
def create_file(fname, size):
with open(sanitize(fname), 'w') as f:
f.write('c' * size)
def create_dir(dname):
os.mkdir(sanitize(dname))
def create_symlink(source, link):
os.symlink(sanitize(source), sanitize(link))
def create_hardlink(source, link):
os.link(sanitize(source), sanitize(link))
def create_fifo(source):
os.mkfifo(sanitize(source))
def create_socket(source):
sock = socket.socket(socket.AF_UNIX)
sanitized_source = sanitize(source)
    # AF_UNIX sockets may have a very limited path length, so split the
    # operation into a chdir() call (with effectively unlimited path length)
    # followed by a bind() relative to that directory.
os.chdir(os.path.dirname(sanitized_source))
sock.bind(os.path.basename(sanitized_source))
if __name__ == '__main__':
command = " ".join(sys.argv[1:])
eval(command)
sys.exit(0)
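# Illustrative invocation (hypothetical path; the test suite passes a single
# quoted Python expression on the command line):
#   python filesystem_dynamic_test_helper.py "create_file('<root>/file1', 42)"
# which the block above eval()s against the helper functions defined here.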
| libcudacxx-main | .upstream-tests/test/support/filesystem_dynamic_test_helper.py |
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from argparse import ArgumentParser
import sys
def print_and_exit(msg):
sys.stderr.write(msg + '\n')
sys.exit(1)
def main():
parser = ArgumentParser(
description="Concatenate two files into a single file")
parser.add_argument(
'-o', '--output', dest='output', required=True,
help='The output file. stdout is used if not given',
type=str, action='store')
parser.add_argument(
'files', metavar='files', nargs='+',
help='The files to concatenate')
args = parser.parse_args()
if len(args.files) < 2:
print_and_exit('fewer than 2 inputs provided')
data = ''
for filename in args.files:
with open(filename, 'r') as f:
data += f.read()
if len(data) != 0 and data[-1] != '\n':
data += '\n'
    assert len(data) > 0, "cannot cat empty files"
with open(args.output, 'w') as f:
f.write(data)
if __name__ == '__main__':
main()
sys.exit(0)
| libcudacxx-main | .upstream-tests/utils/cat_files.py |
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
Generate a linker script that links libc++ to the proper ABI library.
An example script for c++abi would look like "INPUT(libc++.so.1 -lc++abi)".
"""
import argparse
import os
import sys
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--dryrun", help="Don't write any output",
action="store_true", default=False)
parser.add_argument("--rename", action="store_true", default=False,
help="Rename the output as input so we can replace it")
parser.add_argument("--input", help="Path to libc++ library", required=True)
parser.add_argument("--output", help="Path to libc++ linker script",
required=True)
parser.add_argument("libraries", nargs="+",
help="List of libraries libc++ depends on")
args = parser.parse_args()
# Use the relative path for the libc++ library.
libcxx = os.path.relpath(args.input, os.path.dirname(args.output))
# Prepare the list of public libraries to link.
public_libs = ['-l%s' % l for l in args.libraries]
# Generate the linker script contents.
contents = "INPUT(%s)" % ' '.join([libcxx] + public_libs)
if args.dryrun:
print("GENERATING SCRIPT: '%s' as file %s" % (contents, args.output))
return 0
# Remove the existing libc++ symlink if it exists.
if os.path.islink(args.output):
os.unlink(args.output)
# Replace it with the linker script.
with open(args.output, 'w') as f:
f.write(contents + "\n")
return 0
if __name__ == '__main__':
sys.exit(main())
| libcudacxx-main | .upstream-tests/utils/gen_link_script.py |
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_extract - Extract and output a list of symbols from a shared library.
"""
from argparse import ArgumentParser
from libcudacxx.sym_check import extract, util
def main():
parser = ArgumentParser(
description='Extract a list of symbols from a shared library.')
parser.add_argument('library', metavar='shared-lib', type=str,
help='The library to extract symbols from')
parser.add_argument('-o', '--output', dest='output',
help='The output file. stdout is used if not given',
type=str, action='store', default=None)
parser.add_argument('--names-only', dest='names_only',
help='Output only the name of the symbol',
action='store_true', default=False)
parser.add_argument('--only-stdlib-symbols', dest='only_stdlib',
help="Filter all symbols not related to the stdlib",
action='store_true', default=False)
parser.add_argument('--defined-only', dest='defined_only',
help="Filter all symbols that are not defined",
action='store_true', default=False)
parser.add_argument('--undefined-only', dest='undefined_only',
help="Filter all symbols that are defined",
action='store_true', default=False)
args = parser.parse_args()
assert not (args.undefined_only and args.defined_only)
if args.output is not None:
print('Extracting symbols from %s to %s.'
% (args.library, args.output))
syms = extract.extract_symbols(args.library)
if args.only_stdlib:
syms, other_syms = util.filter_stdlib_symbols(syms)
filter = lambda x: x
if args.defined_only:
filter = lambda l: list([x for x in l if x['is_defined']])
if args.undefined_only:
filter = lambda l: list([x for x in l if not x['is_defined']])
util.write_syms(syms, out=args.output, names_only=args.names_only, filter=filter)
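# Illustrative invocation (hypothetical library path; all flags are defined by
# the argument parser above):
#   sym_extract.py --only-stdlib-symbols --defined-only -o syms.txt libc++.so.1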
if __name__ == '__main__':
main()
| libcudacxx-main | .upstream-tests/utils/sym_extract.py |