# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import json

from syngen.cli.commands.base_command import BaseCommand
from syngen.configuration.configuration import SynGenConfiguration
from syngen.synthesizer.configuration_graph_synthesizer import ConfigurationGraphSynthesizer


class SynthesizeCommand(BaseCommand):

    def init_parser(self, base_parser):
        synthesizer = base_parser.add_parser(
            "synthesize",
            help="Run Graph Synthesizer",
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        )
        synthesizer.set_defaults(action=self.run)
        synthesizer.add_argument(
            "-cp", "--config-path", type=str, default=None,
            help="Path to SynGen Configuration file"
        )
        synthesizer.add_argument(
            "--timer-path", type=str, default=None,
            help="Saves generation process timings to the specified file"
        )
        synthesizer.add_argument(
            "-sp", "--save-path", type=str, default="./generated", required=False,
            help="Save path to dump generated files",
        )
        synthesizer.add_argument(
            "--cpu", action='store_true',
            help="Runs all operations on CPU. [Attention] Alignment is not available on CPU"
        )
        synthesizer.add_argument(
            "-v", "--verbose", action='store_true',
            help="Displays generation process progress"
        )

    def run(self, args):
        dict_args = vars(args)
        config_path = dict_args.pop('config_path')
        gpu = not dict_args.pop('cpu')

        with open(config_path, 'r') as f:
            configuration = json.load(f)
        configuration = SynGenConfiguration(configuration)

        synthesizer = ConfigurationGraphSynthesizer(
            configuration,
            gpu=gpu,
            **dict_args,
        )
        synthesizer.fit()
        synthesizer.generate(return_data=False)
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/cli/commands/synthesize.py
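For context, a minimal sketch of how this command is driven end to end, assuming the syngen package is installed and that the surrounding CLI builds an argparse subparser tree as init_parser above expects; "config.json" is a hypothetical configuration path.

import argparse

from syngen.cli.commands.synthesize import SynthesizeCommand

parser = argparse.ArgumentParser(prog="syngen")
subparsers = parser.add_subparsers()
SynthesizeCommand().init_parser(subparsers)

# set_defaults(action=self.run) lets the parsed args dispatch themselves
args = parser.parse_args(["synthesize", "-cp", "config.json", "-sp", "./generated", "--cpu"])
args.action(args)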
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class BaseCommand(object):

    def init_parser(self, base_parser):
        raise NotImplementedError()

    def run(self, args):
        raise NotImplementedError()
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/cli/commands/base_command.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# flake8: noqa
from .base_synthesizer import BaseSynthesizer
from .configuration_graph_synthesizer import ConfigurationGraphSynthesizer
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/synthesizer/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc


class BaseSynthesizer(abc.ABC):
    """Base class for all ``Synthesizers``"""

    @classmethod
    def get_synthesizers(cls, include_parents=True):
        """Recursively find subclasses of `BaseSynthesizer`

        Args:
            include_parents (bool): whether to include classes that have subclasses
                of their own; if `False`, only leaf classes are returned. (default: `True`)
        """
        synthesizers = dict()
        for child in cls.__subclasses__():
            children = child.get_synthesizers(include_parents)
            synthesizers.update(children)

            if include_parents or not children:
                if abc.ABC not in child.__bases__:
                    synthesizers[child.__name__] = child
        return synthesizers

    def fit(self, *args, **kwargs):
        """fits synthesizer on a specified dataset"""
        raise NotImplementedError()

    def generate(self, *args, **kwargs):
        """generate graph using configured synthesizer"""
        raise NotImplementedError()

    def save(self, path: str):
        """save this synthesizer to disk

        Args:
            path (str): The path to save the synthesizer to
        """
        raise NotImplementedError()

    @classmethod
    def load(cls, path: str):
        """load up a saved synthesizer object from disk.

        Args:
            path (str): The path to load the synthesizer from
        """
        raise NotImplementedError()

    @staticmethod
    def add_args(parser):
        """optional function to add arguments to parser for the CLI interface"""
        return parser
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/synthesizer/base_synthesizer.py
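A minimal sketch of how the subclass registry behaves; MySynthesizer is a hypothetical concrete subclass, and direct abc.ABC intermediates are skipped by the `abc.ABC not in child.__bases__` filter. Note that ConfigurationGraphSynthesizer below subclasses object, not BaseSynthesizer, so it does not appear in this registry.

from syngen.synthesizer import BaseSynthesizer

class MySynthesizer(BaseSynthesizer):  # hypothetical concrete subclass
    pass

# e.g. {'MySynthesizer': <class '...MySynthesizer'>}, plus any other
# BaseSynthesizer subclasses defined elsewhere in the package
print(BaseSynthesizer.get_synthesizers())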
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import logging
import json
import os
import shutil
import warnings

from typing import Optional, Literal

import pandas as pd

from syngen.configuration import SynGenDatasetFeatureSpec, SynGenConfiguration
from syngen.generator.tabular import tabular_generators_classes
from syngen.graph_aligner import aligner_classes
from syngen.generator.graph import get_structural_generator_class
from syngen.generator.tabular.utils import tabular_chunk_sample_generation
from syngen.utils.io_utils import (
    dump_generated_graph,
    load_graph,
    load_dataframe,
    merge_dataframe_files,
    dump_dataframe,
)
from syngen.utils.types import DataFrameType, MetaData, DataSourceInputType
from syngen.utils.utils import CustomTimer, dynamic_import, get_object_path, to_ndarray, df_to_pandas, ensure_path

logger = logging.getLogger(__name__)
log = logger

warnings.filterwarnings('ignore')


class ConfigurationGraphSynthesizer(object):
    """A configuration graph synthesizer. Supports generating graph datasets
    based on the provided configuration. This synthesizer requires a dataset
    to be fit on prior to generating graphs of similar properties.

    Args:
        configuration (SynGenConfiguration): configuration to be used during generation
        timer_path (str): path to the file where the generation process timings will be saved
        num_workers (int): number of workers to speed up generation.
        save_path (str): path to the directory where the results will be saved
        gpu (bool): flag to use the GPU graph generator (default: True); if set to False, the CPU will be used.
        verbose (bool): print intermediate results (default: False)
    """

    def __init__(
        self,
        configuration: SynGenConfiguration,
        timer_path: Optional[str] = None,
        num_workers: int = 1,
        save_path: str = './',
        gpu: bool = True,
        verbose: bool = False,
        **kwargs,
    ):
        self.configuration = configuration
        self.num_workers = num_workers
        self.verbose = verbose
        self.timer = CustomTimer(timer_path, verbose=self.verbose)
        self.gpu = gpu

        self.save_path = save_path
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)

        self.structure_generators = None
        self.tabular_generators = None
        self.aligners = None

    def _fit_tabular_generators(self, tab_gen_configs, feature_info_list,
                                part: Literal[MetaData.NODES, MetaData.EDGES],
                                features_to_return=()):
        tabular_generators = []
        feature_info_dict = {feature[MetaData.NAME]: feature for feature in feature_info_list}
        feature_data_cache = {}

        for tab_gen_cfg in tab_gen_configs:
            gen_info = {'feature_file': tab_gen_cfg.get('feature_file')}
            tab_gen_class = tabular_generators_classes[tab_gen_cfg[MetaData.TYPE]]
            tab_gen_cfg[MetaData.PARAMS]['gpu'] = tab_gen_cfg[MetaData.PARAMS].get('gpu', self.gpu)
            tab_gen_cfg[MetaData.PARAMS]['verbose'] = tab_gen_cfg[MetaData.PARAMS].get('verbose', self.verbose)

            perform_fit = True
            enforce_fit = tab_gen_cfg.get('perform_fit', False)
            generator_dump_path = tab_gen_cfg.get(MetaData.DUMP_PATH, None)

            if generator_dump_path and os.path.exists(generator_dump_path) and not enforce_fit:
                tab_gen = tab_gen_class.load(generator_dump_path)
                perform_fit = False
            else:
                tab_gen = tab_gen_class(**tab_gen_cfg[MetaData.PARAMS])

            if tab_gen_cfg[MetaData.DATA_SOURCE][MetaData.TYPE] == DataSourceInputType.RANDOM:
                if perform_fit:
                    tab_gen.fit(columns=tab_gen_cfg[MetaData.FEATURES_LIST])
                if generator_dump_path and perform_fit:
                    tab_gen.save(generator_dump_path)
                tabular_generators.append((tab_gen, gen_info))
                continue

            categorical_features = []
            data_source_feature_info_list = None

            if not perform_fit:
                pass
            elif tab_gen_cfg[MetaData.DATA_SOURCE][MetaData.TYPE] == DataSourceInputType.DATASET:
                data_source_path = tab_gen_cfg[MetaData.DATA_SOURCE][MetaData.PATH]
            elif tab_gen_cfg[MetaData.DATA_SOURCE][MetaData.TYPE] == DataSourceInputType.CONFIGURATION:
                cfg = SynGenDatasetFeatureSpec.instantiate_from_preprocessed(
                    tab_gen_cfg[MetaData.DATA_SOURCE][MetaData.PATH])
                data_source_info = cfg.get_info(part, tab_gen_cfg[MetaData.DATA_SOURCE][MetaData.NAME])
                data_source_feature_info_list = data_source_info[MetaData.FEATURES]
                data_source_path = os.path.join(tab_gen_cfg[MetaData.DATA_SOURCE][MetaData.PATH],
                                                data_source_info[MetaData.FEATURES_PATH])
            else:
                raise ValueError("unsupported data_source type")

            for feature_name in tab_gen_cfg[MetaData.FEATURES_LIST]:
                if feature_info_dict[feature_name][MetaData.FEATURE_TYPE] == MetaData.CATEGORICAL:
                    categorical_features.append(feature_name)

            if not perform_fit and len(features_to_return) == 0:
                pass
            elif data_source_path in feature_data_cache:
                data = feature_data_cache[data_source_path]
            else:
                # FORCE_CPU_MEM_TRANSFER
                data = load_dataframe(data_source_path, feature_info=data_source_feature_info_list)
                feature_data_cache[data_source_path] = data

            if perform_fit:
                tab_gen.fit(data,
                            categorical_columns=categorical_features,
                            columns=tab_gen_cfg[MetaData.FEATURES_LIST],
                            verbose=self.verbose)

            if generator_dump_path and perform_fit:
                tab_gen.save(ensure_path(generator_dump_path))

            tabular_generators.append((tab_gen, gen_info))

        if features_to_return:
            return_dataframe = pd.DataFrame()
            for _, cache_data in feature_data_cache.items():
                columns_intersect = list(set(features_to_return) & set(cache_data.columns))
                return_dataframe[columns_intersect] = cache_data[columns_intersect]
            del feature_data_cache

            return_categorical_features = []
            for feature_name in features_to_return:
                if feature_info_dict[feature_name][MetaData.FEATURE_TYPE] == MetaData.CATEGORICAL:
                    return_categorical_features.append(feature_name)

            return tabular_generators, (return_dataframe, return_categorical_features)

        del feature_data_cache
        return tabular_generators

    def _fit_structural_generator(self, edge_type, return_graph=False):
        structure_gen_cfg = edge_type[MetaData.STRUCTURE_GENERATOR]

        is_bipartite = edge_type[MetaData.SRC_NODE_TYPE] != edge_type[MetaData.DST_NODE_TYPE]
        is_directed = edge_type[MetaData.DIRECTED]
        data_source_cfg = structure_gen_cfg[MetaData.DATA_SOURCE]
        is_random = data_source_cfg[MetaData.TYPE] == DataSourceInputType.RANDOM

        generator_class = get_structural_generator_class(
            structure_gen_cfg[MetaData.TYPE],
            is_bipartite=is_bipartite,
            is_random=is_random,
        )
        gen_info = dict(
            is_bipartite=is_bipartite,
            is_directed=is_directed,
            num_edges=edge_type[MetaData.COUNT],
            noise=structure_gen_cfg[MetaData.PARAMS].get('noise', 0.5),
        )

        structure_gen_cfg[MetaData.PARAMS]['gpu'] = structure_gen_cfg[MetaData.PARAMS].get('gpu', self.gpu)
        structure_gen_cfg[MetaData.PARAMS]['verbose'] = structure_gen_cfg[MetaData.PARAMS].get('verbose', self.verbose)

        perform_fit = True
        enforce_fit = structure_gen_cfg.get('perform_fit', False)
        generator_dump_path = structure_gen_cfg.get(MetaData.DUMP_PATH, None)

        if generator_dump_path and os.path.exists(generator_dump_path) and not enforce_fit:
            generator = generator_class.load(generator_dump_path)
            generator.gpu = structure_gen_cfg[MetaData.PARAMS]['gpu']
            generator.verbose = structure_gen_cfg[MetaData.PARAMS]['verbose']
            perform_fit = False
        else:
            generator = generator_class(**structure_gen_cfg[MetaData.PARAMS])

        if not perform_fit and not return_graph:
            pass
        elif data_source_cfg[MetaData.TYPE] == DataSourceInputType.RANDOM:
            graph = None
        elif data_source_cfg[MetaData.TYPE] == DataSourceInputType.CONFIGURATION:
            cfg = SynGenDatasetFeatureSpec.instantiate_from_preprocessed(data_source_cfg[MetaData.PATH])
            data_source_edge_info = cfg.get_edge_info(data_source_cfg[MetaData.NAME])
            graph_src_set = cfg.get_node_info(data_source_edge_info[MetaData.SRC_NODE_TYPE])[MetaData.COUNT]
            graph_path = os.path.join(data_source_cfg[MetaData.PATH],
                                      data_source_edge_info[MetaData.STRUCTURE_PATH])
            graph = load_graph(graph_path)
        else:
            raise ValueError("unsupported data_source type")

        if is_bipartite:
            gen_info['is_directed'] = False
            gen_info['num_nodes_src_set'] = self.configuration.get_node_info(
                edge_type[MetaData.SRC_NODE_TYPE])[MetaData.COUNT]
            gen_info['num_nodes_dst_set'] = self.configuration.get_node_info(
                edge_type[MetaData.DST_NODE_TYPE])[MetaData.COUNT]
            if perform_fit:
                generator.fit(graph, src_set=None, dst_set=None, is_directed=False, transform_graph=False)
        else:
            gen_info['num_nodes'] = self.configuration.get_node_info(edge_type[MetaData.SRC_NODE_TYPE])[MetaData.COUNT]
            gen_info['has_self_loop'] = structure_gen_cfg[MetaData.PARAMS].get('has_self_loop', False)
            if perform_fit:
                generator.fit(graph, is_directed=is_directed)

        if generator_dump_path and perform_fit:
            generator.save(generator_dump_path)

        if return_graph:
            return (generator, gen_info), graph, graph_src_set

        return generator, gen_info

    def _fit_aligners(self, aligner_cfgs, graphs_to_process, features_to_align):
        aligners = []

        for aligner_cfg in aligner_cfgs:
            aligner_class = aligner_classes[aligner_cfg[MetaData.TYPE]]
            aligner_graphs = {graph_name: graphs_to_process[graph_name]
                              for graph_name in aligner_cfg[MetaData.GRAPHS]}
            aligner_node_features = {feature_name: features_to_align[MetaData.NODES][feature_name]
                                     for feature_name in aligner_cfg[MetaData.NODES]}
            aligner_edge_features = {feature_name: features_to_align[MetaData.EDGES][feature_name]
                                     for feature_name in aligner_cfg[MetaData.EDGES]}

            aligner = aligner_class(**aligner_cfg[MetaData.PARAMS])
            aligner.fit(aligner_graphs, aligner_node_features, aligner_edge_features)
            aligners.append((
                aligner,
                {
                    graph_name: {
                        MetaData.SRC_NODE_TYPE: graph_info[MetaData.SRC_NODE_TYPE],
                        MetaData.DST_NODE_TYPE: graph_info[MetaData.DST_NODE_TYPE],
                    }
                    for graph_name, graph_info in aligner_graphs.items()
                }
            ))

        del features_to_align
        del graphs_to_process
        return aligners

    def fit(self):
        """Fit the synthesizer on the graph described by the configuration."""
        self.structure_generators = {}
        self.tabular_generators = {MetaData.NODES: {}, MetaData.EDGES: {}}
        self.aligners = []

        graphs_to_process = {}
        features_to_align = {MetaData.NODES: {}, MetaData.EDGES: {}}

        if MetaData.ALIGNERS in self.configuration:
            for aligner_cfg in self.configuration[MetaData.ALIGNERS]:
                for graph_name in aligner_cfg[MetaData.GRAPHS]:
                    graphs_to_process[graph_name] = None
                for part in [MetaData.NODES, MetaData.EDGES]:
                    if aligner_cfg[part]:
                        for part_name, feature_names in aligner_cfg[part].items():
                            if part_name not in features_to_align[part]:
                                features_to_align[part][part_name] = {
                                    MetaData.FEATURES_LIST: set(),
                                }
                            features_to_align[part][part_name][MetaData.FEATURES_LIST] |= set(feature_names)

        self.timer.start_counter('fit')

        self.timer.start_counter('fit_nodes')
        for node_type in self.configuration[MetaData.NODES]:
            node_name = node_type[MetaData.NAME]
            if MetaData.TABULAR_GENERATORS in node_type:
                self.timer.start_counter(f'fit_node_{node_name}')
                if node_name in features_to_align[MetaData.NODES]:
                    self.tabular_generators[MetaData.NODES][node_name], (features_data, cat_cols) = \
                        self._fit_tabular_generators(
                            node_type[MetaData.TABULAR_GENERATORS],
                            node_type[MetaData.FEATURES],
                            MetaData.NODES,
                            features_to_return=list(
                                features_to_align[MetaData.NODES][node_name][MetaData.FEATURES_LIST])
                        )
                    features_to_align[MetaData.NODES][node_name][MetaData.FEATURES_DATA] = features_data
                    features_to_align[MetaData.NODES][node_name][MetaData.CATEGORICAL_COLUMNS] = cat_cols
                else:
                    self.tabular_generators[MetaData.NODES][node_name] = self._fit_tabular_generators(
                        node_type[MetaData.TABULAR_GENERATORS],
                        node_type[MetaData.FEATURES],
                        MetaData.NODES
                    )
                self.timer.end_counter(f'fit_node_{node_name}', f'NODE {node_name} FIT TOOK')
        self.timer.end_counter('fit_nodes', 'FIT NODES TOOK')

        self.timer.start_counter('fit_edges')
        for edge_type in self.configuration[MetaData.EDGES]:
            edge_name = edge_type[MetaData.NAME]
            if MetaData.STRUCTURE_GENERATOR in edge_type:
                self.timer.start_counter(f'fit_edges_struct_{edge_name}')
                if edge_name in graphs_to_process:
                    graphs_to_process[edge_name] = {
                        MetaData.SRC_NODE_TYPE: edge_type[MetaData.SRC_NODE_TYPE],
                        MetaData.DST_NODE_TYPE: edge_type[MetaData.DST_NODE_TYPE],
                    }
                    self.structure_generators[edge_name], \
                        graphs_to_process[edge_name][MetaData.STRUCTURE_DATA], \
                        graphs_to_process[edge_name]['src_size'] = \
                        self._fit_structural_generator(edge_type, return_graph=True)
                else:
                    self.structure_generators[edge_name] = self._fit_structural_generator(edge_type)
                self.timer.end_counter(f'fit_edges_struct_{edge_name}',
                                       f'EDGE {edge_name} STRUCTURAL FIT TOOK')

            if MetaData.TABULAR_GENERATORS in edge_type:
                self.timer.start_counter(f'fit_edges_tabular_{edge_name}')
                if edge_name in features_to_align[MetaData.EDGES]:
                    self.tabular_generators[MetaData.EDGES][edge_name], (features_data, cat_cols) = \
                        self._fit_tabular_generators(
                            edge_type[MetaData.TABULAR_GENERATORS],
                            edge_type[MetaData.FEATURES],
                            MetaData.EDGES,
                            features_to_return=list(
                                features_to_align[MetaData.EDGES][edge_name][MetaData.FEATURES_LIST])
                        )
                    features_to_align[MetaData.EDGES][edge_name][MetaData.FEATURES_DATA] = features_data
                    features_to_align[MetaData.EDGES][edge_name][MetaData.CATEGORICAL_COLUMNS] = cat_cols
                else:
                    self.tabular_generators[MetaData.EDGES][edge_name] = self._fit_tabular_generators(
                        edge_type[MetaData.TABULAR_GENERATORS],
                        edge_type[MetaData.FEATURES],
                        MetaData.EDGES
                    )
                self.timer.end_counter(f'fit_edges_tabular_{edge_name}',
                                       f'EDGE {edge_name} TABULAR FIT TOOK')

        if MetaData.ALIGNERS in self.configuration:
            self.aligners = self._fit_aligners(self.configuration[MetaData.ALIGNERS],
                                               graphs_to_process, features_to_align)
        self.timer.end_counter('fit_edges', 'FIT EDGES TOOK')
        self.timer.end_counter('fit', 'FIT TOOK')

    def _generate_tabular_data(self, tabular_generators, num_samples, features_path, name):
        merge_data = features_path.endswith('.csv') or features_path.endswith('.parquet')
        if self.aligners:
            assert merge_data

        generated_dfs = []
        for tab_gen_id, (tab_gen, gen_info) in enumerate(tabular_generators):
            use_memmap = False
            if merge_data:
                save_path = os.path.join(self.save_path, 'temp_tab_gen_dir')
                fname = f"{name}_{tab_gen_id}" if len(tabular_generators) > 1 else name
            else:
                save_path = os.path.join(self.save_path, features_path)
                fname = 'chunk'
            os.makedirs(save_path, exist_ok=True)

            if gen_info['feature_file'] and gen_info['feature_file'].endswith('.npy') and tab_gen.supports_memmap:
                use_memmap = True
                fname = gen_info['feature_file']

            feature_files = tabular_chunk_sample_generation(
                tab_gen,
                n_samples=num_samples,
                save_path=save_path,
                fname=fname,
                num_workers=self.num_workers,
                use_memmap=use_memmap,
                verbose=self.verbose,
            )

            if merge_data:
                generated_df = merge_dataframe_files(feature_files, format='parquet')
                generated_dfs.append(generated_df)
                shutil.rmtree(save_path)

        if merge_data:
            generated_dfs = pd.concat(generated_dfs, axis=1)
            dump_dataframe(generated_dfs, os.path.join(self.save_path, features_path), format=None)
        gc.collect()

    def generate(self, return_data=False, **kwargs):
        """Generates graph

        Args:
            return_data (bool): if true, load the generated data into the output configuration
        """
        node_type_to_node_counts = {node_type[MetaData.NAME]: node_type[MetaData.COUNT]
                                    for node_type in self.configuration[MetaData.NODES]}
        edge_type_to_edge_info = {edge_type[MetaData.NAME]: edge_type
                                  for edge_type in self.configuration[MetaData.EDGES]}

        output_config = self.configuration.copy()

        edge_type_name_to_idx = {edge_info[MetaData.NAME]: idx
                                 for idx, edge_info in enumerate(output_config[MetaData.EDGES])}
        node_type_name_to_idx = {node_info[MetaData.NAME]: idx
                                 for idx, node_info in enumerate(output_config[MetaData.NODES])}

        self.timer.start_counter("gen_s")
        for edge_type_name, (structure_generator, gen_info) in self.structure_generators.items():
            self.timer.start_counter(f'gen_edges_struct_{edge_type_name}')
            edge_info = edge_type_to_edge_info[edge_type_name]
            generated_graph_path = ensure_path(os.path.join(self.save_path, edge_info[MetaData.STRUCTURE_PATH]))

            merge_data = generated_graph_path.endswith('.csv') or \
                generated_graph_path.endswith('.parquet')
            use_memmap = generated_graph_path.endswith('.npy')

            if not merge_data and not use_memmap:
                os.makedirs(generated_graph_path, exist_ok=True)

            if gen_info['is_bipartite']:
                num_nodes_src_set = node_type_to_node_counts[edge_info[MetaData.SRC_NODE_TYPE]] \
                    if node_type_to_node_counts[edge_info[MetaData.SRC_NODE_TYPE]] > -1 \
                    else gen_info['num_nodes_src_set']
                num_nodes_dst_set = node_type_to_node_counts[edge_info[MetaData.DST_NODE_TYPE]] \
                    if node_type_to_node_counts[edge_info[MetaData.DST_NODE_TYPE]] > -1 \
                    else gen_info['num_nodes_dst_set']

                graph, src_nodes, dst_nodes = structure_generator.generate(
                    num_edges_dst_src=gen_info['num_edges'],
                    num_edges_src_dst=gen_info['num_edges'],
                    num_nodes_src_set=num_nodes_src_set,
                    num_nodes_dst_set=num_nodes_dst_set,
                    is_directed=gen_info['is_directed'],
                    noise=gen_info.get('noise', 0.5),
                    return_node_ids=True,
                    apply_edge_mirroring=False,
                    transform_graph=False,
                    save_path=None if merge_data else generated_graph_path,
                )
                node_type_to_node_counts[edge_info[MetaData.SRC_NODE_TYPE]] = max(
                    node_type_to_node_counts[edge_info[MetaData.SRC_NODE_TYPE]], src_nodes.max() + 1)
                node_type_to_node_counts[edge_info[MetaData.DST_NODE_TYPE]] = max(
                    node_type_to_node_counts[edge_info[MetaData.DST_NODE_TYPE]], dst_nodes.max() + 1)
            else:
                num_nodes = node_type_to_node_counts[edge_info[MetaData.SRC_NODE_TYPE]] \
                    if node_type_to_node_counts[edge_info[MetaData.SRC_NODE_TYPE]] > -1 \
                    else gen_info['num_nodes']

                graph, node_ids = structure_generator.generate(
                    num_nodes=num_nodes,
                    num_edges=gen_info['num_edges'],
                    is_directed=gen_info['is_directed'],
                    has_self_loop=gen_info.get('has_self_loop', False),
                    noise=gen_info.get('noise', 0.5),
                    return_node_ids=True,
                    save_path=None if merge_data else generated_graph_path,
                )
                node_type_to_node_counts[edge_info[MetaData.SRC_NODE_TYPE]] = max(
                    node_type_to_node_counts[edge_info[MetaData.SRC_NODE_TYPE]], node_ids.max() + 1)

            if merge_data or not self.gpu:
                dump_generated_graph(generated_graph_path, graph)

            output_config[MetaData.EDGES][edge_type_name_to_idx[edge_type_name]][MetaData.COUNT] = \
                len(graph) if merge_data or use_memmap else int(graph)
            del graph
            gc.collect()
            self.timer.end_counter(f'gen_edges_struct_{edge_type_name}',
                                   f'EDGE {edge_type_name} STRUCT GEN TOOK')
        self.timer.end_counter("gen_s", "GEN STRUCT TOOK")

        for node_type_name, counts in node_type_to_node_counts.items():
            output_config[MetaData.NODES][node_type_name_to_idx[node_type_name]][MetaData.COUNT] = int(counts)

        self.timer.start_counter("gen_t_nodes")
        for node_type_name, tabular_generators in self.tabular_generators[MetaData.NODES].items():
            num_nodes = node_type_to_node_counts[node_type_name]
            features_path = output_config[MetaData.NODES][node_type_name_to_idx[node_type_name]][MetaData.FEATURES_PATH]
            self._generate_tabular_data(tabular_generators, num_nodes, features_path, node_type_name)
        self.timer.end_counter("gen_t_nodes", "GEN TABULAR NODE FEATURES TOOK")

        self.timer.start_counter("gen_t_edges")
        for edge_type_name, tabular_generators in self.tabular_generators[MetaData.EDGES].items():
            num_edges = output_config[MetaData.EDGES][edge_type_name_to_idx[edge_type_name]][MetaData.COUNT]
            features_path = output_config[MetaData.EDGES][edge_type_name_to_idx[edge_type_name]][MetaData.FEATURES_PATH]
            self._generate_tabular_data(tabular_generators, num_edges, features_path, edge_type_name)
        self.timer.end_counter("gen_t_edges", "GEN TABULAR EDGE FEATURES TOOK")

        self.timer.start_counter("gen_alignment")
        if self.aligners:
            for aligner, graphs_info in self.aligners:
                graphs_data = {}
                for graph_name, graph_info in graphs_info.items():
                    graphs_data[graph_name] = graph_info.copy()
                    if graph_info[MetaData.SRC_NODE_TYPE] != graph_info[MetaData.DST_NODE_TYPE]:
                        graphs_data[graph_name]['src_size'] = \
                            output_config[MetaData.NODES][node_type_name_to_idx[graph_info[MetaData.SRC_NODE_TYPE]]][
                                MetaData.COUNT]
                    graphs_data[graph_name][MetaData.STRUCTURE_DATA] = load_graph(os.path.join(
                        self.save_path,
                        output_config[MetaData.EDGES][edge_type_name_to_idx[graph_name]][MetaData.STRUCTURE_PATH]
                    ))

                node_features_data = {
                    node_name: load_dataframe(
                        os.path.join(
                            self.save_path,
                            output_config[MetaData.NODES][node_type_name_to_idx[node_name]][MetaData.FEATURES_PATH]),
                        feature_info=output_config[MetaData.NODES][node_type_name_to_idx[node_name]][MetaData.FEATURES]
                    )
                    for node_name in aligner.features_to_correlate_node
                }
                edge_features_data = {
                    edge_name: load_dataframe(
                        os.path.join(
                            self.save_path,
                            output_config[MetaData.EDGES][edge_type_name_to_idx[edge_name]][MetaData.FEATURES_PATH]),
                        feature_info=output_config[MetaData.EDGES][edge_type_name_to_idx[edge_name]][MetaData.FEATURES]
                    )
                    for edge_name in aligner.features_to_correlate_edge
                }

                aligned_data = aligner.align(
                    graphs_data,
                    node_features_data,
                    edge_features_data,
                )
                for node_name, tab_data in aligned_data[MetaData.NODES].items():
                    dump_dataframe(
                        tab_data,
                        os.path.join(
                            self.save_path,
                            output_config[MetaData.NODES][node_type_name_to_idx[node_name]][MetaData.FEATURES_PATH]
                        ),
                        format=None,
                    )
                for edge_name, tab_data in aligned_data[MetaData.EDGES].items():
                    dump_dataframe(
                        tab_data,
                        os.path.join(
                            self.save_path,
                            output_config[MetaData.EDGES][edge_type_name_to_idx[edge_name]][MetaData.FEATURES_PATH]
                        ),
                        format=None,
                    )
        self.timer.end_counter("gen_alignment", "GEN ALIGNMENT TOOK")

        with open(os.path.join(self.save_path, 'graph_metadata.json'), 'w') as f:
            json.dump(output_config, f, indent=4)

        output_config[MetaData.PATH] = self.save_path

        if return_data:
            for node_info in output_config[MetaData.NODES]:
                if node_info[MetaData.FEATURES_PATH]:
                    node_info[MetaData.FEATURES_DATA] = load_dataframe(os.path.join(
                        self.save_path, node_info[MetaData.FEATURES_PATH]
                    ))
            for edge_info in output_config[MetaData.EDGES]:
                if edge_info[MetaData.FEATURES_PATH]:
                    edge_info[MetaData.FEATURES_DATA] = load_dataframe(os.path.join(
                        self.save_path, edge_info[MetaData.FEATURES_PATH]
                    ))
                if edge_info[MetaData.STRUCTURE_PATH]:
                    edge_info[MetaData.STRUCTURE_DATA] = load_graph(os.path.join(
                        self.save_path, edge_info[MetaData.STRUCTURE_PATH],
                    ))
            return output_config

        return output_config

    def save(self, path):
        """saves the synthesizer to disk

        Args:
            path (str): The path to save the synthesizer to
        """
        meta_data = {
            "configuration": self.configuration.copy(),
            "timer_path": self.timer.path,
            "num_workers": self.num_workers,
            "save_path": self.save_path,
            "gpu": self.gpu,
            "verbose": self.verbose,
        }

        if not os.path.exists(path):
            os.makedirs(path)

        if self.structure_generators:
            meta_data['struct_gens'] = {}
            for edge_name, (struct_gen, gen_info) in self.structure_generators.items():
                struct_gen.save(os.path.join(path, f'struct_gen_{edge_name}'))
                meta_data['struct_gens'][edge_name] = {
                    'gen_info': gen_info,
                    'object_path': get_object_path(struct_gen),
                }

        if self.tabular_generators:
            meta_data['tab_gens'] = {}
            for part, part_gens in self.tabular_generators.items():
                meta_data['tab_gens'][part] = {}
                for part_name, tab_gens in part_gens.items():
                    meta_data['tab_gens'][part][part_name] = []
                    for idx, (tab_gen, gen_info) in enumerate(tab_gens):
                        tab_gen.save(os.path.join(path, f'tab_gen_{part}_{part_name}_{idx}'))
                        meta_data['tab_gens'][part][part_name].append({
                            'gen_info': gen_info,
                            'object_path': get_object_path(tab_gen),
                        })

        if self.aligners:
            meta_data['aligners'] = []
            for idx, (aligner, graphs_info) in enumerate(self.aligners):
                aligner.save(os.path.join(path, f'aligner_{idx}'))
                meta_data['aligners'].append({
                    'object_path': get_object_path(aligner),
                    'graphs_info': graphs_info,
                })

        with open(os.path.join(path, "synthesizer_metadata.json"), "w") as fp:
            json.dump(meta_data, fp, indent=4)

    @classmethod
    def load(cls, path):
        """load up a saved synthesizer object from disk.

        Args:
            path (str): The path to load the synthesizer from
        """
        with open(os.path.join(path, "synthesizer_metadata.json"), 'r') as f:
            meta_data = json.load(f)

        struct_gens = meta_data.pop('struct_gens', {})
        tab_gens = meta_data.pop('tab_gens', {})
        aligners = meta_data.pop('aligners', {})

        instance = cls(**meta_data)

        if struct_gens:
            instance.structure_generators = {
                edge_name: (
                    dynamic_import(data['object_path']).load(
                        os.path.join(path, f'struct_gen_{edge_name}')
                    ),
                    data['gen_info'],
                )
                for edge_name, data in struct_gens.items()
            }
        if tab_gens:
            instance.tabular_generators = {
                part: {
                    part_name: [
                        (
                            dynamic_import(data['object_path']).load(
                                os.path.join(path, f'tab_gen_{part}_{part_name}_{idx}')
                            ),
                            data['gen_info'],
                        )
                        for idx, data in enumerate(part_gens)
                    ]
                    for part_name, part_gens in part_data.items()
                }
                for part, part_data in tab_gens.items()
            }
        if aligners:
            instance.aligners = [
                (
                    dynamic_import(data['object_path']).load(
                        os.path.join(path, f'aligner_{idx}')
                    ),
                    data['graphs_info'],
                )
                for idx, data in enumerate(aligners)
            ]
        return instance
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/synthesizer/configuration_graph_synthesizer.py
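A minimal programmatic sketch of the fit/generate/save/load lifecycle, mirroring SynthesizeCommand.run above; "config.json" and "./synthesizer_ckpt" are hypothetical paths.

import json

from syngen.configuration.configuration import SynGenConfiguration
from syngen.synthesizer import ConfigurationGraphSynthesizer

with open("config.json", "r") as f:
    configuration = SynGenConfiguration(json.load(f))

synthesizer = ConfigurationGraphSynthesizer(configuration, save_path="./generated", gpu=True)
synthesizer.fit()
output_config = synthesizer.generate(return_data=False)

# The fitted generators can be checkpointed and restored later:
synthesizer.save("./synthesizer_ckpt")
restored = ConfigurationGraphSynthesizer.load("./synthesizer_ckpt")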
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cupy as cp
from numba import cuda

WARP_SIZE = 32  # could be 32 or 64


@cuda.jit
def repeat_kernel(repeat_ptr, cumsum_ptr, res, size):
    idx = cuda.grid(1)
    stride = cuda.gridsize(1) // WARP_SIZE
    warp_id = idx // WARP_SIZE
    tid_in_warp = idx % WARP_SIZE

    for i in range(warp_id, size, stride):
        end = cumsum_ptr[i]
        repeat = repeat_ptr[i]
        start = end - repeat
        for j in range(start + tid_in_warp, end, WARP_SIZE):
            res[j] = i


def cuda_repeat(repeats):
    cumsum = repeats.cumsum(0)
    total = cumsum[-1].item()
    size = len(repeats)
    block = 512
    warps_per_block = block // WARP_SIZE
    grid = max((size + warps_per_block - 1) // warps_per_block, 2048)
    res = cp.empty(total, dtype=repeats.dtype)
    repeat_kernel[grid, block](repeats, cumsum, res, size)
    cuda.synchronize()
    return res
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/utils.py
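A minimal sketch of cuda_repeat, which behaves like numpy.repeat over the index range of the counts array; it requires a CUDA-capable GPU with cupy and numba installed.

import cupy as cp

from syngen.generator.utils import cuda_repeat

counts = cp.array([2, 0, 3, 1])  # how many times to emit each index
out = cuda_repeat(counts)
print(cp.asnumpy(out))           # -> [0 0 2 2 2 3]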
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pickle
from typing import Optional, List

import cupy as cp
import numpy as np
import pandas as pd
from tqdm import tqdm
from pandas.api.types import is_integer_dtype
from sklearn.preprocessing import OrdinalEncoder

from syngen.generator.tabular.chunked_tabular_generator import ChunkedBaseTabularGenerator
from syngen.generator.utils import cuda_repeat


class GaussianGenerator(ChunkedBaseTabularGenerator):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def ordinal_encoder(self, cat_col):
        encoder = OrdinalEncoder()
        encoder.fit(cat_col)
        return encoder

    def fit(
        self,
        data,
        categorical_columns=(),
        columns: Optional[List[str]] = None,
        verbose: bool = False,
    ):
        self.column_order = columns or list(data.columns)
        self.cat_fit = {}
        self.categorical_columns = set(categorical_columns)
        self.continuous_columns = set(self.column_order) - self.categorical_columns
        num_samples = len(data)

        # - multinomial distribution
        cat_cols = tqdm(self.categorical_columns) if verbose else self.categorical_columns
        for column in cat_cols:
            enc = self.ordinal_encoder(data[column].values.reshape(-1, 1))
            pvals = data[column].value_counts() / num_samples
            pvals = pvals.values
            self.cat_fit[column] = {
                "encoder": enc,
                "pvals": pvals,
                'dtype': data[column].dtype,
            }

        self.cont_fit = {}
        self.integer_continuous_columns = []

        # - gaussian distribution
        cont_cols = tqdm(self.continuous_columns) if verbose else self.continuous_columns
        for column in cont_cols:
            mean, std = data[column].mean(), data[column].std()
            self.cont_fit[column] = {
                "mean": mean,
                "std": std,
                'dtype': data[column].dtype,
            }
            if is_integer_dtype(data[column].dtype):
                self.integer_continuous_columns.append(column)
        self.fits = {**self.cat_fit, **self.cont_fit}

    def sample(self, n, gpu=False, memmap_kwargs=None, start_idx=0, end_idx=None, **kwargs):
        use_memmap = memmap_kwargs is not None

        if use_memmap:
            memmap_outfile = np.load(memmap_kwargs['filename'], mmap_mode='r+')

        if gpu:
            cont_means = []
            cont_stds = []
            for column in self.continuous_columns:
                cont_means.append(self.fits[column]['mean'])
                cont_stds.append(self.fits[column]['std'])

            cont_data = cp.random.normal(
                cp.array(cont_means),
                cp.array(cont_stds),
                size=(n, len(self.continuous_columns)),
                dtype=cp.float32,
            )
            cont_data = cp.asnumpy(cont_data)
            df = pd.DataFrame(cont_data, columns=list(self.continuous_columns))
            if self.integer_continuous_columns:
                df[self.integer_continuous_columns] = \
                    df[self.integer_continuous_columns].astype(np.int32)

            for column in self.categorical_columns:
                sampled_data = cp.random.multinomial(n, self.fits[column]["pvals"])
                sampled_data = cuda_repeat(sampled_data)
                cp.random.shuffle(sampled_data)
                sampled_data = cp.asnumpy(sampled_data.reshape(-1, 1))
                encoder = self.fits[column]["encoder"]
                sampled_data = encoder.inverse_transform(sampled_data)
                df[column] = sampled_data.reshape(-1).astype(self.fits[column]["dtype"])
        else:
            df = pd.DataFrame()
            for column in self.column_order:
                if column in self.categorical_columns:
                    sampled_data = np.random.multinomial(n, self.fits[column]["pvals"])
                    sampled_data = np.repeat(np.arange(len(sampled_data)), sampled_data)
                    np.random.shuffle(sampled_data)
                    sampled_data = sampled_data.reshape(-1, 1)
                    encoder = self.fits[column]["encoder"]
                    sampled_data = encoder.inverse_transform(sampled_data)
                else:
                    sampled_data = np.random.normal(
                        self.fits[column]['mean'], self.fits[column]['std'], n)
                df[column] = sampled_data.reshape(-1).astype(self.fits[column]["dtype"])

        df = df[self.column_order]

        if use_memmap:
            memmap_outfile[start_idx:end_idx] = df.values
            return None
        return df

    def save(self, path):
        with open(path, 'wb') as file_handler:
            pickle.dump(self, file_handler, protocol=pickle.HIGHEST_PROTOCOL)

    @classmethod
    def load(cls, path):
        with open(path, 'rb') as file_handler:
            model = pickle.load(file_handler)
        return model
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/gaussian_generator.py
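A minimal sketch fitting GaussianGenerator on a toy frame and sampling on CPU; the column names are hypothetical, and the module still imports cupy at import time, so the RAPIDS stack must be installed even for CPU sampling.

import pandas as pd

from syngen.generator.tabular import GaussianGenerator

data = pd.DataFrame({
    "age": [23, 35, 41, 29, 52, 33],
    "segment": ["a", "b", "a", "c", "b", "a"],
})

gen = GaussianGenerator()
gen.fit(data, categorical_columns=["segment"])
print(gen.sample(4))  # gpu=False by default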
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# flake8: noqa
from .base_tabular_generator import BaseTabularGenerator
from .chunked_tabular_generator import ChunkedBaseTabularGenerator
from .ctgan import CTGANGenerator
from .gaussian_generator import GaussianGenerator
from .kde_generator import KDEGenerator
from .random import RandomMVGenerator
from .uniform_generator import UniformGenerator

tabular_generators_classes = {
    'kde': KDEGenerator,
    'random': RandomMVGenerator,
    'gaussian': GaussianGenerator,
    'uniform': UniformGenerator,
    'ctgan': CTGANGenerator,
}

tabular_generators_types_to_classes = {
    cls.__name__: k for k, cls in tabular_generators_classes.items()
}
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/__init__.py
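A minimal sketch of the registry lookup used when the synthesizer reads a tabular generator "type" string from the configuration; the reverse mapping resolves class names back to type keys.

from syngen.generator.tabular import (
    tabular_generators_classes,
    tabular_generators_types_to_classes,
)

gen_class = tabular_generators_classes["gaussian"]
print(gen_class.__name__)                                        # GaussianGenerator
print(tabular_generators_types_to_classes["GaussianGenerator"])  # gaussian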
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from tqdm import tqdm
from typing import Union, List, Optional
import pickle

import cupy as cp
import numpy as np
import pandas as pd
from sklearn.neighbors import KernelDensity
from cuml.neighbors import KernelDensity as KernelDensityGPU
from sklearn.preprocessing import OrdinalEncoder

from syngen.generator.tabular.chunked_tabular_generator import ChunkedBaseTabularGenerator

import warnings

warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)


class KDEGenerator(ChunkedBaseTabularGenerator):

    def __init__(self, **kwargs):
        """A tabular generator based on kernel density estimation.

        Categorical and continuous columns are modeled using gaussian KDE.
        """
        super().__init__(**kwargs)

    def ordinal_encoder(self, cat_col):
        encoder = OrdinalEncoder()
        encoder.fit(cat_col)
        return encoder

    def fit(
        self,
        data: pd.DataFrame,
        categorical_columns: list = (),
        samples: Union[float, int] = 0.1,
        columns: Optional[List[str]] = None,
        verbose: bool = False,
    ):
        if samples > 0:
            num_samples = len(data)
            if 0.0 <= samples <= 1.0:
                num_samples = samples * num_samples
            else:
                num_samples = samples
            num_samples = min(int(num_samples), 10_000_000)
            data = data.sample(n=num_samples)

        self.column_order = columns or list(data.columns)
        self.cat_fit = {}
        self.categorical_columns = set(categorical_columns)
        self.continuous_columns = set(self.column_order) - self.categorical_columns

        # - kde distribution
        cat_cols = tqdm(self.categorical_columns) if verbose else self.categorical_columns
        for column in cat_cols:
            col_data = data[column].dropna().values.reshape(-1, 1)
            enc = self.ordinal_encoder(col_data)
            col_data = enc.transform(col_data).reshape(-1, 1)
            kde = KernelDensity(kernel="gaussian")
            kde = kde.fit(col_data)
            self.cat_fit[column] = {
                "encoder": enc,
                "n_categories": len(enc.categories_[0]),
                "sampler": kde,
                'dtype': data[column].dtype,
            }

        self.cont_fit = {}
        # - gaussian distribution
        cont_cols = tqdm(self.continuous_columns) if verbose else self.continuous_columns
        for column in cont_cols:
            col_data = data[column].values.reshape(-1, 1)
            kde = KernelDensity(kernel="gaussian")
            kde = kde.fit(col_data)
            self.cont_fit[column] = {
                "sampler": kde,
                'dtype': data[column].dtype,
            }
        self.fits = {**self.cat_fit, **self.cont_fit}

    def sample(self, n, gpu=False, memmap_kwargs=None, start_idx=0, end_idx=None, **kwargs):
        use_memmap = memmap_kwargs is not None

        if use_memmap:
            memmap_outfile = np.load(memmap_kwargs['filename'], mmap_mode='r+')

        df = pd.DataFrame()

        if gpu:
            for column_id, column in enumerate(self.column_order):
                sampler = self.fits[column]["sampler"]
                gpu_sampler = KernelDensityGPU(kernel="gaussian")
                gpu_sampler.fit(np.asarray(sampler.tree_.data))

                if "encoder" in self.fits[column]:
                    # - must be categorical
                    encoder = self.fits[column]["encoder"]
                    n_categories = self.fits[column]["n_categories"]
                    sampled_data = gpu_sampler.sample(n)
                    sampled_data = cp.abs(sampled_data.reshape(-1, 1))
                    sampled_data = cp.round(sampled_data)
                    sampled_data = cp.clip(sampled_data, 0, n_categories - 1)
                    sampled_data = cp.asnumpy(sampled_data)
                    sampled_data = encoder.inverse_transform(sampled_data).reshape(-1)
                else:
                    sampled_data = gpu_sampler.sample(n)
                    sampled_data = cp.asnumpy(sampled_data.reshape(-1))

                sampled_data = sampled_data.astype(self.fits[column]["dtype"])
                if use_memmap:
                    memmap_outfile[start_idx:end_idx, column_id] = sampled_data
                else:
                    df[column] = sampled_data
        else:
            for column_id, column in enumerate(self.column_order):
                sampler = self.fits[column]["sampler"]
                if "encoder" in self.fits[column]:
                    # - must be categorical
                    encoder = self.fits[column]["encoder"]
                    n_categories = self.fits[column]["n_categories"]
                    sampled_data = sampler.sample(n)
                    sampled_data = np.abs(sampled_data.reshape(-1, 1))
                    sampled_data = np.round(sampled_data)
                    sampled_data = np.clip(sampled_data, 0, n_categories - 1)
                    sampled_data = encoder.inverse_transform(sampled_data).reshape(-1)
                else:
                    sampled_data = sampler.sample(n).reshape(-1)

                sampled_data = sampled_data.astype(self.fits[column]["dtype"])
                if use_memmap:
                    memmap_outfile[start_idx:end_idx, column_id] = sampled_data
                else:
                    df[column] = sampled_data

        if use_memmap:
            return None
        return df

    def save(self, path):
        with open(path, 'wb') as file_handler:
            pickle.dump(self, file_handler, protocol=pickle.HIGHEST_PROTOCOL)

    @classmethod
    def load(cls, path):
        with open(path, 'rb') as file_handler:
            model = pickle.load(file_handler)
        return model
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/kde_generator.py
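A minimal sketch for KDEGenerator on a toy frame, fitting on all rows (samples=1.0, since the 0.1 default subsamples); column names are hypothetical, and the module imports cupy/cuml at import time, so the RAPIDS stack must be present.

import pandas as pd

from syngen.generator.tabular import KDEGenerator

data = pd.DataFrame({
    "amount": [10.0, 12.5, 11.2, 50.3, 48.9, 9.8],
    "status": ["ok", "ok", "fail", "ok", "fail", "ok"],
})

gen = KDEGenerator()
gen.fit(data, categorical_columns=["status"], samples=1.0)
print(gen.sample(5))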
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, List, Tuple import numpy as np import pandas as pd import torch import torch.optim as optim import torch.utils.data from torch.nn import ( BatchNorm2d, BCELoss, Conv2d, ConvTranspose2d, CrossEntropyLoss, Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Sigmoid, SmoothL1Loss, ) from torch.nn import functional as F from torch.nn import init from torch.optim import Adam from sklearn import model_selection, preprocessing from syngen.generator.tabular.base_tabular_generator import BaseTabularGenerator from syngen.generator.tabular.data_transformer.ctab_data_transformer import ( CTABDataTransformer, ImageTransformer, ) from syngen.utils.types import ColumnType class CTABGenerator(BaseTabularGenerator): """ Adopted from: https://github.com/Team-TUD/CTAB-GAN Args: embedding_dim (int): Size of the random sample passed to the Generator. Defaults to 128. classifier_dim (tuple or list of ints): Size of the output samples for each one of the classifier Layers. A Linear Layer will be created for each one of the values provided. Defaults to (256, 256). l2scale (float): L2 regularization scaling. Defaults to 1e-5. batch_size (int): Number of data samples to process in each step. epochs (int): Number of training epochs. Defaults to 300. 
""" def __init__( self, classifier_dim: Tuple[int] = (256, 256, 256, 256), embedding_dim: int = 100, num_channels: int = 64, l2scale: float = 1e-5, batch_size: int = 500, epochs: int = 1, test_ratio: float = 0.1, **kwargs, ): self.embedding_dim = embedding_dim self.classifier_dim = classifier_dim self.num_channels = num_channels self.dside = None self.gside = None self.l2scale = l2scale self.batch_size = batch_size self.epochs = epochs self._device = torch.device( "cuda:0" if torch.cuda.is_available() else "cpu" ) self.test_ratio = test_ratio def column_check(self, data: pd.DataFrame, columns: list): data_cols = data.columns invalid_cols = [] for c in columns: if c not in data_cols: invalid_cols.append(c) return invalid_cols def set_device(self, device): self._device = device if self._generator is not None: self._generator.to(self._device) def fit( self, train_data: pd.DataFrame, categorical_columns: List[str] = [], log_columns: List[str] = [], integer_columns: List[str] = [], mixed_columns: Dict = {}, problem_type: Dict = {}, ): specified_cols = ( list(categorical_columns) + list(log_columns) + list(mixed_columns) + list(integer_columns) ) target_col = None target_index = None if problem_type: # - supports only single problem type target_col = list(problem_type.values())[0] specified_cols += [target_col] # - check for invalid columns invalid_cols = self.column_check(train_data, specified_cols) if len(invalid_cols): raise ValueError(f"invalid columns: {invalid_cols}") if target_col is not None: target_index = train_data.columns.get_loc(target_col) self.data_prep = DataPreprocessing( categorical_columns=categorical_columns, log_columns=log_columns, mixed_columns=mixed_columns, integer_columns=integer_columns, test_ratio=self.test_ratio, target_col=target_col, ) train_data = self.data_prep.transform(train_data) categorical_columns = self.data_prep.column_types[ ColumnType.CATEGORICAL ] mixed_columns = self.data_prep.column_types[ColumnType.MIXED] self.transformer = CTABDataTransformer( categorical_columns=categorical_columns, mixed_dict=mixed_columns ) self.transformer.fit(train_data) train_data = self.transformer.transform(train_data.values) data_sampler = Sampler(train_data, self.transformer.output_info) data_dim = self.transformer.output_dim self.cond_generator = Cond(train_data, self.transformer.output_info) sides = [4, 8, 16, 24, 32, 64, 128] col_size_d = data_dim + self.cond_generator.n_opt for i in sides: if i * i >= col_size_d: self.dside = i break sides = [4, 8, 16, 24, 32, 64, 128] col_size_g = data_dim for i in sides: if i * i >= col_size_g: self.gside = i break layers_G = determine_layers_gen( self.gside, self.embedding_dim + self.cond_generator.n_opt, self.num_channels, ) layers_D = determine_layers_disc(self.dside, self.num_channels) self._generator = Generator(self.gside, layers_G).to(self._device) discriminator = Discriminator(self.dside, layers_D).to(self._device) optimizer_params = dict( lr=2e-4, betas=(0.5, 0.9), eps=1e-3, weight_decay=self.l2scale ) optimizerG = Adam(self._generator.parameters(), **optimizer_params) optimizerD = Adam(discriminator.parameters(), **optimizer_params) st_ed = None classifier = None optimizerC = None if target_index is not None: st_ed = get_st_ed(target_index, self.transformer.output_info) classifier = Classifier(data_dim, self.classifier_dim, st_ed).to( self._device ) optimizerC = optim.Adam( classifier.parameters(), **optimizer_params ) self._generator.apply(weights_init) discriminator.apply(weights_init) self.Gtransformer = 
ImageTransformer(self.gside) self.Dtransformer = ImageTransformer(self.dside) steps_per_epoch = max(1, len(train_data) // self.batch_size) for i in range(self.epochs): for _ in range(steps_per_epoch): noisez = torch.randn( self.batch_size, self.embedding_dim, device=self._device ) condvec = self.cond_generator.sample_train(self.batch_size) c, m, col, opt = condvec c = torch.from_numpy(c).to(self._device) m = torch.from_numpy(m).to(self._device) noisez = torch.cat([noisez, c], dim=1) noisez = noisez.view( self.batch_size, self.embedding_dim + self.cond_generator.n_opt, 1, 1, ) perm = np.arange(self.batch_size) np.random.shuffle(perm) real = data_sampler.sample( self.batch_size, col[perm], opt[perm] ) c_perm = c[perm] real = torch.from_numpy(real.astype("float32")).to( self._device ) fake = self._generator(noisez) faket = self.Gtransformer.inverse_transform(fake) fakeact = apply_activate(faket, self.transformer.output_info) fake_cat = torch.cat([fakeact, c], dim=1) real_cat = torch.cat([real, c_perm], dim=1) real_cat_d = self.Dtransformer.transform(real_cat) fake_cat_d = self.Dtransformer.transform(fake_cat) optimizerD.zero_grad() y_real, _ = discriminator(real_cat_d) y_fake, _ = discriminator(fake_cat_d) loss_d = -(torch.log(y_real + 1e-4).mean()) - ( torch.log(1.0 - y_fake + 1e-4).mean() ) loss_d.backward() optimizerD.step() noisez = torch.randn( self.batch_size, self.embedding_dim, device=self._device ) condvec = self.cond_generator.sample_train(self.batch_size) c, m, col, opt = condvec c = torch.from_numpy(c).to(self._device) m = torch.from_numpy(m).to(self._device) noisez = torch.cat([noisez, c], dim=1) noisez = noisez.view( self.batch_size, self.embedding_dim + self.cond_generator.n_opt, 1, 1, ) optimizerG.zero_grad() fake = self._generator(noisez) faket = self.Gtransformer.inverse_transform(fake) fakeact = apply_activate(faket, self.transformer.output_info) fake_cat = torch.cat([fakeact, c], dim=1) fake_cat = self.Dtransformer.transform(fake_cat) y_fake, info_fake = discriminator(fake_cat) cross_entropy = cond_loss( faket, self.transformer.output_info, c, m ) _, info_real = discriminator(real_cat_d) g = -(torch.log(y_fake + 1e-4).mean()) + cross_entropy g.backward(retain_graph=True) loss_mean = torch.norm( torch.mean(info_fake.view(self.batch_size, -1), dim=0) - torch.mean(info_real.view(self.batch_size, -1), dim=0), 1, ) loss_std = torch.norm( torch.std(info_fake.view(self.batch_size, -1), dim=0) - torch.std(info_real.view(self.batch_size, -1), dim=0), 1, ) loss_info = loss_mean + loss_std loss_info.backward() optimizerG.step() if problem_type: fake = self._generator(noisez) faket = self.Gtransformer.inverse_transform(fake) fakeact = apply_activate( faket, self.transformer.output_info ) real_pre, real_label = classifier(real) fake_pre, fake_label = classifier(fakeact) c_loss = CrossEntropyLoss() if (st_ed[1] - st_ed[0]) == 1: c_loss = SmoothL1Loss() real_label = real_label.type_as(real_pre) fake_label = fake_label.type_as(fake_pre) real_label = torch.reshape(real_label, real_pre.size()) fake_label = torch.reshape(fake_label, fake_pre.size()) elif (st_ed[1] - st_ed[0]) == 2: c_loss = BCELoss() real_label = real_label.type_as(real_pre) fake_label = fake_label.type_as(fake_pre) loss_cc = c_loss(real_pre, real_label) loss_cg = c_loss(fake_pre, fake_label) optimizerG.zero_grad() loss_cg.backward() optimizerG.step() optimizerC.zero_grad() loss_cc.backward() optimizerC.step() def sample(self, n, **kwargs): assert hasattr(self, "_generator"), "`fit` function must be called prior to `sample`" 
self._generator.eval() output_info = self.transformer.output_info steps = n // self.batch_size + 1 data = [] for i in range(steps): noisez = torch.randn( self.batch_size, self.embedding_dim, device=self._device ) condvec = self.cond_generator.sample(self.batch_size) c = condvec c = torch.from_numpy(c).to(self._device) noisez = torch.cat([noisez, c], dim=1) noisez = noisez.view( self.batch_size, self.embedding_dim + self.cond_generator.n_opt, 1, 1, ) fake = self._generator(noisez) faket = self.Gtransformer.inverse_transform(fake) fakeact = apply_activate(faket, output_info) data.append(fakeact.detach().cpu().numpy()) data = np.concatenate(data, axis=0) result = self.transformer.inverse_transform(data) output = self.data_prep.inverse_prep(result) return output.iloc[:n] class Classifier(Module): def __init__(self, input_dim, dis_dims, st_ed): super(Classifier, self).__init__() dim = input_dim - (st_ed[1] - st_ed[0]) seq = [] self.str_end = st_ed for item in list(dis_dims): seq += [Linear(dim, item), LeakyReLU(0.2), Dropout(0.5)] dim = item if (st_ed[1] - st_ed[0]) == 1: seq += [Linear(dim, 1)] elif (st_ed[1] - st_ed[0]) == 2: seq += [Linear(dim, 1), Sigmoid()] else: seq += [Linear(dim, (st_ed[1] - st_ed[0]))] self.seq = Sequential(*seq) def forward(self, input): label = None if (self.str_end[1] - self.str_end[0]) == 1: label = input[:, self.str_end[0] : self.str_end[1]] else: label = torch.argmax( input[:, self.str_end[0] : self.str_end[1]], axis=-1 ) new_imp = torch.cat( (input[:, : self.str_end[0]], input[:, self.str_end[1] :]), 1 ) if ((self.str_end[1] - self.str_end[0]) == 2) | ( (self.str_end[1] - self.str_end[0]) == 1 ): return self.seq(new_imp).view(-1), label else: return self.seq(new_imp), label def apply_activate(data, output_info): data_t = [] st = 0 for item in output_info: if item[1] == "tanh": ed = st + item[0] data_t.append(torch.tanh(data[:, st:ed])) st = ed elif item[1] == "softmax": ed = st + item[0] data_t.append(F.gumbel_softmax(data[:, st:ed], tau=0.2)) st = ed return torch.cat(data_t, dim=1) def get_st_ed(target_col_index, output_info): st = 0 c = 0 tc = 0 for item in output_info: if c == target_col_index: break if item[1] == "tanh": st += item[0] elif item[1] == "softmax": st += item[0] c += 1 tc += 1 ed = st + output_info[tc][0] return (st, ed) def random_choice_prob_index_sampling(probs, col_idx): option_list = [] for i in col_idx: pp = probs[i] option_list.append(np.random.choice(np.arange(len(probs[i])), p=pp)) return np.array(option_list).reshape(col_idx.shape) def random_choice_prob_index(a, axis=1): r = np.expand_dims(np.random.rand(a.shape[1 - axis]), axis=axis) return (a.cumsum(axis=axis) > r).argmax(axis=axis) def maximum_interval(output_info): max_interval = 0 for item in output_info: max_interval = max(max_interval, item[0]) return max_interval class Cond(object): def __init__(self, data, output_info): self.model = [] st = 0 counter = 0 for item in output_info: if item[1] == "tanh": st += item[0] elif item[1] == "softmax": ed = st + item[0] counter += 1 self.model.append(np.argmax(data[:, st:ed], axis=-1)) st = ed self.interval = [] self.n_col = 0 self.n_opt = 0 st = 0 self.p = np.zeros((counter, maximum_interval(output_info))) self.p_sampling = [] for item in output_info: if item[1] == "tanh": st += item[0] elif item[1] == "softmax": ed = st + item[0] tmp = np.sum(data[:, st:ed], axis=0) tmp_sampling = np.sum(data[:, st:ed], axis=0) tmp = np.log(tmp + 1) tmp = tmp / np.sum(tmp) tmp_sampling = tmp_sampling / np.sum(tmp_sampling) 
self.p_sampling.append(tmp_sampling) self.p[self.n_col, : item[0]] = tmp self.interval.append((self.n_opt, item[0])) self.n_opt += item[0] self.n_col += 1 st = ed self.interval = np.asarray(self.interval) def sample_train(self, batch): if self.n_col == 0: return None idx = np.random.choice(np.arange(self.n_col), batch) vec = np.zeros((batch, self.n_opt), dtype="float32") mask = np.zeros((batch, self.n_col), dtype="float32") mask[np.arange(batch), idx] = 1 opt1prime = random_choice_prob_index(self.p[idx]) for i in np.arange(batch): vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1 return vec, mask, idx, opt1prime def sample(self, batch): if self.n_col == 0: return None idx = np.random.choice(np.arange(self.n_col), batch) vec = np.zeros((batch, self.n_opt), dtype="float32") opt1prime = random_choice_prob_index_sampling(self.p_sampling, idx) for i in np.arange(batch): vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1 return vec def cond_loss(data, output_info, c, m): loss = [] st = 0 st_c = 0 for item in output_info: if item[1] == "tanh": st += item[0] elif item[1] == "softmax": ed = st + item[0] ed_c = st_c + item[0] tmp = F.cross_entropy( data[:, st:ed], torch.argmax(c[:, st_c:ed_c], dim=1), reduction="none", ) loss.append(tmp) st = ed st_c = ed_c loss = torch.stack(loss, dim=1) return (loss * m).sum() / data.size()[0] class Sampler(object): def __init__(self, data, output_info): super(Sampler, self).__init__() self.data = data self.model = [] self.n = len(data) st = 0 for item in output_info: if item[1] == "tanh": st += item[0] elif item[1] == "softmax": ed = st + item[0] tmp = [] for j in range(item[0]): tmp.append(np.nonzero(data[:, st + j])[0]) self.model.append(tmp) st = ed def sample(self, n, col, opt): if col is None: idx = np.random.choice(np.arange(self.n), n) return self.data[idx] idx = [] for c, o in zip(col, opt): idx.append(np.random.choice(self.model[c][o])) return self.data[idx] class Discriminator(Module): def __init__(self, side, layers): super(Discriminator, self).__init__() self.side = side info = len(layers) - 2 self.seq = Sequential(*layers) self.seq_info = Sequential(*layers[:info]) def forward(self, input): return (self.seq(input)), self.seq_info(input) class Generator(Module): def __init__(self, side, layers): super(Generator, self).__init__() self.side = side self.seq = Sequential(*layers) def forward(self, input_): return self.seq(input_) def determine_layers_disc(side, num_channels): layer_dims = [(1, side), (num_channels, side // 2)] while layer_dims[-1][1] > 3 and len(layer_dims) < 4: layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2)) layers_D = [] for prev, curr in zip(layer_dims, layer_dims[1:]): layers_D += [ Conv2d(prev[0], curr[0], 4, 2, 1, bias=False), BatchNorm2d(curr[0]), LeakyReLU(0.2, inplace=True), ] print() layers_D += [ Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0), Sigmoid(), ] return layers_D def determine_layers_gen(side, embedding_dim, num_channels): layer_dims = [(1, side), (num_channels, side // 2)] while layer_dims[-1][1] > 3 and len(layer_dims) < 4: layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2)) layers_G = [ ConvTranspose2d( embedding_dim, layer_dims[-1][0], layer_dims[-1][1], 1, 0, output_padding=0, bias=False, ) ] for prev, curr in zip(reversed(layer_dims), reversed(layer_dims[:-1])): layers_G += [ BatchNorm2d(prev[0]), ReLU(True), ConvTranspose2d( prev[0], curr[0], 4, 2, 1, output_padding=0, bias=True ), ] return layers_G def weights_init(m): classname = m.__class__.__name__ if 
classname.find("Conv") != -1: init.normal_(m.weight.data, 0.0, 0.02) elif classname.find("BatchNorm") != -1: init.normal_(m.weight.data, 1.0, 0.02) init.constant_(m.bias.data, 0) class DataPreprocessing(object): def __init__( self, categorical_columns: list, log_columns: list, mixed_columns: dict, integer_columns: list, test_ratio: float, target_col: str = None, ): self.categorical_columns = categorical_columns self.log_columns = log_columns self.mixed_columns = mixed_columns self.integer_columns = integer_columns self.column_types = dict() self.column_types[ColumnType.CATEGORICAL] = [] self.column_types[ColumnType.MIXED] = {} self.lower_bounds = {} self.label_encoder_list = [] self.CONSTANT_INT = -9999999 if target_col is not None: self.target_col = target_col self.test_ratio = test_ratio super().__init__() def transform(self, raw_df: pd.DataFrame): if hasattr(self, "target_col"): y_real = raw_df[self.target_col] X_real = raw_df.drop(columns=[self.target_col]) ( X_train_real, _, y_train_real, _, ) = model_selection.train_test_split( X_real, y_real, test_size=self.test_ratio, stratify=y_real, random_state=42, ) X_train_real.loc[:, self.target_col] = y_train_real else: X_train_real = raw_df self.df = X_train_real self.df = self.df.replace(r" ", np.nan) self.df = self.df.fillna("empty") all_columns = set(self.df.columns) irrelevant_missing_columns = set(self.categorical_columns) relevant_missing_columns = list( all_columns - irrelevant_missing_columns ) for i in relevant_missing_columns: if i in self.log_columns: if "empty" in list(self.df[i].values): self.df[i] = self.df[i].apply( lambda x: self.CONSTANT_INT if x == "empty" else x ) self.mixed_columns[i] = [self.CONSTANT_INT] elif i in list(self.mixed_columns.keys()): if "empty" in list(self.df[i].values): self.df[i] = self.df[i].apply( lambda x: self.CONSTANT_INT if x == "empty" else x ) self.mixed_columns[i].append(self.CONSTANT_INT) else: if "empty" in list(self.df[i].values): self.df[i] = self.df[i].apply( lambda x: self.CONSTANT_INT if x == "empty" else x ) self.mixed_columns[i] = [self.CONSTANT_INT] if self.log_columns: for log_column in self.log_columns: valid_indices = [] for idx, val in enumerate(self.df[log_column].values): if val != self.CONSTANT_INT: valid_indices.append(idx) eps = 1 lower = np.min(self.df[log_column].iloc[valid_indices].values) self.lower_bounds[log_column] = lower if lower > 0: self.df[log_column] = self.df[log_column].apply( lambda x: np.log(x) if x != self.CONSTANT_INT else self.CONSTANT_INT ) elif lower == 0: self.df[log_column] = self.df[log_column].apply( lambda x: np.log(x + eps) if x != self.CONSTANT_INT else self.CONSTANT_INT ) else: self.df[log_column] = self.df[log_column].apply( lambda x: np.log(x - lower + eps) if x != self.CONSTANT_INT else self.CONSTANT_INT ) for column_index, column in enumerate(self.df.columns): if column in self.categorical_columns: label_encoder = preprocessing.LabelEncoder() self.df[column] = self.df[column].astype(str) label_encoder.fit(self.df[column]) current_label_encoder = dict() current_label_encoder["column"] = column current_label_encoder["label_encoder"] = label_encoder transformed_column = label_encoder.transform(self.df[column]) self.df[column] = transformed_column self.label_encoder_list.append(current_label_encoder) self.column_types[ColumnType.CATEGORICAL].append(column_index) elif column in self.mixed_columns: self.column_types[ColumnType.MIXED][ column_index ] = self.mixed_columns[column] return self.df def inverse_prep(self, data, eps=1): df_sample = 
pd.DataFrame(data, columns=self.df.columns)
        for i in range(len(self.label_encoder_list)):
            le = self.label_encoder_list[i]["label_encoder"]
            df_sample[self.label_encoder_list[i]["column"]] = df_sample[
                self.label_encoder_list[i]["column"]
            ].astype(int)
            df_sample[
                self.label_encoder_list[i]["column"]
            ] = le.inverse_transform(
                df_sample[self.label_encoder_list[i]["column"]]
            )
        if self.log_columns:
            for i in df_sample:
                if i in self.log_columns:
                    lower_bound = self.lower_bounds[i]
                    if lower_bound > 0:
                        df_sample[i] = df_sample[i].apply(lambda x: np.exp(x))
                    elif lower_bound == 0:
                        df_sample[i] = df_sample[i].apply(
                            lambda x: np.ceil(np.exp(x) - eps)
                            if (np.exp(x) - eps) < 0
                            else (np.exp(x) - eps)
                        )
                    else:
                        df_sample[i] = df_sample[i].apply(
                            lambda x: np.exp(x) - eps + lower_bound
                        )
        if self.integer_columns:
            for column in self.integer_columns:
                df_sample[column] = np.round(df_sample[column].values)
                df_sample[column] = df_sample[column].astype(int)
        df_sample.replace(self.CONSTANT_INT, np.nan, inplace=True)
        df_sample.replace("empty", np.nan, inplace=True)
        return df_sample
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/ctab.py
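# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original file above: the `Cond`
# helper implements CTAB-GAN's training-by-sampling conditional vectors.
# The import path is assumed from this file's location; the toy
# `output_info` follows the (dim, activation) tuples used throughout ctab.py.
# ---------------------------------------------------------------------------
import numpy as np
from syngen.generator.tabular.ctab import Cond

# One-hot data for a single categorical column with 3 categories.
data = np.eye(3, dtype="float32")[np.array([0, 0, 1, 2, 2, 2])]
output_info = [(3, "softmax")]
cond = Cond(data, output_info)

# Training-time sampling: vectors, masks, and the selected column/category.
vec, mask, col_idx, opt_idx = cond.sample_train(4)
print(vec.shape)   # (4, 3): one-hot conditional vectors
# Generation-time sampling uses the raw (non log-damped) frequencies.
print(cond.sample(4).shape)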
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os.path from abc import ABC import tqdm import cupy as cp import numpy as np import multiprocessing from functools import partial from syngen.utils.io_utils import dump_dataframe from syngen.utils.types.dataframe_type import DataFrameType from syngen.utils.memory_manager import MemoryManager from syngen.generator.tabular import BaseTabularGenerator class ChunkedBaseTabularGenerator(BaseTabularGenerator, ABC): """ A Chunked Base Tabular Generator contains the base functionality of the multiprocess (Multi-GPU) data generation. """ def chunked_sampling(self, n_samples: int, save_path: str, fname: str, n_workers: int = 0, gpus: int = -1, use_memmap=False, memory_threshold=0.8, verbose=True): memory_manager = MemoryManager() if gpus < 0: gpus = memory_manager.get_available_gpus() emp_n = 1000 est_samples = self.sample(emp_n, gpu=False) mem_usage = est_samples.memory_usage(index=True, deep=True).sum() est_sample_mem = int(np.ceil(mem_usage / emp_n * self._space_complexity_factor())) est_mem = est_sample_mem * n_samples memmap_kwargs = None chunk_save_path = None if use_memmap: assert fname.endswith(".npy") memmap_shape = list(est_samples.shape) memmap_shape[0] = n_samples memmap_shape = tuple(memmap_shape) memmap_dtype = est_samples.dtypes.iloc[0] memmap_filename = os.path.join(save_path, fname) memmap_kwargs = dict( filename=memmap_filename, ) memmap_outfile = np.lib.format.open_memmap(memmap_filename, dtype=memmap_dtype, shape=memmap_shape, mode='w+') else: chunk_format = '{chunk_id}' chunk_save_path = os.path.join(save_path, f'{fname}_{chunk_format}') if gpus > 0: mem_avail = memory_manager.get_min_available_across_gpus_memory(gpus=gpus) n_workers = gpus chunk_partial = partial(self._generate_chunk, chunk_save_path=chunk_save_path, gpu=True, gpus=gpus, memmap_kwargs=memmap_kwargs) else: mem_avail = memory_manager.get_available_virtual_memory() chunk_partial = partial(self._generate_chunk, chunk_save_path=chunk_save_path, gpu=False, memmap_kwargs=memmap_kwargs) if mem_avail * memory_threshold > est_mem: df = self.sample(n_samples, gpu=True, memmap_kwargs=memmap_kwargs, start_idx=0, end_idx=n_samples) if chunk_save_path: chunk_save_path = chunk_save_path.format(chunk_id=0) dump_dataframe(df, save_path=chunk_save_path, format='parquet') res = [chunk_save_path] else: mem_avail = int(mem_avail * memory_threshold) # to avoid OOM max_samples_per_chunk = int(mem_avail // est_sample_mem) n_chunks = n_samples//max_samples_per_chunk + (1 if n_samples % max_samples_per_chunk > 0 else 0) samples_per_chunk = n_samples // n_chunks chunk_sizes = [samples_per_chunk] * n_chunks if n_samples % n_chunks > 0: chunk_sizes.append(n_samples % n_chunks) multiprocessing.set_start_method('spawn', force=True) with multiprocessing.Pool(processes=n_workers) as pool: res = pool.imap_unordered(chunk_partial, enumerate(zip(chunk_sizes, np.cumsum(chunk_sizes))), chunksize=(len(chunk_sizes)+n_workers-1)//n_workers) if verbose: 
res = tqdm.tqdm(res, total=len(chunk_sizes)) res = list(res) return res def _generate_chunk(self, chunk_info, chunk_save_path, gpu, memmap_kwargs, gpus=0): chunk_id, (chunk_size, chunk_end) = chunk_info if gpu: gpu_id = int(multiprocessing.current_process()._identity[0]) % gpus with cp.cuda.Device(gpu_id): df = self.sample(chunk_size, gpu=True, memmap_kwargs=memmap_kwargs, start_idx=chunk_end-chunk_size, end_idx=chunk_end) else: df = self.sample(chunk_size, gpu=False, memmap_kwargs=memmap_kwargs, start_idx=chunk_end-chunk_size, end_idx=chunk_end) if chunk_save_path: chunk_save_path = chunk_save_path.format(chunk_id=chunk_id) dump_dataframe(df, save_path=chunk_save_path, format='parquet') return chunk_save_path def _space_complexity_factor(self): return 2.0 # we support float16 but it requires intermediate float32 @property def supports_memmap(self) -> bool: return True def sample(self, num_samples, *args, gpu=False, **kwargs) -> DataFrameType: """generate `num_samples` from generator Args: num_samples (int): number of samples to generate gpu (bool): whether to use cpu or gpu implementation (default: False) *args: optional positional args **kwargs: optional key-word arguments """ raise NotImplementedError()
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/chunked_tabular_generator.py
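# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file above: any concrete
# subclass that implements `sample` (e.g. the RandomMVGenerator defined
# elsewhere in this package) inherits multi-process chunked generation.
# Paths and sizes are placeholders; the module imports cupy at the top, so a
# CUDA-capable environment is assumed.
# ---------------------------------------------------------------------------
import os
from syngen.generator.tabular.random import RandomMVGenerator

gen = RandomMVGenerator()
gen.fit(ndims=8)  # random mean/covariance over 8 float columns
os.makedirs("./generated", exist_ok=True)

# With use_memmap=True a single .npy file sized for all rows is created and
# worker processes fill disjoint [start_idx, end_idx) slices of it.
gen.chunked_sampling(
    n_samples=1_000_000,
    save_path="./generated",
    fname="samples.npy",
    use_memmap=True,
)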
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, List import cupy as cp import pickle import numpy as np import pandas as pd from syngen.generator.tabular.chunked_tabular_generator import ChunkedBaseTabularGenerator import warnings warnings.simplefilter(action="ignore", category=FutureWarning) class RandomMVGenerator(ChunkedBaseTabularGenerator): """Random Multivariate Gaussian generator """ def __init__(self, **kwargs): super().__init__(**kwargs) self.ndims = None self.column_order = None def fit(self, data: Optional[pd.DataFrame] = None, ndims: Optional[int] = None, columns: Optional[List[str]] = None, categorical_columns=(), verbose=False): """ random ignores categorical columns at the moment """ assert ndims is not None or data is not None or self.ndims is not None or columns is not None if data is not None: ndims = len(data.columns) self.column_order = list(data.columns) if columns is not None: self.column_order = columns ndims = len(columns) if ndims is None: ndims = self.ndims self.mu = np.random.randn(ndims).astype(np.float32) self.cov = np.eye(ndims) * np.abs( np.random.randn(ndims).reshape(-1, 1) ).astype(np.float32) self.ndims = ndims def _space_complexity_factor(self): return 2.0 def sample(self, n, gpu=False, memmap_kwargs=None, start_idx=0, end_idx=None, **kwargs): use_memmap = memmap_kwargs is not None if use_memmap: memmap_outfile = np.load(memmap_kwargs['filename'], mmap_mode='r+') if gpu: samples = cp.random.multivariate_normal(self.mu, self.cov, size=n, dtype=cp.float32) samples = cp.asnumpy(samples) else: samples = np.random.multivariate_normal(self.mu, self.cov, size=n).astype(np.float32) if use_memmap: memmap_outfile[start_idx:end_idx] = samples return None else: df = pd.DataFrame(samples) if self.column_order is None: df.columns = df.columns.astype(str) else: df.columns = self.column_order return df def save(self, path): with open(path, 'wb') as file_handler: pickle.dump(self, file_handler, protocol=pickle.HIGHEST_PROTOCOL) @classmethod def load(cls, path): with open(path, 'rb') as file_handler: model = pickle.load(file_handler) return model
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/random.py
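# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original file above: the random
# multivariate Gaussian can be fitted from a DataFrame (copying its width and
# column names) or from an explicit `ndims`. Import path assumed from the
# file location.
# ---------------------------------------------------------------------------
import pandas as pd
from syngen.generator.tabular.random import RandomMVGenerator

gen = RandomMVGenerator()
gen.fit(ndims=3)        # random mean and diagonal covariance in R^3
df = gen.sample(5)      # pandas.DataFrame with string columns '0', '1', '2'

gen2 = RandomMVGenerator()
gen2.fit(data=pd.DataFrame({"a": [0.1, 0.2], "b": [1.0, 2.0]}))
df2 = gen2.sample(4)    # columns ['a', 'b'] are preserved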
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import gc
import multiprocessing
from functools import partial
from typing import Callable, List

from tqdm import tqdm

from syngen.generator.tabular.chunked_tabular_generator import ChunkedBaseTabularGenerator
from syngen.utils.io_utils import dump_dataframe
from syngen.utils.memory_manager import MemoryManager


def _generate_samples(
    gen,
    n_samples: int,
    fname: str,
    save_path: str,
    post_gen_fn: Callable = None,
    i: int = 0,
):
    """Multiprocessing-friendly sample generation worker."""
    fp = os.path.join(save_path, f"{fname}_{i}")
    samples = gen.sample(n_samples)
    if post_gen_fn is not None:
        samples = post_gen_fn(samples)
    dump_dataframe(samples, fp, format='parquet')
    return fp


def pass_through(x):
    return x


def tabular_chunk_sample_generation(
    gen,
    n_samples: int,
    save_path: str,
    fname: str,
    post_gen_fn: Callable = pass_through,
    num_workers: int = 1,
    use_memmap=False,
    verbose=True,
) -> List[str]:
    """
    Chunk large sample generation into parts, and dump parquet files into
    save_path to avoid memory issues.

    Args:
        gen: generator to sample new synthetic data from, must implement `sample`
        n_samples (int): number of samples to generate
        save_path (str): directory to dump generated samples
        fname (str): base file name for the dumped chunk files
        post_gen_fn (Callable): will be called on generated samples
        num_workers (int): number of workers to speed up generation using multiprocessing
    Returns:
        List[str]: paths to the dumped sample files
    """
    if isinstance(gen, ChunkedBaseTabularGenerator):
        return gen.chunked_sampling(int(n_samples),
                                    save_path=save_path,
                                    fname=fname,
                                    gpus=-1,
                                    use_memmap=use_memmap,
                                    verbose=verbose,
                                    )

    n_samples = int(n_samples)

    # - check if mem available
    gc.collect()
    mem_avail = MemoryManager().get_available_virtual_memory()

    emp_n = 1000
    est_samples = gen.sample(emp_n)
    mem_usage = est_samples.memory_usage(index=True, deep=True).sum()
    est_mem = (mem_usage / emp_n) * n_samples

    # - path
    file_paths = []

    # - gen samples
    if n_samples <= 1e6 and mem_avail > est_mem:
        file_paths.append(
            _generate_samples(
                gen=gen,
                n_samples=n_samples,
                fname=fname,
                save_path=save_path,
                post_gen_fn=post_gen_fn,
                i=n_samples,
            )
        )
    else:
        r = (est_mem // mem_avail) + 10
        inc = int(min(n_samples // r, 5e6))
        num_iters = n_samples / inc
        if num_iters - n_samples // inc > 0.0:
            num_iters += 1
        num_iters = int(num_iters)

        generate_samples_p = partial(
            _generate_samples, gen, inc, fname, save_path, post_gen_fn
        )
        if num_workers > 1:
            multiprocessing.set_start_method("spawn", force=True)
            with multiprocessing.Pool(processes=num_workers) as pool:
                tasks = pool.imap_unordered(generate_samples_p, range(0, num_iters))
                if verbose:
                    tasks = tqdm(tasks, total=num_iters)
                file_paths = list(tasks)
        else:
            itr = range(0, n_samples, inc)
            if verbose:
                itr = tqdm(itr)
            for i in itr:
                file_paths.append(generate_samples_p(i))

    return file_paths
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/utils.py
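# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file above: because
# RandomMVGenerator subclasses ChunkedBaseTabularGenerator, the helper defers
# to `chunked_sampling`; for plain BaseTabularGenerator instances it falls
# back to the local chunking loop and dumps parquet chunks. Paths are
# placeholders; the chunked path assumes a GPU environment (cupy import).
# ---------------------------------------------------------------------------
import os
from syngen.generator.tabular.random import RandomMVGenerator
from syngen.generator.tabular.utils import tabular_chunk_sample_generation

gen = RandomMVGenerator()
gen.fit(ndims=4)
os.makedirs("./generated", exist_ok=True)
paths = tabular_chunk_sample_generation(
    gen, n_samples=100_000, save_path="./generated", fname="mv_samples"
)
print(paths)  # list of dumped chunk file paths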
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import torch class BaseTabularGenerator(abc.ABC): """Base class for all tabular generators""" def __init__(self, **kwargs): pass @classmethod def get_generators(cls, include_parents=True): """Recursively find subclasses of `BaseTabularGenerator` Args: include_parents (bool): whether to include parents to other classes. (default: `True`) """ generators = dict() for child in cls.__subclasses__(): children = child.get_generators(include_parents) generators.update(children) if include_parents or not children: if abc.ABC not in child.__bases__: generators[child.__name__] = child return generators def fit(self, *args, **kwargs): """fit function for the generator Args: *args: optional positional args **kwargs: optional key-word arguments """ raise NotImplementedError() def sample(self, num_samples, *args, **kwargs): """generate `num_samples` from generator Args: num_samples (int): number of samples to generate *args: optional positional args **kwargs: optional key-word arguments """ raise NotImplementedError() def save(self, path): raise NotImplementedError() @property def supports_memmap(self) -> bool: return False @classmethod def load(cls, path): raise NotImplementedError() @staticmethod def add_args(parser): return parser
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/base_tabular_generator.py
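# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file above: the minimal
# contract a concrete generator must satisfy, plus the subclass registry
# exposed by `get_generators`. `ConstantGenerator` is a hypothetical example
# class, not part of the package.
# ---------------------------------------------------------------------------
import pandas as pd
from syngen.generator.tabular.base_tabular_generator import BaseTabularGenerator


class ConstantGenerator(BaseTabularGenerator):
    """Toy generator that replays per-column means of the fitted data."""

    def fit(self, data: pd.DataFrame, **kwargs):
        self._means = data.mean()

    def sample(self, num_samples, *args, **kwargs):
        return pd.DataFrame([self._means] * num_samples).reset_index(drop=True)


# Name -> class mapping of every BaseTabularGenerator subclass defined so far.
print(BaseTabularGenerator.get_generators())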
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import sys import warnings from typing import Optional, List import cudf import numpy as np import pandas as pd import torch from packaging import version from torch import optim from torch.nn import ( BatchNorm1d, Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, functional, ) from syngen.generator.tabular.base_tabular_generator import BaseTabularGenerator from syngen.generator.tabular.data_transformer.ctgan_data_transformer import ( CTGANDataTransformer, ) class CTGANGenerator(BaseTabularGenerator): """Conditional Table GAN Generator. For more details about the process, please check the [Modeling Tabular data using Conditional GAN](https://arxiv.org/abs/1907.00503) paper. Adopted from: https://github.com/sdv-dev/CTGAN Args: embedding_dim (int): Size of the random sample passed to the Generator. Defaults to 128. generator_dim (tuple or list of ints): Size of the output samples for each one of the Residuals. A Residual Layer will be created for each one of the values provided. Defaults to (256, 256). discriminator_dim (tuple or list of ints): Size of the output samples for each one of the Discriminator Layers. A Linear Layer will be created for each one of the values provided. Defaults to (256, 256). generator_lr (float):Learning rate for the generator. Defaults to 2e-4. generator_decay (float):Generator weight decay for the Adam Optimizer. Defaults to 1e-6. discriminator_lr (float):Learning rate for the discriminator. Defaults to 2e-4. discriminator_decay (float):Discriminator weight decay for the Adam Optimizer. Defaults to 1e-6. batch_size (int):Number of data samples to process in each step. discriminator_steps (int):Number of discriminator updates to do for each generator update. From the WGAN paper: https://arxiv.org/abs/1701.07875. WGAN paper default is 5. Default used is 1 to match original CTGAN implementation. log_frequency (boolean):Whether to use log frequency of categorical levels in conditional sampling. Defaults to ``True``. verbose (boolean):Whether to have print statements for progress results. Defaults to ``False``. epochs (int):Number of training epochs. Defaults to 300. pac (int):Number of samples to group together when applying the discriminator. Defaults to 10. gpu (bool):Whether to attempt to use cuda for GPU computation. If this is False or CUDA is not available, CPU will be used. Defaults to ``True``. 
""" def __init__( self, embedding_dim=128, generator_dim=(256, 256), discriminator_dim=(256, 256), generator_lr=2e-4, generator_decay=1e-6, discriminator_lr=2e-4, discriminator_decay=1e-6, batch_size=500, discriminator_steps=1, log_frequency=True, verbose=False, epochs=300, pac=10, gpu=True, **kwargs, ): super(CTGANGenerator, self).__init__(**kwargs) assert batch_size % 2 == 0 logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) logger = logging.getLogger(__name__) self.log = logger self._embedding_dim = embedding_dim self._generator_dim = generator_dim self._discriminator_dim = discriminator_dim self._generator_lr = generator_lr self._generator_decay = generator_decay self._discriminator_lr = discriminator_lr self._discriminator_decay = discriminator_decay self._batch_size = int(batch_size) self._discriminator_steps = discriminator_steps self._log_frequency = log_frequency self._verbose = verbose self._epochs = epochs self.pac = pac if not gpu or not torch.cuda.is_available(): device = "cpu" elif isinstance(gpu, str): device = gpu else: device = "cuda" self._device = torch.device(device) self._transformer = None self._data_sampler = None self._generator = None @staticmethod def _gumbel_softmax(logits, tau=1, hard=False, eps=1e-10, dim=-1): """Deals with the instability of the gumbel_softmax for older versions of torch. For more details about the issue: https://drive.google.com/file/d/1AA5wPfZ1kquaRtVruCd6BiYZGcDeNxyP/view?usp=sharing Parameters ********** logits: […, num_features] unnormalized log probabilities tau: non-negative scalar temperature hard: if True, the returned samples will be discretized as one-hot vectors, but will be differentiated as if it is the soft sample in autograd dim (int): a dimension along which softmax will be computed. Default: -1. Returns ******* Sampled tensor of same shape as logits from the Gumbel-Softmax distribution. """ if version.parse(torch.__version__) < version.parse("1.2.0"): for i in range(10): transformed = functional.gumbel_softmax( logits, tau=tau, hard=hard, eps=eps, dim=dim ) if not torch.isnan(transformed).any(): return transformed raise ValueError("gumbel_softmax returning NaN.") return functional.gumbel_softmax( logits, tau=tau, hard=hard, eps=eps, dim=dim ) def _apply_activate(self, data): """Apply proper activation function to the output of the generator.""" data_t = [] st = 0 for column_info in self._transformer.output_info_list: for span_info in column_info: if span_info.activation_fn == "tanh": ed = st + span_info.dim data_t.append(torch.tanh(data[:, st:ed])) st = ed elif span_info.activation_fn == "softmax": ed = st + span_info.dim transformed = self._gumbel_softmax(data[:, st:ed], tau=0.2) data_t.append(transformed) st = ed else: assert 0 return torch.cat(data_t, dim=1) def _cond_loss(self, data, c, m): """Compute the cross entropy loss on the fixed discrete column.""" loss = [] st = 0 st_c = 0 for column_info in self._transformer.output_info_list: for span_info in column_info: if ( len(column_info) != 1 or span_info.activation_fn != "softmax" ): # not discrete column st += span_info.dim else: ed = st + span_info.dim ed_c = st_c + span_info.dim tmp = functional.cross_entropy( data[:, st:ed], torch.argmax(c[:, st_c:ed_c], dim=1), reduction="none", ) loss.append(tmp) st = ed st_c = ed_c loss = torch.stack(loss, dim=1) return (loss * m).sum() / data.size()[0] def _validate_discrete_columns(self, train_data, categorical_columns): """Check whether ``categorical_columns`` exists in ``train_data``. 
Args: train_data (numpy.ndarray or pandas.DataFrame): Training Data. It must be a 2-dimensional numpy array or a pandas.DataFrame. categorical_columns (list-like): List of discrete columns to be used to generate the Conditional Vector. If ``train_data`` is a Numpy array, this list should contain the integer indices of the columns. Otherwise, if it is a ``pandas.DataFrame``, this list should contain the column names. """ if isinstance(train_data, (pd.DataFrame, cudf.DataFrame)): invalid_columns = set(categorical_columns) - set( train_data.columns ) elif isinstance(train_data, np.ndarray): invalid_columns = [] for column in categorical_columns: if column < 0 or column >= train_data.shape[1]: invalid_columns.append(column) else: raise TypeError( "``train_data`` should be either pd.DataFrame or np.array." ) if invalid_columns: raise ValueError( "Invalid columns found: {}".format(invalid_columns) ) def fit(self, train_data, categorical_columns=tuple(), epochs=None, **kwargs): """Fit the CTGAN Synthesizer models to the training data. Args: train_data (numpy.ndarray or pandas.DataFrame): Training Data. It must be a 2-dimensional numpy array or a pandas.DataFrame. categorical_columns (list-like): List of discrete columns to be used to generate the Conditional Vector. If ``train_data`` is a Numpy array, this list should contain the integer indices of the columns. Otherwise, if it is a ``pandas.DataFrame``, this list should contain the column names. """ self._validate_discrete_columns(train_data, categorical_columns) if epochs is None: epochs = self._epochs else: warnings.warn( ( "`epochs` argument in `fit` method has been deprecated and will be removed " "in a future version. Please pass `epochs` to the constructor instead" ), DeprecationWarning, ) self._transformer = CTGANDataTransformer() self._transformer.fit(train_data, categorical_columns) train_data = self._transformer.transform(train_data) self._data_sampler = DataSampler( train_data, self._transformer.output_info_list, self._log_frequency ) data_dim = self._transformer.output_dimensions self._generator = Generator( self._embedding_dim + self._data_sampler.dim_cond_vec(), self._generator_dim, data_dim, ).to(self._device) discriminator = Discriminator( data_dim + self._data_sampler.dim_cond_vec(), self._discriminator_dim, pac=self.pac, ).to(self._device) optimizerG = optim.Adam( self._generator.parameters(), lr=self._generator_lr, betas=(0.5, 0.9), weight_decay=self._generator_decay, ) optimizerD = optim.Adam( discriminator.parameters(), lr=self._discriminator_lr, betas=(0.5, 0.9), weight_decay=self._discriminator_decay, ) mean = torch.zeros( self._batch_size, self._embedding_dim, device=self._device ) std = mean + 1 steps_per_epoch = max(len(train_data) // self._batch_size, 1) for i in range(epochs): for id_ in range(steps_per_epoch): for n in range(self._discriminator_steps): fakez = torch.normal(mean=mean, std=std) condvec = self._data_sampler.sample_condvec( self._batch_size ) if condvec is None: c1, m1, col, opt = None, None, None, None real = self._data_sampler.sample_data( self._batch_size, col, opt ) else: c1, m1, col, opt = condvec c1 = torch.from_numpy(c1).to(self._device) m1 = torch.from_numpy(m1).to(self._device) fakez = torch.cat([fakez, c1], dim=1) perm = np.arange(self._batch_size) np.random.shuffle(perm) real = self._data_sampler.sample_data( self._batch_size, col[perm], opt[perm] ) c2 = c1[perm] fake = self._generator(fakez) fakeact = self._apply_activate(fake) real = torch.from_numpy(real.astype("float32")).to( self._device 
) if c1 is not None: fake_cat = torch.cat([fakeact, c1], dim=1) real_cat = torch.cat([real, c2], dim=1) else: real_cat = real fake_cat = fakeact y_fake = discriminator(fake_cat) y_real = discriminator(real_cat) pen = discriminator.calc_gradient_penalty( real_cat, fake_cat, self._device, self.pac ) loss_d = -(torch.mean(y_real) - torch.mean(y_fake)) optimizerD.zero_grad() pen.backward(retain_graph=True) loss_d.backward() optimizerD.step() fakez = torch.normal(mean=mean, std=std) condvec = self._data_sampler.sample_condvec(self._batch_size) if condvec is None: c1, m1, col, opt = None, None, None, None else: c1, m1, col, opt = condvec c1 = torch.from_numpy(c1).to(self._device) m1 = torch.from_numpy(m1).to(self._device) fakez = torch.cat([fakez, c1], dim=1) fake = self._generator(fakez) fakeact = self._apply_activate(fake) if c1 is not None: y_fake = discriminator(torch.cat([fakeact, c1], dim=1)) else: y_fake = discriminator(fakeact) if condvec is None: cross_entropy = 0 else: cross_entropy = self._cond_loss(fake, c1, m1) loss_g = -torch.mean(y_fake) + cross_entropy optimizerG.zero_grad() loss_g.backward() optimizerG.step() if self._verbose: self.log.info( f"Epoch {i + 1}, Loss G: {loss_g.detach().cpu(): .4f}, " f"Loss D: {loss_d.detach().cpu(): .4f}" ) def sample(self, n, gpu=False, condition_column=None, condition_value=None, ): """Sample data similar to the training data. Choosing a condition_column and condition_value will increase the probability of the discrete condition_value happening in the condition_column. Args: n (int): Number of rows to sample. condition_column (string): Name of a discrete column. condition_value (string): Name of the category in the condition_column which we wish to increase the probability of happening. Returns: numpy.ndarray or pandas.DataFrame """ if gpu: self.set_device('cuda') else: self.set_device('cpu') if condition_column is not None and condition_value is not None: condition_info = self._transformer.convert_column_name_value_to_id( condition_column, condition_value ) global_condition_vec = self._data_sampler.generate_cond_from_condition_column_info( condition_info, self._batch_size ) else: global_condition_vec = None steps = n // self._batch_size + 1 data = [] for i in range(steps): mean = torch.zeros(self._batch_size, self._embedding_dim) std = mean + 1 fakez = torch.normal(mean=mean, std=std).to(self._device) if global_condition_vec is not None: condvec = global_condition_vec.copy() else: condvec = self._data_sampler.sample_original_condvec( self._batch_size ) if condvec is not None: c1 = condvec c1 = torch.from_numpy(c1).to(self._device) fakez = torch.cat([fakez, c1], dim=1) fake = self._generator(fakez) fakeact = self._apply_activate(fake) data.append(fakeact.detach().cpu().numpy()) data = np.concatenate(data, axis=0) data = data[:n] return self._transformer.inverse_transform(data) def set_device(self, device): self._device = device if self._generator is not None: self._generator.to(self._device) def save(self, path): """save the trained model""" device_backup = self._device self.set_device(torch.device("cpu")) torch.save(self, path) self.set_device(device_backup) @classmethod def load(cls, path): """load model from `path`""" device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model = torch.load(path) model.set_device(device) return model class Discriminator(Module): def __init__(self, input_dim, discriminator_dim, pac=10): super(Discriminator, self).__init__() dim = input_dim * pac self.pac = pac self.pacdim = dim seq = [] for 
item in list(discriminator_dim): seq += [Linear(dim, item), LeakyReLU(0.2), Dropout(0.5)] dim = item seq += [Linear(dim, 1)] self.seq = Sequential(*seq) def calc_gradient_penalty( self, real_data, fake_data, device="cpu", pac=10, lambda_=10 ): alpha = torch.rand(real_data.size(0) // pac, 1, 1, device=device) alpha = alpha.repeat(1, pac, real_data.size(1)) alpha = alpha.view(-1, real_data.size(1)) interpolates = alpha * real_data + ((1 - alpha) * fake_data) disc_interpolates = self(interpolates) gradients = torch.autograd.grad( outputs=disc_interpolates, inputs=interpolates, grad_outputs=torch.ones(disc_interpolates.size(), device=device), create_graph=True, retain_graph=True, only_inputs=True, )[0] gradient_penalty = ( (gradients.view(-1, pac * real_data.size(1)).norm(2, dim=1) - 1) ** 2 ).mean() * lambda_ return gradient_penalty def forward(self, input): assert input.size()[0] % self.pac == 0, f'generator batch size ({input.size()[0]}) ' \ f'should be divisible by pac ({self.pac})' return self.seq(input.view(-1, self.pacdim)) class Residual(Module): def __init__(self, i, o): super(Residual, self).__init__() self.fc = Linear(i, o) self.bn = BatchNorm1d(o) self.relu = ReLU() def forward(self, input): out = self.fc(input) out = self.bn(out) out = self.relu(out) return torch.cat([out, input], dim=1) class Generator(Module): def __init__(self, embedding_dim, generator_dim, data_dim): super(Generator, self).__init__() dim = embedding_dim seq = [] for item in list(generator_dim): seq += [Residual(dim, item)] dim += item seq.append(Linear(dim, data_dim)) self.seq = Sequential(*seq) def forward(self, input): data = self.seq(input) return data class DataSampler(object): """DataSampler samples the conditional vector and corresponding data for CTGAN.""" def __init__(self, data, output_info, log_frequency): self._data = data def is_discrete_column(column_info): return ( len(column_info) == 1 and column_info[0].activation_fn == "softmax" ) n_discrete_columns = sum( [ 1 for column_info in output_info if is_discrete_column(column_info) ] ) self._discrete_column_matrix_st = np.zeros( n_discrete_columns, dtype="int32" ) # Store the row id for each category in each discrete column. # For example _rid_by_cat_cols[a][b] is a list of all rows with the # a-th discrete column equal value b. 
self._rid_by_cat_cols = [] # Compute _rid_by_cat_cols st = 0 for column_info in output_info: if is_discrete_column(column_info): span_info = column_info[0] ed = st + span_info.dim rid_by_cat = [] for j in range(span_info.dim): rid_by_cat.append(np.nonzero(data[:, st + j])[0]) self._rid_by_cat_cols.append(rid_by_cat) st = ed else: st += sum([span_info.dim for span_info in column_info]) assert st == data.shape[1] # Prepare an interval matrix for efficiently sample conditional vector max_category = max( [ column_info[0].dim for column_info in output_info if is_discrete_column(column_info) ], default=0, ) self._discrete_column_cond_st = np.zeros( n_discrete_columns, dtype="int32" ) self._discrete_column_n_category = np.zeros( n_discrete_columns, dtype="int32" ) self._discrete_column_category_prob = np.zeros( (n_discrete_columns, max_category) ) self._n_discrete_columns = n_discrete_columns self._n_categories = sum( [ column_info[0].dim for column_info in output_info if is_discrete_column(column_info) ] ) st = 0 current_id = 0 current_cond_st = 0 for column_info in output_info: if is_discrete_column(column_info): span_info = column_info[0] ed = st + span_info.dim category_freq = np.sum(data[:, st:ed], axis=0) if log_frequency: category_freq = np.log(category_freq + 1) category_prob = category_freq / np.sum(category_freq) self._discrete_column_category_prob[ current_id, : span_info.dim ] = category_prob self._discrete_column_cond_st[current_id] = current_cond_st self._discrete_column_n_category[current_id] = span_info.dim current_cond_st += span_info.dim current_id += 1 st = ed else: st += sum([span_info.dim for span_info in column_info]) def _random_choice_prob_index(self, discrete_column_id): probs = self._discrete_column_category_prob[discrete_column_id] r = np.expand_dims(np.random.rand(probs.shape[0]), axis=1) return (probs.cumsum(axis=1) > r).argmax(axis=1) def sample_condvec(self, batch): """Generate the conditional vector for training. Returns: cond (batch x #categories): The conditional vector. mask (batch x #discrete columns): A one-hot vector indicating the selected discrete column. discrete column id (batch): Integer representation of mask. category_id_in_col (batch): Selected category in the selected discrete column. 
""" if self._n_discrete_columns == 0: return None discrete_column_id = np.random.choice( np.arange(self._n_discrete_columns), batch ) cond = np.zeros((batch, self._n_categories), dtype="float32") mask = np.zeros((batch, self._n_discrete_columns), dtype="float32") mask[np.arange(batch), discrete_column_id] = 1 category_id_in_col = self._random_choice_prob_index(discrete_column_id) category_id = ( self._discrete_column_cond_st[discrete_column_id] + category_id_in_col ) cond[np.arange(batch), category_id] = 1 return cond, mask, discrete_column_id, category_id_in_col def sample_original_condvec(self, batch): """Generate the conditional vector for generation use original frequency.""" if self._n_discrete_columns == 0: return None cond = np.zeros((batch, self._n_categories), dtype="float32") for i in range(batch): row_idx = np.random.randint(0, len(self._data)) col_idx = np.random.randint(0, self._n_discrete_columns) matrix_st = self._discrete_column_matrix_st[col_idx] matrix_ed = matrix_st + self._discrete_column_n_category[col_idx] pick = np.argmax(self._data[row_idx, matrix_st:matrix_ed]) cond[i, pick + self._discrete_column_cond_st[col_idx]] = 1 return cond def sample_data(self, n, col, opt): """Sample data from original training data satisfying the sampled conditional vector. Returns: n rows of matrix data. """ if col is None: idx = np.random.randint(len(self._data), size=n) return self._data[idx] idx = [] for c, o in zip(col, opt): idx.append(np.random.choice(self._rid_by_cat_cols[c][o])) return self._data[idx] def dim_cond_vec(self): return self._n_categories def generate_cond_from_condition_column_info(self, condition_info, batch): vec = np.zeros((batch, self._n_categories), dtype="float32") vec_id = ( self._discrete_column_matrix_st[ condition_info["discrete_column_id"] ] + condition_info["value_id"] ) vec[:, vec_id] = 1 return vec
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/ctgan.py
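# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original file above: a tiny
# CPU training run on toy data. Real settings need far more rows and epochs;
# `epochs=1` only keeps the sketch cheap. Column names are placeholders, and
# the conditional call assumes the transformer exposes
# `convert_column_name_value_to_id`, as in upstream CTGAN.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd
from syngen.generator.tabular.ctgan import CTGANGenerator

train = pd.DataFrame({
    "amount": np.random.lognormal(3.0, 1.0, size=2000),
    "channel": np.random.choice(["web", "store"], size=2000),
})
gen = CTGANGenerator(epochs=1, batch_size=500, gpu=False)
gen.fit(train, categorical_columns=["channel"])

synthetic = gen.sample(100)  # DataFrame shaped like `train`
# Bias sampling toward a chosen category of a discrete column.
boosted = gen.sample(100, condition_column="channel", condition_value="web")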
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial import pickle from typing import Optional, List, Union from tqdm import tqdm import cupy as cp import numpy as np import pandas as pd from sklearn.preprocessing import OrdinalEncoder from pandas.api.types import is_integer_dtype from syngen.generator.tabular.chunked_tabular_generator import ChunkedBaseTabularGenerator class UniformGenerator(ChunkedBaseTabularGenerator): """Uniform random feature generator. """ def __init__(self, **kwargs): super().__init__(**kwargs) def ordinal_encoder(self, cat_col): encoder = OrdinalEncoder() encoder.fit(cat_col) return encoder def fit( self, data, categorical_columns=(), samples: Union[float, int] = 0.1, columns: Optional[List[str]] = None, verbose: bool = False, ): """Computes the min and max ranges of the columns. Args: data: input data to use for extracting column statistics categorical_columns (list): list of columns that should be treated as categorical. verbose (bool): print intermediate results (default: False) """ if samples > 0: num_samples = len(data) if 0.0 <= samples <= 1.0: num_samples = samples * num_samples else: num_samples = samples num_samples = min(int(num_samples), 10_000_000) data = data.sample(n=num_samples) self.column_order = columns or list(data.columns) self.cat_fit = {} self.categorical_columns = set(categorical_columns) self.continuous_columns = set(self.column_order) - self.categorical_columns cat_cols = tqdm(self.categorical_columns) if verbose else self.categorical_columns for column in cat_cols: enc = self.ordinal_encoder(data[column].values.reshape(-1, 1)) n_unique = len(enc.categories_[0]) self.cat_fit[column] = { "encoder": enc, "n_unique": n_unique, "sampler": partial(np.random.randint, 0, n_unique), 'dtype': data[column].dtype, } self.cont_fit = {} self.integer_continuous_columns = [] cont_cols = tqdm(self.continuous_columns) if verbose else self.continuous_columns for column in cont_cols: min_, max_ = data[column].min(), data[column].max() self.cont_fit[column] = { "min": min_, "max": max_, "sampler": partial(np.random.uniform, min_, max_), 'dtype': data[column].dtype, } if is_integer_dtype(data[column].dtype): self.integer_continuous_columns.append(column) self.fits = {**self.cat_fit, **self.cont_fit} def sample(self, n, gpu=False, memmap_kwargs=None, start_idx=0, end_idx=None, **kwargs): use_memmap = memmap_kwargs is not None if use_memmap: memmap_outfile = np.load(memmap_kwargs['filename'], mmap_mode='r+') if gpu: cont_min = [] cont_max = [] for column in self.continuous_columns: cont_min.append(self.fits[column]['min']) cont_max.append(self.fits[column]['max']) cont_data = cp.random.uniform( cp.array(cont_min), cp.array(cont_max), size=(n, len(self.continuous_columns)), dtype=cp.float32 ) cont_data = cp.asnumpy(cont_data) df = pd.DataFrame(cont_data, columns=list(self.continuous_columns)) if self.integer_continuous_columns: df[self.integer_continuous_columns] = \ 
df[self.integer_continuous_columns].astype(np.int32) for column in self.categorical_columns: sampled_data = cp.random.randint(0, self.fits[column]["n_unique"], size=n, dtype=cp.int32) sampled_data = cp.asnumpy(sampled_data.reshape(-1, 1)) encoder = self.fits[column]["encoder"] sampled_data = encoder.inverse_transform(sampled_data) df[column] = sampled_data.reshape(-1).astype(self.fits[column]["dtype"]) else: df = pd.DataFrame() for column in self.column_order: sampler = self.fits[column]["sampler"] sampled_data = sampler(n) sampled_data = sampled_data.reshape(-1, 1) if "encoder" in self.fits[column]: encoder = self.fits[column]["encoder"] sampled_data = encoder.inverse_transform(sampled_data) df[column] = sampled_data.reshape(-1).astype(self.fits[column]["dtype"]) df = df[self.column_order] if use_memmap: memmap_outfile[start_idx:end_idx] = df.values return None return df def _space_complexity_factor(self): return 2.5 def save(self, path): with open(path, 'wb') as file_handler: pickle.dump(self, file_handler, protocol=pickle.HIGHEST_PROTOCOL) @classmethod def load(cls, path): with open(path, 'rb') as file_handler: model = pickle.load(file_handler) return model
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/uniform_generator.py
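# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original file above: the
# generator memorizes per-column min/max (continuous) and category sets
# (categorical), then samples every column independently and uniformly.
# The DataFrame is a toy placeholder.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd
from syngen.generator.tabular.uniform_generator import UniformGenerator

df = pd.DataFrame({
    "age": np.random.randint(18, 90, size=1000),
    "score": np.random.rand(1000),
    "city": np.random.choice(["a", "b", "c"], size=1000),
})
gen = UniformGenerator()
gen.fit(df, categorical_columns=["city"], samples=1.0)
fake = gen.sample(500)  # same columns; values uniform within fitted ranges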
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC class BaseTransform(ABC): """Base class for all transforms. The `BaseTransform` class contains methods that must be implemented by specific transforms objects. The `fit` method is optional. """ def fit(self, data): """Fits the transform on the data. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to transform. Returns: None """ pass def transform(self, data): """Transform the data. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to transform. Returns: numpy.array: Transformed data. """ raise NotImplementedError() def fit_transform(self, data): """Fit to the data and then return the transformed data. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to fit and transform Returns: Transformed data. """ self.fit(data) return self.transform(data) def inverse_transform(self, data): """Reverses the transformation done on the data back to original values. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to inverse-transform. Returns: Inverse transformed data. """ raise NotImplementedError()
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/transforms/base_transform.py
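# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file above: a minimal
# concrete transform implementing the BaseTransform contract. `LogTransform`
# is a hypothetical example class, not part of the package.
# ---------------------------------------------------------------------------
import numpy as np
from syngen.generator.tabular.transforms.base_transform import BaseTransform


class LogTransform(BaseTransform):
    """log1p/expm1 round trip for non-negative numeric data."""

    def transform(self, data):
        return np.log1p(np.asarray(data, dtype=np.float64))

    def inverse_transform(self, data):
        return np.expm1(np.asarray(data, dtype=np.float64))


t = LogTransform()
# `fit` is optional (a no-op by default), so fit_transform just transforms.
roundtrip = t.inverse_transform(t.fit_transform([0.0, 1.0, 10.0]))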
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # flake8: noqa from .one_hot_encoding import OneHotEncoding
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/transforms/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pandas as pd from syngen.generator.tabular.transforms.base_transform import BaseTransform class OneHotEncoding(BaseTransform): """OneHotEncoding for categorical data. Adopted from: https://github.com/sdv-dev/CTGAN This transformer replaces a single vector with N unique categories in it with N vectors which have 1s on the rows where the corresponding category is found and 0s on the rest. Null values are considered just another category. Args: error_on_unknown (bool): If a value that was not seen during the fit stage is passed to transform, then an error will be raised if this is True. """ dummies = None _dummy_na = None _num_dummies = None _dummy_encoded = False _indexer = None _uniques = None def __init__(self, error_on_unknown=True): self.error_on_unknown = error_on_unknown @staticmethod def _prepare_data(data): """Convert data to appropriate format. If data is a valid list or a list of lists, transforms it into an np.array, otherwise returns it. Args: data (pandas.Series, numpy.ndarray, list or list of lists): Data to prepare. Returns: pandas.Series or numpy.ndarray """ if isinstance(data, list): data = np.array(data) if len(data.shape) > 2: raise ValueError("Unexpected format.") if len(data.shape) == 2: if data.shape[1] != 1: raise ValueError("Unexpected format.") data = data[:, 0] return data def _transform(self, data): if self._dummy_encoded: coder = self._indexer codes = pd.Categorical(data, categories=self._uniques).codes else: coder = self._uniques codes = data rows = len(data) dummies = np.broadcast_to(coder, (rows, self._num_dummies)) coded = np.broadcast_to(codes, (self._num_dummies, rows)).T array = (coded == dummies).astype(int) if self._dummy_na: null = np.zeros((rows, 1), dtype=int) null[pd.isnull(data)] = 1 array = np.append(array, null, axis=1) return array def fit(self, data): """Fit the transformer to the data. Get the pandas `dummies` which will be used later on for OneHotEncoding. Args: data (pandas.Series, numpy.ndarray, list or list of lists): Data to fit the transformer to. """ data = self._prepare_data(data) null = pd.isnull(data) self._uniques = list(pd.unique(data[~null])) self._dummy_na = null.any() self._num_dummies = len(self._uniques) self._indexer = list(range(self._num_dummies)) self.dummies = self._uniques.copy() if not np.issubdtype(data.dtype, np.number): self._dummy_encoded = True if self._dummy_na: self.dummies.append(np.nan) def transform(self, data): """Replace each category with the OneHot vectors. Args: data (pandas.Series, numpy.ndarray, list or list of lists): Data to transform. 
Returns: numpy.ndarray: """ data = self._prepare_data(data) array = self._transform(data) if self.error_on_unknown: unknown = array.sum(axis=1) == 0 if unknown.any(): raise ValueError( f"Attempted to transform {list(data[unknown])} ", "that were not seen during fit stage.", ) return array def inverse_transform(self, data): """Convert float values back to the original categorical values. Args: data (numpy.ndarray): Data to revert. Returns: pandas.Series """ if data.ndim == 1: data = data.reshape(-1, 1) indices = np.argmax(data, axis=1) return pd.Series(indices).map(self.dummies.__getitem__)
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/transforms/one_hot_encoding.py
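# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original file above:
# round-tripping a small categorical vector through the encoder. The import
# uses the package-level re-export shown in transforms/__init__.py.
# ---------------------------------------------------------------------------
from syngen.generator.tabular.transforms import OneHotEncoding

ohe = OneHotEncoding()
onehot = ohe.fit_transform(["red", "green", "red", "blue"])
# One column per category seen during fit: shape (4, 3) here.
restored = ohe.inverse_transform(onehot)  # pandas.Series of original labels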
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pandas as pd import torch from sklearn.mixture import BayesianGaussianMixture from syngen.utils.types import ColumnType from syngen.generator.tabular.data_transformer.base_data_transformer import ( BaseDataTransformer, ) class CTABDataTransformer(BaseDataTransformer): """ Data transformer for CTAB generator. Adopted from: https://github.com/zhao-zilong/CTAB-GAN """ def __init__( self, categorical_columns=(), mixed_dict={}, n_clusters=10, eps=0.005 ): self.meta = None self.n_clusters = n_clusters self.eps = eps self.categorical_columns = categorical_columns self.mixed_columns = mixed_dict def get_metadata(self, train_data): meta = [] for index, column_name in enumerate(train_data.columns): column = train_data.iloc[:, index] if index in self.categorical_columns: mapper = column.value_counts().index.tolist() meta.append( { "name": index, "type": ColumnType.CATEGORICAL, "size": len(mapper), "i2s": mapper, } ) elif index in self.mixed_columns.keys(): meta.append( { "name": index, "type": ColumnType.MIXED, "min": column.min(), "max": column.max(), "modal": self.mixed_columns[index], } ) else: meta.append( { "name": index, "type": ColumnType.CONTINUOUS, "min": column.min(), "max": column.max(), } ) return meta def fit(self, train_data: pd.DataFrame): data = train_data.values self.meta = self.get_metadata(train_data) model = [] self.ordering = [] self.output_info = [] self.output_dim = 0 self.components = [] self.filter_arr = [] for id_, info in enumerate(self.meta): if info["type"] == ColumnType.CONTINUOUS: gm = BayesianGaussianMixture( self.n_clusters, weight_concentration_prior_type="dirichlet_process", weight_concentration_prior=0.001, max_iter=100, n_init=1, random_state=42, ) gm.fit(data[:, id_].reshape([-1, 1])) mode_freq = ( pd.Series(gm.predict(data[:, id_].reshape([-1, 1]))) .value_counts() .keys() ) model.append(gm) old_comp = gm.weights_ > self.eps comp = [] for i in range(self.n_clusters): if (i in (mode_freq)) & old_comp[i]: comp.append(True) else: comp.append(False) self.components.append(comp) self.output_info += [(1, "tanh"), (np.sum(comp), "softmax")] self.output_dim += 1 + np.sum(comp) elif info["type"] == ColumnType.MIXED: gm1 = BayesianGaussianMixture( self.n_clusters, weight_concentration_prior_type="dirichlet_process", weight_concentration_prior=0.001, max_iter=100, n_init=1, random_state=42, ) gm2 = BayesianGaussianMixture( self.n_clusters, weight_concentration_prior_type="dirichlet_process", weight_concentration_prior=0.001, max_iter=100, n_init=1, random_state=42, ) gm1.fit(data[:, id_].reshape([-1, 1])) filter_arr = [] for element in data[:, id_]: if element not in info["modal"]: filter_arr.append(True) else: filter_arr.append(False) gm2.fit(data[:, id_][filter_arr].reshape([-1, 1])) mode_freq = ( pd.Series( gm2.predict(data[:, id_][filter_arr].reshape([-1, 1])) ) .value_counts() .keys() ) self.filter_arr.append(filter_arr) model.append((gm1, gm2)) 
old_comp = gm2.weights_ > self.eps comp = [] for i in range(self.n_clusters): if (i in (mode_freq)) & old_comp[i]: comp.append(True) else: comp.append(False) self.components.append(comp) self.output_info += [ (1, "tanh"), (np.sum(comp) + len(info["modal"]), "softmax"), ] self.output_dim += 1 + np.sum(comp) + len(info["modal"]) else: model.append(None) self.components.append(None) self.output_info += [(info["size"], "softmax")] self.output_dim += info["size"] self.model = model def transform(self, data, ispositive=False, positive_list=None): values = [] mixed_counter = 0 for id_, info in enumerate(self.meta): current = data[:, id_] if info["type"] == ColumnType.CONTINUOUS: current = current.reshape([-1, 1]) means = self.model[id_].means_.reshape((1, self.n_clusters)) stds = np.sqrt(self.model[id_].covariances_).reshape( (1, self.n_clusters) ) features = np.empty(shape=(len(current), self.n_clusters)) if ispositive: if id_ in positive_list: features = np.abs(current - means) / (4 * stds) else: features = (current - means) / (4 * stds) probs = self.model[id_].predict_proba(current.reshape([-1, 1])) n_opts = sum(self.components[id_]) features = features[:, self.components[id_]] probs = probs[:, self.components[id_]] opt_sel = np.zeros(len(data), dtype="int") for i in range(len(data)): pp = probs[i] + 1e-6 pp = pp / sum(pp) opt_sel[i] = np.random.choice(np.arange(n_opts), p=pp) idx = np.arange((len(features))) features = features[idx, opt_sel].reshape([-1, 1]) features = np.clip(features, -0.99, 0.99) probs_onehot = np.zeros_like(probs) probs_onehot[np.arange(len(probs)), opt_sel] = 1 re_ordered_phot = np.zeros_like(probs_onehot) col_sums = probs_onehot.sum(axis=0) n = probs_onehot.shape[1] largest_indices = np.argsort(-1 * col_sums)[:n] self.ordering.append(largest_indices) for id, val in enumerate(largest_indices): re_ordered_phot[:, id] = probs_onehot[:, val] values += [features, re_ordered_phot] elif info["type"] == "mixed": means_0 = self.model[id_][0].means_.reshape([-1]) stds_0 = np.sqrt(self.model[id_][0].covariances_).reshape([-1]) zero_std_list = [] means_needed = [] stds_needed = [] for mode in info["modal"]: if mode != -9999999: dist = [] for idx, val in enumerate(list(means_0.flatten())): dist.append(abs(mode - val)) index_min = np.argmin(np.array(dist)) zero_std_list.append(index_min) else: continue for idx in zero_std_list: means_needed.append(means_0[idx]) stds_needed.append(stds_0[idx]) mode_vals = [] for i, j, k in zip(info["modal"], means_needed, stds_needed): this_val = np.abs(i - j) / (4 * k) mode_vals.append(this_val) if -9999999 in info["modal"]: mode_vals.append(0) current = current.reshape([-1, 1]) filter_arr = self.filter_arr[mixed_counter] current = current[filter_arr] means = self.model[id_][1].means_.reshape((1, self.n_clusters)) stds = np.sqrt(self.model[id_][1].covariances_).reshape( (1, self.n_clusters) ) features = np.empty(shape=(len(current), self.n_clusters)) if ispositive: if id_ in positive_list: features = np.abs(current - means) / (4 * stds) else: features = (current - means) / (4 * stds) probs = self.model[id_][1].predict_proba( current.reshape([-1, 1]) ) n_opts = sum(self.components[id_]) # 8 features = features[:, self.components[id_]] probs = probs[:, self.components[id_]] opt_sel = np.zeros(len(current), dtype="int") for i in range(len(current)): pp = probs[i] + 1e-6 pp = pp / sum(pp) opt_sel[i] = np.random.choice(np.arange(n_opts), p=pp) idx = np.arange((len(features))) features = features[idx, opt_sel].reshape([-1, 1]) features = np.clip(features, 
-0.99, 0.99) probs_onehot = np.zeros_like(probs) probs_onehot[np.arange(len(probs)), opt_sel] = 1 extra_bits = np.zeros([len(current), len(info["modal"])]) temp_probs_onehot = np.concatenate( [extra_bits, probs_onehot], axis=1 ) final = np.zeros( [len(data), 1 + probs_onehot.shape[1] + len(info["modal"])] ) features_curser = 0 for idx, val in enumerate(data[:, id_]): if val in info["modal"]: category_ = list(map(info["modal"].index, [val]))[0] final[idx, 0] = mode_vals[category_] final[idx, (category_ + 1)] = 1 else: final[idx, 0] = features[features_curser] final[ idx, (1 + len(info["modal"])) : ] = temp_probs_onehot[features_curser][ len(info["modal"]) : ] features_curser = features_curser + 1 just_onehot = final[:, 1:] re_ordered_jhot = np.zeros_like(just_onehot) n = just_onehot.shape[1] col_sums = just_onehot.sum(axis=0) largest_indices = np.argsort(-1 * col_sums)[:n] self.ordering.append(largest_indices) for id, val in enumerate(largest_indices): re_ordered_jhot[:, id] = just_onehot[:, val] final_features = final[:, 0].reshape([-1, 1]) values += [final_features, re_ordered_jhot] mixed_counter = mixed_counter + 1 else: self.ordering.append(None) col_t = np.zeros([len(data), info["size"]]) idx = list(map(info["i2s"].index, current)) col_t[np.arange(len(data)), idx] = 1 values.append(col_t) return np.concatenate(values, axis=1) def inverse_transform(self, data): data_t = np.zeros([len(data), len(self.meta)]) st = 0 for id_, info in enumerate(self.meta): if info["type"] == ColumnType.CONTINUOUS: u = data[:, st] v = data[:, st + 1 : st + 1 + np.sum(self.components[id_])] order = self.ordering[id_] v_re_ordered = np.zeros_like(v) for id, val in enumerate(order): v_re_ordered[:, val] = v[:, id] v = v_re_ordered u = np.clip(u, -1, 1) v_t = np.ones((data.shape[0], self.n_clusters)) * -100 v_t[:, self.components[id_]] = v v = v_t st += 1 + np.sum(self.components[id_]) means = self.model[id_].means_.reshape([-1]) stds = np.sqrt(self.model[id_].covariances_).reshape([-1]) p_argmax = np.argmax(v, axis=1) std_t = stds[p_argmax] mean_t = means[p_argmax] tmp = u * 4 * std_t + mean_t data_t[:, id_] = tmp elif info["type"] == "mixed": u = data[:, st] full_v = data[ :, (st + 1) : (st + 1) + len(info["modal"]) + np.sum(self.components[id_]), ] order = self.ordering[id_] full_v_re_ordered = np.zeros_like(full_v) for id, val in enumerate(order): full_v_re_ordered[:, val] = full_v[:, id] full_v = full_v_re_ordered mixed_v = full_v[:, : len(info["modal"])] v = full_v[:, -np.sum(self.components[id_]) :] u = np.clip(u, -1, 1) v_t = np.ones((data.shape[0], self.n_clusters)) * -100 v_t[:, self.components[id_]] = v v = np.concatenate([mixed_v, v_t], axis=1) st += 1 + np.sum(self.components[id_]) + len(info["modal"]) means = self.model[id_][1].means_.reshape([-1]) stds = np.sqrt(self.model[id_][1].covariances_).reshape([-1]) p_argmax = np.argmax(v, axis=1) result = np.zeros_like(u) for idx in range(len(data)): if p_argmax[idx] < len(info["modal"]): argmax_value = p_argmax[idx] result[idx] = float( list( map(info["modal"].__getitem__, [argmax_value]) )[0] ) else: std_t = stds[(p_argmax[idx] - len(info["modal"]))] mean_t = means[(p_argmax[idx] - len(info["modal"]))] result[idx] = u[idx] * 4 * std_t + mean_t data_t[:, id_] = result else: current = data[:, st : st + info["size"]] st += info["size"] idx = np.argmax(current, axis=1) data_t[:, id_] = list(map(info["i2s"].__getitem__, idx)) return data_t class ImageTransformer(BaseDataTransformer): def __init__(self, side): self.height = side def transform(self, data): if 
self.height * self.height > len(data[0]): padding = torch.zeros( (len(data), self.height * self.height - len(data[0])) ).to(data.device) data = torch.cat([data, padding], axis=1) return data.view(-1, 1, self.height, self.height) def inverse_transform(self, data): data = data.view(-1, self.height * self.height) return data
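# Usage sketch (illustrative, not part of the original module): round-trips a
# batch of flat feature rows through ImageTransformer. The shapes below are
# assumptions chosen for the demo; `torch` is already imported by this module.
if __name__ == "__main__":
    rows = torch.randn(4, 10)          # 4 samples, 10 features each
    it = ImageTransformer(side=4)      # 4 * 4 = 16 >= 10, so rows get zero-padded
    img = it.transform(rows)           # shape: (4, 1, 4, 4)
    flat = it.inverse_transform(img)   # shape: (4, 16), zero padding retained
    assert img.shape == (4, 1, 4, 4) and flat.shape == (4, 16)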
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/data_transformer/ctab_data_transformer.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import namedtuple import cudf import numpy as np import pandas as pd from sklearn.mixture import BayesianGaussianMixture from syngen.generator.tabular.transforms import OneHotEncoding from syngen.generator.tabular.data_transformer.base_data_transformer import ( BaseDataTransformer, ) SpanInfo = namedtuple("SpanInfo", ["dim", "activation_fn"]) ColumnTransformInfo = namedtuple( "ColumnTransformInfo", [ "column_name", "column_type", "transform", "transform_aux", "output_info", "output_dimensions", ], ) class CTGANDataTransformer(BaseDataTransformer): """Data Transformer for CTGAN. Adopted from: https://github.com/sdv-dev/CTGAN Model continuous columns with a BayesianGMM and normalized to a scalar [0, 1] and a vector. Discrete columns are encoded using a scikit-learn OneHotEncoder. """ def __init__(self, max_clusters=10, weight_threshold=0.005): """Create a data transformer. Args: max_clusters (int): Maximum number of Gaussian distributions in Bayesian GMM. weight_threshold (float): Weight threshold for a Gaussian distribution to be kept. """ self._max_clusters = max_clusters self._weight_threshold = weight_threshold def _fit_continuous(self, column_name, raw_column_data): """Train Bayesian GMM for continuous column.""" gm = BayesianGaussianMixture( n_components=self._max_clusters, weight_concentration_prior_type="dirichlet_process", weight_concentration_prior=0.001, n_init=1, ) gm.fit(raw_column_data.reshape(-1, 1)) valid_component_indicator = gm.weights_ > self._weight_threshold num_components = valid_component_indicator.sum() return ColumnTransformInfo( column_name=column_name, column_type="continuous", transform=gm, transform_aux=valid_component_indicator, output_info=[ SpanInfo(1, "tanh"), SpanInfo(num_components, "softmax"), ], output_dimensions=1 + num_components, ) def _fit_discrete(self, column_name, raw_column_data): """Fit one hot encoder for discrete column.""" ohe = OneHotEncoding() ohe.fit(raw_column_data) num_categories = len(ohe.dummies) return ColumnTransformInfo( column_name=column_name, column_type="discrete", transform=ohe, transform_aux=None, output_info=[SpanInfo(num_categories, "softmax")], output_dimensions=num_categories, ) def get_metadata(self): if hasattr(self, "_column_transform_info_list"): return self._column_transform_info_list return [] def fit(self, raw_data, discrete_columns=tuple()): """Fit GMM for continuous columns and One hot encoder for discrete columns. This step also counts the #columns in matrix data, and span information. 
""" self.output_info_list = [] self.output_dimensions = 0 if not isinstance(raw_data, (pd.DataFrame, cudf.DataFrame)): self.dataframe = False raw_data = pd.DataFrame(raw_data) else: self.dataframe = True self._column_raw_dtypes = raw_data.dtypes self._column_transform_info_list = [] for column_name in raw_data.columns: raw_column_data = raw_data[column_name].values if not isinstance(raw_column_data, np.ndarray): raw_column_data = raw_column_data.get() # cupy to numpy if column_name in discrete_columns: column_transform_info = self._fit_discrete( column_name, raw_column_data ) else: column_transform_info = self._fit_continuous( column_name, raw_column_data ) self.output_info_list.append(column_transform_info.output_info) self.output_dimensions += column_transform_info.output_dimensions self._column_transform_info_list.append(column_transform_info) def _transform_continuous(self, column_transform_info, raw_column_data): gm = column_transform_info.transform valid_component_indicator = column_transform_info.transform_aux num_components = valid_component_indicator.sum() means = gm.means_.reshape((1, self._max_clusters)) stds = np.sqrt(gm.covariances_).reshape((1, self._max_clusters)) normalized_values = ((raw_column_data - means) / (4 * stds))[ :, valid_component_indicator ] component_probs = gm.predict_proba(raw_column_data)[ :, valid_component_indicator ] selected_component = np.zeros(len(raw_column_data), dtype="int") for i in range(len(raw_column_data)): component_porb_t = component_probs[i] + 1e-6 component_porb_t = component_porb_t / component_porb_t.sum() selected_component[i] = np.random.choice( np.arange(num_components), p=component_porb_t ) selected_normalized_value = normalized_values[ np.arange(len(raw_column_data)), selected_component ].reshape([-1, 1]) selected_normalized_value = np.clip( selected_normalized_value, -0.99, 0.99 ) selected_component_onehot = np.zeros_like(component_probs) selected_component_onehot[ np.arange(len(raw_column_data)), selected_component ] = 1 return [selected_normalized_value, selected_component_onehot] def _transform_discrete(self, column_transform_info, raw_column_data): ohe = column_transform_info.transform return [ohe.transform(raw_column_data)] def transform(self, raw_data): """Take raw data and output a matrix data.""" if not isinstance(raw_data, (pd.DataFrame, cudf.DataFrame)): raw_data = pd.DataFrame(raw_data) column_data_list = [] for column_transform_info in self._column_transform_info_list: column_data = raw_data[[column_transform_info.column_name]].values if not isinstance(column_data, np.ndarray): column_data = column_data.get() # cupy to numpy if column_transform_info.column_type == "continuous": column_data_list += self._transform_continuous( column_transform_info, column_data ) else: assert column_transform_info.column_type == "discrete" column_data_list += self._transform_discrete( column_transform_info, column_data ) return np.concatenate(column_data_list, axis=1).astype(float) def _inverse_transform_continuous( self, column_transform_info, column_data, sigmas, st ): gm = column_transform_info.transform valid_component_indicator = column_transform_info.transform_aux selected_normalized_value = column_data[:, 0] selected_component_probs = column_data[:, 1:] if sigmas is not None: sig = sigmas[st] selected_normalized_value = np.random.normal( selected_normalized_value, sig ) selected_normalized_value = np.clip(selected_normalized_value, -1, 1) component_probs = ( np.ones((len(column_data), self._max_clusters)) * -100 ) component_probs[ 
:, valid_component_indicator ] = selected_component_probs means = gm.means_.reshape([-1]) stds = np.sqrt(gm.covariances_).reshape([-1]) selected_component = np.argmax(component_probs, axis=1) std_t = stds[selected_component] mean_t = means[selected_component] column = selected_normalized_value * 4 * std_t + mean_t return column def _inverse_transform_discrete(self, column_transform_info, column_data): ohe = column_transform_info.transform return ohe.inverse_transform(column_data) def inverse_transform(self, data, sigmas=None): """Take matrix data and output raw data. Output uses the same type as input to the transform function. Either np array or pd dataframe. """ st = 0 recovered_column_data_list = [] column_names = [] for column_transform_info in self._column_transform_info_list: dim = column_transform_info.output_dimensions column_data = data[:, st : st + dim] if column_transform_info.column_type == "continuous": recovered_column_data = self._inverse_transform_continuous( column_transform_info, column_data, sigmas, st ) else: assert column_transform_info.column_type == "discrete" recovered_column_data = self._inverse_transform_discrete( column_transform_info, column_data ) recovered_column_data_list.append(recovered_column_data) column_names.append(column_transform_info.column_name) st += dim recovered_data = np.column_stack(recovered_column_data_list) recovered_data = pd.DataFrame( recovered_data, columns=column_names ).astype(self._column_raw_dtypes) if not self.dataframe: recovered_data = recovered_data.values return recovered_data def convert_column_name_value_to_id(self, column_name, value): discrete_counter = 0 column_id = 0 for column_transform_info in self._column_transform_info_list: if column_transform_info.column_name == column_name: break if column_transform_info.column_type == "discrete": discrete_counter += 1 column_id += 1 else: raise ValueError( f"The column_name `{column_name}` doesn't exist in the data." ) one_hot = column_transform_info.transform.transform(np.array([value]))[ 0 ] if sum(one_hot) == 0: raise ValueError( f"The value `{value}` doesn't exist in the column `{column_name}`." ) return { "discrete_column_id": discrete_counter, "column_id": column_id, "value_id": np.argmax(one_hot), }
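# Usage sketch (illustrative, not part of the original module): fits the
# transformer on a toy frame with one continuous and one discrete column, then
# round-trips it. The column names and data are assumptions made for the demo;
# `pd` and `np` are already imported by this module.
if __name__ == "__main__":
    df = pd.DataFrame({
        "amount": np.random.randn(200),
        "category": np.random.choice(["a", "b", "c"], size=200),
    })
    transformer = CTGANDataTransformer(max_clusters=5)
    transformer.fit(df, discrete_columns=("category",))
    encoded = transformer.transform(df)               # (200, output_dimensions) float matrix
    decoded = transformer.inverse_transform(encoded)  # DataFrame with the original columns
    assert list(decoded.columns) == ["amount", "category"]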
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/data_transformer/ctgan_data_transformer.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/data_transformer/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC class BaseDataTransformer(ABC): """Base class for all data transformers. The `BaseDataTransformer` provides the transformation required by generators to transform (encode) and inverse_transform (decode) data. It contains the `fit`, `transform`, `inverse_transform`, and `get_metadata` functions that must be implemented by specific data transformer objects. """ def fit(self, data): """Fits the data transform to the data. This is optional Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to transform. Returns: None """ pass def transform(self, data): """Transform the data. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to transform. Returns: numpy.array: Transformed data. """ raise NotImplementedError() def fit_transform(self, data): """Fit to the data and then return the transformed data. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to fit and transform Returns: Transformed data. """ self.fit(data) return self.transform(data) def inverse_transform(self, data): """Reverses the transformation done on the data back to original values. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to inverse-transform. Returns: raw_data: inverse transformed data """ raise NotImplementedError()
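# Minimal concrete example (illustrative, not part of the SynGen transformer
# set): a log-scaling transformer that shows how the fit/transform/
# inverse_transform contract is meant to be implemented.
if __name__ == "__main__":
    import numpy as np

    class LogTransformer(BaseDataTransformer):
        def transform(self, data):
            # encode: natural log of (1 + x), defined for x > -1
            return np.log1p(np.asarray(data))

        def inverse_transform(self, data):
            # decode: exact inverse of log1p
            return np.expm1(np.asarray(data))

    values = np.array([0.0, 1.0, 9.0])
    encoded = LogTransformer().fit_transform(values)  # fit() is a no-op here
    assert np.allclose(LogTransformer().inverse_transform(encoded), values)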
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/data_transformer/base_data_transformer.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import warnings from typing import List, Optional, Set, Tuple import numpy as np from syngen.generator.graph.base_graph_generator import BaseBipartiteGraphGenerator from syngen.generator.graph.fitter import RMATFitter from syngen.generator.graph.utils import ( effective_nonsquare_rmat_exact, generate_gpu_rmat, get_reversed_part, rearrange_graph, recreate_graph, generate_gpu_chunked_rmat, ) class RMATBipartiteGenerator(BaseBipartiteGraphGenerator): """ Graph generator based on RMAT that generate bipartite graphs Args: seed (int): Seed to reproduce the results. If None then random seed will be used. logdir (str): Directory to store the logging results. fitter (RMATFitter): RMATFitter to be used. """ def __init__( self, seed: Optional[int] = None, logdir: str = "./logs", gpu: bool = True, fitter: Optional[RMATFitter] = None, **kwargs, ): super().__init__(seed, logdir, gpu) self.fitter = fitter or RMATFitter() def fit( self, graph: List[Tuple[int, int]], src_set: Optional[Set[int]], dst_set: Optional[Set[int]], is_directed: bool, transform_graph: bool = True, ): """ Fits generator on the graph Args: graph (List[Tuple[int, int]]): graph to be fitted on transform_graph (bool): defines if the generator should transform the input graph using src and dst node sets src_set (Set[int]): set of source nodes dst_set (Set[int]): set of destination nodes is_directed (bool): flag indicating whether the graph is directed """ assert graph is not None, "Wrong graph" if transform_graph: lower, upper = rearrange_graph(graph, src_set, dst_set, assume_unique=True) else: assert not is_directed upper = graph lower = [] if ( len(lower) and is_directed ): # No need to fit lower part for undirected graph self._fit_dst_src_results = self.fitter.fit(lower) if len(upper): self._fit_src_dst_results = self.fitter.fit(upper) self.logger.log(f"Fit results dst_src: {self._fit_dst_src_results}") self.logger.log(f"Fit results src_dst: {self._fit_src_dst_results}") def _generate_part( self, fit_results: Tuple[float, float, float, float], part_shape: Tuple[int, int], num_edges: int, noise: float, batch_size: int, return_node_ids: bool, save_path: Optional[str], ): if self.gpu: return self._generate_part_gpu( fit_results=fit_results, part_shape=part_shape, num_edges=num_edges, noise=noise, return_node_ids=return_node_ids, save_path=save_path, ) else: return self._generate_part_cpu( fit_results=fit_results, part_shape=part_shape, num_edges=num_edges, noise=noise, batch_size=batch_size, return_node_ids=return_node_ids, ) def _generate_part_cpu( self, fit_results: Tuple[float, float, float, float], part_shape: Tuple[int, int], num_edges: int, noise: float, batch_size: int, return_node_ids: bool, ): a, b, c, d = fit_results theta = np.array([[a, b], [c, d]]) theta /= a + b + c + d res = effective_nonsquare_rmat_exact( theta, num_edges, part_shape, noise_scaling=noise, batch_size=batch_size, dtype=np.int64, custom_samplers=None, 
generate_back_edges=False, remove_selfloops=False, return_node_ids=2 if return_node_ids else 0, verbose=self.verbose, ) if return_node_ids: return res[0], res[1], res[2] return res[0] def _generate_part_gpu( self, fit_results: Tuple[float, float, float, float], part_shape: Tuple[int, int], num_edges: int, noise: float, return_node_ids: bool, save_path: Optional[str] = None, _chunked: bool = True, ): a, b, c, d = fit_results theta = np.array([a, b, c, d]) theta /= a + b + c + d a, b, c, d = theta r_scale, c_scale = part_shape if _chunked: res = generate_gpu_chunked_rmat( a, b, c, d, r_scale=r_scale, c_scale=c_scale, n_edges=num_edges, noise=noise, is_directed=True, has_self_loop=True, return_node_ids=2 if return_node_ids else 0, save_path=save_path, verbose=self.verbose, ) else: res = generate_gpu_rmat( a, b, c, d, r_scale=r_scale, c_scale=c_scale, n_edges=num_edges, noise=noise, is_directed=True, has_self_loop=True, return_node_ids=2 if return_node_ids else 0 ) if return_node_ids: return res[0], res[1], res[2] return res def generate( self, num_nodes_src_set: int, num_nodes_dst_set: int, num_edges_src_dst: int, num_edges_dst_src: int, is_directed: bool, apply_edge_mirroring = True, transform_graph: bool = True, noise: float = 0.5, batch_size: int = 1_000_000, return_node_ids=False, save_path: Optional[str] = None, ): """ Generates graph with approximately `num_nodes_src_set`/`num_nodes_dst_set` nodes and exactly `num_edges_src_dst`/`num_edges_dst_src` edges from generator Args: num_nodes_src_set (int): approximate number of source nodes to be generated num_nodes_dst_set (int): approximate number of destination nodes to be generated num_edges_src_dst (int): exact number of source->destination edges to be generated num_edges_dst_src (int): exact number of destination->source to be generated is_directed (bool): flag indicating whether the generated graph has to be directed transform_graph (bool): defines if the generator should transform the output graph to avoid node id conflict between src and dst nodes noise (float): noise for RMAT generation to get better degree distribution batch_size (int): size of the edge chunk that will be generated in one generation step return_node_ids (bool): flag indicating whether the generator has to return nodes_ids as the second output save_path (bool): path to store the graph. 
if specified the method return the number of edges in the graph Returns: new_graph (np.array[int, int]): generated graph """ assert ( num_nodes_src_set > 0 and num_nodes_dst_set > 0 ), "Wrong number of nodes" assert ( num_edges_src_dst >= 0 and num_edges_dst_src >= 0 ), "Wrong number of edges" max_edges = num_nodes_src_set * num_nodes_dst_set assert ( num_edges_src_dst < max_edges and num_edges_dst_src < max_edges ), "Configuration of nodes nad edges cannot form any graph" assert ( self._fit_src_dst_results or self._fit_dst_src_results ), "There are no fit results, \ call fit method first or load the seeding matrix from the file" if (self._fit_dst_src_results is not None) != is_directed: requested = "directed" if is_directed else "undirected" fitted = "undirected" if requested == "directed" else "directed" raise RuntimeError( f"Fitted {fitted} graph but requested to generate {requested} one" ) if apply_edge_mirroring and is_directed: warnings.warn('edge mirroring works only for undirected graphs') if not is_directed: assert ( num_edges_src_dst == num_edges_dst_src ), "For undirected graph expected the same number of edges for each side" assert ( self._fit_dst_src_results is None ), "For undirected graph expected only src->dst results to be present" log2_row = math.ceil(math.log2(num_nodes_src_set)) log2_col = math.ceil(math.log2(num_nodes_dst_set)) part_shape_upper = (log2_row, log2_col) part_shape_lower = (log2_col, log2_row) offset = int(2 ** log2_row) if self._fit_src_dst_results and num_edges_src_dst: upper_part_res = self._generate_part( self._fit_src_dst_results, part_shape_upper, num_edges_src_dst, noise, batch_size, return_node_ids=return_node_ids, save_path=save_path, ) if return_node_ids: upper_part, upper_part_src_node_ids, upper_part_dst_node_ids = upper_part_res else: upper_part = upper_part_res else: upper_part = [] if self._fit_dst_src_results: if save_path is not None: raise NotImplementedError('save_path works only for undirected bipartite graphs') if num_edges_dst_src: lower_part_res = self._generate_part( self._fit_dst_src_results, part_shape_lower, num_edges_dst_src, noise, batch_size, save_path=save_path, return_node_ids=return_node_ids, ) if return_node_ids: lower_part, lower_part_src_node_ids, lower_part_dst_node_ids = lower_part_res else: lower_part = lower_part_res else: lower_part = [] elif not is_directed and apply_edge_mirroring: # Recreate lower part for undirected graph if return_node_ids: lower_part_src_node_ids, lower_part_dst_node_ids = upper_part_dst_node_ids, upper_part_src_node_ids lower_part = get_reversed_part(upper_part) else: lower_part = [] if transform_graph: new_graph = recreate_graph(lower_part, upper_part, offset) if return_node_ids: lower_part_src_node_ids = lower_part_src_node_ids + offset upper_part_dst_node_ids = upper_part_dst_node_ids + offset src_node_ids = np.union1d(upper_part_src_node_ids, lower_part_dst_node_ids) dst_node_ids = np.union1d(upper_part_dst_node_ids, lower_part_src_node_ids) else: if apply_edge_mirroring: raise NotImplementedError('apply edge mirroring works only with `transform_graph=True`') new_graph = upper_part if return_node_ids: src_node_ids, dst_node_ids = upper_part_src_node_ids, upper_part_dst_node_ids if return_node_ids: return new_graph, src_node_ids, dst_node_ids return new_graph
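# Usage sketch (illustrative, not part of the original module): fits the
# generator on a tiny bipartite edge list and samples a new undirected graph
# on the CPU. The edge list, node sets, and sizes below are assumptions for
# the demo; whether the default RMATFitter fits well on so few edges is also
# an assumption.
if __name__ == "__main__":
    edges = np.array([[0, 4], [1, 5], [2, 6], [3, 7]])
    src_nodes = np.array([0, 1, 2, 3])
    dst_nodes = np.array([4, 5, 6, 7])

    generator = RMATBipartiteGenerator(seed=42, gpu=False)
    generator.fit(edges, src_set=src_nodes, dst_set=dst_nodes, is_directed=False)
    graph = generator.generate(
        num_nodes_src_set=16,
        num_nodes_dst_set=16,
        num_edges_src_dst=32,
        num_edges_dst_src=32,
        is_directed=False,
    )  # np.array of shape (64, 2): 32 edges plus their mirrored counterparts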
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph/rmat_bipartite.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # flake8: noqa from .base_graph_generator import BaseGenerator, BaseGraphGenerator, BaseBipartiteGraphGenerator from .rmat import RMATGenerator from .rmat_bipartite import RMATBipartiteGenerator from .random import RandomGraph from .random_bipartite import RandomBipartite def get_structural_generator_class(type, is_bipartite, is_random): if type == 'RMAT': rmats = { (True, True): RandomBipartite, (True, False): RMATBipartiteGenerator, (False, True): RandomGraph, (False, False): RMATGenerator } return rmats[(is_bipartite, is_random)] else: raise ValueError("unsupported generator type")
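# Usage sketch (illustrative, not part of the original module): resolves a
# concrete generator class from the structural flags.
if __name__ == "__main__":
    cls = get_structural_generator_class("RMAT", is_bipartite=False, is_random=False)
    assert cls is RMATGenerator
    cls = get_structural_generator_class("RMAT", is_bipartite=True, is_random=True)
    assert cls is RandomBipartite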
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Optional, Set, Tuple

from syngen.generator.graph.fitter import RMATFitter
from syngen.generator.graph.rmat import RMATGenerator


class RandomGraph(RMATGenerator):
    """ Graph generator based on the Erdős–Rényi model that generates random
    non-bipartite graphs

    Args:
        seed (int): Seed to reproduce the results. If None then random seed will be used.
        logdir (str): Directory to store the logging results.
        fitter (RMATFitter): RMATFitter to be used.
    """

    def __init__(self, seed: Optional[int] = None, logdir: str = "./logs", gpu: bool = True, **kwargs):
        super().__init__(seed, logdir, gpu, fitter=RMATFitter(random=True))
        self.fit()

    def fit(
        self,
        graph: Optional[List[Tuple[int, int]]] = None,
        is_directed: Optional[bool] = None,
        **kwargs,
    ):
        """ Fits the generator on the graph. For a random graph the fit is
        graph-independent. """
        self._fit_results = self.fitter.fit(graph)
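# Usage sketch (illustrative, not part of the original module): RandomGraph is
# fitted at construction (the Erdős–Rényi fit needs no input graph), so it can
# sample immediately. The keyword names follow the BaseGraphGenerator
# interface and are assumed to match RMATGenerator.generate.
if __name__ == "__main__":
    generator = RandomGraph(seed=0, gpu=False)
    edges = generator.generate(num_nodes=64, num_edges=128, is_directed=False)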
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph/random.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional

import cupy as cp
import numpy as np


class BaseSeeder:
    """ Base seeder that keeps a single seed for all random number generators
    used during generation

    Args:
        seed (int): optional global seed; if None, a random seed is drawn
    """

    def __init__(self, seed: Optional[int] = None):
        self.seed = seed

    @property
    def seed(self):
        return self._seed

    @seed.setter
    def seed(self, value):
        self._seed = value if value is not None else np.random.randint(0, 100)

    def reseed(self):
        """Seeds the NumPy and CuPy global random states with `self.seed`"""
        np.random.seed(self.seed)
        cp.random.seed(self.seed)
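# Usage sketch (illustrative, not part of the original module): reseeds the
# NumPy and CuPy global random states from one place. reseed() touches cupy,
# so a CUDA-capable environment is assumed.
if __name__ == "__main__":
    seeder = BaseSeeder(seed=123)
    seeder.reseed()     # np.random and cp.random now both use seed 123
    print(seeder.seed)  # 123; with seed=None a random seed in [0, 100) is drawn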
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph/seeder.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, List, Optional, Set, Tuple

from syngen.generator.graph.fitter import RMATFitter
from syngen.generator.graph.rmat_bipartite import RMATBipartiteGenerator


class RandomBipartite(RMATBipartiteGenerator):
    """ Graph generator based on the Erdős–Rényi model that generates random
    bipartite graphs

    Args:
        seed (int): Seed to reproduce the results. If None then random seed will be used.
        logdir (str): Directory to store the logging results. Defaults to ./logs.
        fitter (RMATFitter): RMATFitter to be used.
    """

    def __init__(
        self,
        seed: Optional[int] = None,
        logdir: str = "./logs",
        gpu: bool = True,
        **kwargs,
    ):
        super().__init__(seed, logdir, gpu, fitter=RMATFitter(random=True))
        self.fit()

    def fit(
        self,
        graph: Optional[List[Tuple[int, int]]] = None,
        src_set: Optional[Set[int]] = None,
        dst_set: Optional[Set[int]] = None,
        is_directed: bool = False,
        transform_graph: bool = True,
    ):
        """ Fits the generator on the graph. For a random graph the fit is
        graph-independent. """
        self._fit_src_dst_results = self.fitter.fit(graph)
        self._fit_dst_src_results = (
            None if not is_directed else self.fitter.fit(graph)
        )
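# Usage sketch (illustrative, not part of the original module): RandomBipartite
# is fitted at construction, so it can sample a bipartite graph right away.
# The node/edge counts below are assumptions for the demo.
if __name__ == "__main__":
    generator = RandomBipartite(seed=0, gpu=False)
    edges = generator.generate(
        num_nodes_src_set=16,
        num_nodes_dst_set=16,
        num_edges_src_dst=32,
        num_edges_dst_src=32,
        is_directed=False,
    )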
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph/random_bipartite.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import logging import math import multiprocessing from datetime import datetime from functools import partial from typing import Tuple, Union, Optional import cupy as cp import matplotlib.pyplot as plt import numpy as np from tqdm import tqdm from pylibraft.random import rmat from scipy import stats from syngen.utils import NDArray, infer_operator from syngen.utils.utils import infer_operator from syngen.utils.io_utils import dump_generated_graph from syngen.utils.memory_manager import MemoryManager from syngen.utils.types import NDArray logger = logging.getLogger(__name__) def move_ndarray_to_host(ndarray: NDArray): if isinstance(ndarray, np.ndarray): return ndarray elif isinstance(ndarray, cp.ndarray): return cp.asnumpy(ndarray) else: raise ValueError('supports only numpy and cupy ndarrays') def rearrange_graph( edge_list: NDArray, src_nodes: NDArray, dst_nodes: NDArray, assume_unique: bool = False, ) -> Tuple[NDArray, NDArray]: """ Transforms a bipartite graph from edge list format to lower_left and upper_right adjacency matrices. Returned matrices are in coordinate list format. """ operator = infer_operator(edge_list) if not isinstance(src_nodes, (np.ndarray, cp.ndarray)): raise ValueError('src_nodes: expected type NDArray, but %s was passed', type(src_nodes)) if not isinstance(dst_nodes, (np.ndarray, cp.ndarray)): raise ValueError('dst_nodes: expected type NDArray, but %s was passed', type(dst_nodes)) if not assume_unique: src_nodes = operator.unique(src_nodes) dst_nodes = operator.unique(dst_nodes) if operator.intersect1d(src_nodes, dst_nodes, assume_unique=True).size != 0: raise ValueError('node sets cannot intersect') edge_list = edge_list.flatten() node_set = operator.hstack([src_nodes, dst_nodes]) pos_to_new_id = operator.argsort(node_set) sorted_node_set = node_set[pos_to_new_id] pos_in_sorted_nodeset = operator.searchsorted(sorted_node_set, edge_list) # need to validate since errors could be ignored # https://docs.cupy.dev/en/stable/user_guide/difference.html#out-of-bounds-indices message = 'all ids in a graph should be in one of the node sets' if operator.any(pos_in_sorted_nodeset == len(sorted_node_set)): raise ValueError(message) if operator.any(sorted_node_set[pos_in_sorted_nodeset] != edge_list): raise ValueError(message) edge_list_mapped = pos_to_new_id[pos_in_sorted_nodeset].reshape(-1, 2) upper_right = edge_list_mapped[edge_list_mapped[:, 0] < len(src_nodes)] upper_right[:, 1] -= len(src_nodes) lower_left = edge_list_mapped[edge_list_mapped[:, 0] >= len(src_nodes)] lower_left[:, 0] -= len(src_nodes) return lower_left, upper_right def reindex_graph( edge_list: NDArray, return_counts: bool = False, ) -> Union[NDArray, Tuple[NDArray, int, int]]: """ Reindexes a graph by assigning node ids starting from 0. Returns the processed graph and, optionally, number of nodes and number of edges. 
""" operator = infer_operator(edge_list) nodes, inverse_flat = operator.unique(edge_list, return_inverse=True) edge_list_reindexed = inverse_flat.reshape(edge_list.shape) if return_counts: return edge_list_reindexed, len(nodes), len(edge_list) else: return edge_list_reindexed def get_reversed_part(part, gpu=False, operator=None): operator = operator or (cp if gpu else np) new_part = operator.empty_like(part) new_part[:, 0] = part[:, 1] new_part[:, 1] = part[:, 0] return new_part # Postprocessing def recreate_graph(lower: NDArray, upper: NDArray, offset: int, gpu=False): assert ( lower is not None and upper is not None ), "Upper and lower cannot be None" operator = cp if gpu else np lower[:, 0] = lower[:, 0] + offset upper[:, 1] = upper[:, 1] + offset new_graph = operator.concatenate((lower, upper), axis=0) return new_graph def recreate_bipartite_nondirected(graph, row_shape): upper = [(row, col + row_shape) for row, col in graph] lower = [(col, row) for row, col in upper] new_graph = upper + lower return new_graph def to_adj_matrix(graph, shape): matrix = np.zeros(shape=shape, dtype=np.bool) arr_indicies = np.array(graph) matrix[arr_indicies[:, 0], arr_indicies[:, 1]] = 1 return matrix def plot_graph_adj(graph, shape): graph_adj = to_adj_matrix(graph, shape=shape) return plt.imshow(graph_adj, cmap="binary", interpolation="nearest") def graph_to_snap_file(A, filename): np.savetxt(filename, A, fmt="%i", delimiter="\t") def effective_nonsquare_rmat_approximate( theta, E, A_shape, noise_scaling=1.0, batch_size=1000, dtype=np.int64, custom_samplers=None, generate_back_edges=False, verbose=False, ): """ This function generates list of edges using modified RMat approach Args: theta (np.array): seeding matrix, needs to be shape 2x2 E (int): number of edges to be generated A_shape (tuple): shape of resulting adjacency matrix. numbers has to be powers of 2 A_shape should be equal to (ceil(log2(X)),ceil(log2(Y))) X,Y are dimensions of original adjacency noise_scaling (float 0..1): noise scaling factor for good degree distribution batch_size (int): edges are generated in batches of batch_size size dtype (numpy dtype np.int32/np.int64): dtype of nodes id's custom_samplers (List[scipy.stats.rv_discrete]): samplers for each step of genration process generate_back_edges (bool): if True then generated edges will also have "back" edges. Not that setting to True for partite graphs makes no sense. Returns: A (np.array 2 x E): matrix containing in every row a signle edge. Edge is always directed 0'th column is FROM edge 1st is TO edge mtx_shape (tuple) - shape of adjecency matrix (A contains list of edges, this is Adjecency metrix shape) custom_samplers (List[scipy.stats.rv_discrete]) - list of samplers needed to generate edges from the same disctribution for multiple runs of the function Description: The generation will consist of theta^[n] (x) theta_p^[m] (x) theta_q^[l] ^[n] is kronecker power (x) is matrix kronecker product theta_p (2x1) and theta_q(1x2) are marginals of theta This way we can generate rectangular shape of adjecency matrix e.g. 
for bipatrite graphs """ def get_row_col_addres(thetas_n): thetas_r = [t.shape[0] for t in thetas_n] thetas_c = [t.shape[1] for t in thetas_n] row_n = np.prod(thetas_r) # theta_r**quadrant_sequence.shape[1] col_n = np.prod(thetas_c) # theta_c**quadrant_sequence.shape[1] row_adders = np.array( [ int(row_n / thetas_r[i] ** (i + 1)) % row_n for i in range(len(thetas_n)) ] ) # there has to be % as we can have thetas_r[i]==1 col_adders = np.array( [ int(col_n / thetas_c[i] ** (i + 1)) % col_n for i in range(len(thetas_n)) ] ) return row_adders, col_adders, thetas_r, thetas_c, row_n, col_n def parse_quadrants( quadrant_sequence, thetas_n, row_adders, col_addres, thetas_r, thetas_c, row_n, col_n, dtype=np.int64, ): N = len(thetas_n) new_edges = np.zeros( shape=(quadrant_sequence.shape[0], 2) ) # 2 because 0 col=rows_addresses, 1st col = columns row_addr = np.array(quadrant_sequence // thetas_c, dtype=dtype) col_addr = np.array(quadrant_sequence % thetas_c, dtype=dtype) row_adders = np.array( [int(row_n / thetas_r[i] ** (i + 1)) % row_n for i in range(N)] ) # there has to be % as we can have thetas_r[i]==1 col_adders = np.array( [int(col_n / thetas_c[i] ** (i + 1)) % col_n for i in range(N)] ) new_edges[:, 0] = np.sum(np.multiply(row_addr, row_adders), axis=1) new_edges[:, 1] = np.sum(np.multiply(col_addr, col_adders), axis=1) return new_edges if batch_size > E: # if bs>E batch_size = int(E // 2 * 2) if generate_back_edges: assert ( batch_size % 2 == 0 and batch_size >= 2 ), "batch size has to be odd and >1" assert ( np.abs((np.sum(theta) - 1.0)) < 1e-6 ), "Theta probabilities has to sum to 1.0" assert (theta.shape[0] == 2) and ( theta.shape[1] == 2 ), "Only 2x2 seeding matrixes are acceptable" assert len(A_shape) == 2, "A_shape needs to be of len 2" # get appropriate number of n,m,l always m=0 or l=0 (or both for rectangular adjecency) r = A_shape[0] c = A_shape[1] n = min(r, c) # theta^[n] (x) theta_p^[m] (x) theta_q^[l] m = max(0, r - c) # flake8: noqa l = max(0, c - r) # calc values of marginal theta matrixes theta_p = theta.sum(axis=1).reshape((2, -1)) # 2x1 theta_q = theta.sum(axis=0).reshape((1, -1)) # 1x2 # get all thetas thetas_n = [theta] * n + [theta_p] * m + [theta_q] * l # prepare samplers for each of n+m+l steps if custom_samplers is None: custom_samplers = [] for i in range(n + m + l): theta_n = thetas_n[ i ] # each of n+m+l steps have their own theta_n which can be theta/theta_p or theta_q + # noise noise = noise_scaling * np.random.uniform( -1, 1, size=theta_n.shape ) noise_to_add = np.multiply(theta_n, noise) theta_n = theta_n + noise_to_add theta_n = theta_n / np.sum(theta_n) cstm_n = "step_" + str(i) theta_r = theta_n.shape[0] theta_c = theta_n.shape[1] xk = tuple(range(theta_r * theta_c)) pk = theta_n.reshape(-1) cstm_s = stats.rv_discrete(name=cstm_n, values=(xk, pk)) custom_samplers.append(cstm_s) # Prepare all batch sizes needed for generation if batch_size == 0: batch_count = 0 # XXX: why does this happen anyways? 
else: batch_count = E // batch_size last_batch_size = E - batch_count * batch_size if last_batch_size % 2 > 0 and generate_back_edges: last_batch_size -= 1 A = np.zeros((E, 2), dtype=np.int64) num_sequences = batch_size last_num_sequences = last_batch_size if ( generate_back_edges ): # in case of generating back edges we need to sample just E/2 last_num_sequences = last_batch_size // 2 num_sequences = batch_size // 2 new_back_edges = np.zeros(shape=(num_sequences, 2)) quadrant_sequence = np.zeros(shape=(num_sequences, n + m + l), dtype=dtype) ( row_adders, col_addres, thetas_r, thetas_c, row_n, col_n, ) = get_row_col_addres(thetas_n) # generate sequences of quadrants from previously prepared samplers batch_itr = range(batch_count) if verbose: batch_itr = tqdm(batch_itr) for e in batch_itr: for i in range( n + m + l ): # each steps in generation has its own sampler smpl = custom_samplers[i].rvs(size=num_sequences) quadrant_sequence[:, i] = smpl # produce new edges new_edges = parse_quadrants( quadrant_sequence, thetas_n, row_adders, col_addres, thetas_r, thetas_c, row_n, col_n, dtype=dtype, ) if generate_back_edges: new_back_edges[:, [0, 1]] = new_edges[:, [1, 0]] # swap columns A[ e * batch_size: (e + 1) * batch_size: 2, : ] = new_edges # we need interleave so that back edges are "right after" normal edges A[ e * batch_size + 1: (e + 1) * batch_size: 2, : ] = new_back_edges else: A[e * batch_size: (e + 1) * batch_size, :] = new_edges # generate last batch if last_batch_size > 0: for i in range(n + m + l): smpl = custom_samplers[i].rvs(size=last_num_sequences) quadrant_sequence[:last_num_sequences, i] = smpl new_edges = parse_quadrants( quadrant_sequence[:last_num_sequences, :], thetas_n, row_adders, col_addres, thetas_r, thetas_c, row_n, col_n, dtype=dtype, ) if generate_back_edges: new_back_edges[:last_num_sequences, [0, 1]] = new_edges[ :last_num_sequences, [1, 0] ] # we need interleave so that back edges are "right after" normal edges A[ batch_count * batch_size: batch_count * batch_size + last_batch_size: 2, :, ] = new_edges # np.concatenate((new_edges,new_back_edges[:last_num_sequences,:]),axis=0) A[ batch_count * batch_size + 1: batch_count * batch_size + last_batch_size: 2, :, ] = new_back_edges[:last_num_sequences, :] else: A[ batch_count * batch_size: batch_count * batch_size + last_batch_size, :, ] = new_edges mtx_shape = ( np.prod([t.shape[0] for t in thetas_n]), np.prod([t.shape[1] for t in thetas_n]), ) # shape of resulting adjacency matrix return A, mtx_shape, custom_samplers def effective_nonsquare_rmat_exact( theta, E, A_shape, noise_scaling=1.0, batch_size=1000, dtype=np.int64, custom_samplers=None, remove_selfloops=False, generate_back_edges=False, return_node_ids=0, verbose=False, ): """ This function generates list of edges using modified RMat approach based on effective_nonsuqare_rmat_approximate Args: theta (np.array): seeding matrix, needs to be shape 2x2 E (int): number of edges to be generated A_shape (tuple): shape of resulting adjacency matrix. numbers has to be powers of 2 A_shape should be equal to (ceil(log2(X)),ceil(log2(Y))) X,Y are dimensions of original adjacency noise_scaling (float 0..1): noise scaling factor for good degree distribution batch_size (int): edges are generated in batches of batch_size size dtype (numpy dtype np.int32/np.int64): dtype of nodes id's remove_selfloops (bool): If true edges n->n will not be generated. 
Note that for partite graphs this makes no sense generate_back_edges (bool): if True then generated edges will also have "back" edges. Not that setting to True for partite graphs makes no sense. Returns: A (np.array 2 x E) - matrix containing in every row a signle edge. Edge is always directed 0'th column is FROM edge 1st is TO edge mtx_shape (tuple) - shape of adjecency matrix (A contains list of edges, this is Adjecency metrix shape) custom_samplers (List[scipy.stats.rv_discrete]) - list of samplers needed to generate edges from the same disctribution for multiple runs of the function Description: see effective_nonsuqare_rmat_approximate """ heuristics = 1.5 if verbose: print("Getting egdes") A, mtx_shape, cs = effective_nonsquare_rmat_approximate( theta, int(heuristics * E), A_shape, noise_scaling=noise_scaling, batch_size=batch_size, dtype=dtype, custom_samplers=custom_samplers, generate_back_edges=generate_back_edges, verbose=verbose, ) if generate_back_edges: A = A[ np.sort(np.unique(A, return_index=True, axis=0)[1]) ] # permutation is not needed here else: if verbose: print("Getting unique egdes") A = np.unique(A, axis=0) if verbose: print("Permuting edges") perm = np.random.permutation( A.shape[0] ) # we need to permute it as othervise unique returns edges in order A = A[perm] if remove_selfloops: if verbose: print("Removing selfloops") A = np.delete(A, np.where(A[:, 0] == A[:, 1]), axis=0) E_already_generated = A.shape[0] if E_already_generated >= E: if return_node_ids == 2: return A[:E, :], np.unique(A[:E, :][:, 0]), np.unique(A[:E, :][:, 1]), mtx_shape, cs if return_node_ids == 1: return A[:E, :], np.unique(A[:E, :]), mtx_shape, cs return A[:E, :], mtx_shape, cs else: while E_already_generated < E: if verbose: print("Generating some additional edges") E_to_generate = int(heuristics * (E - E_already_generated)) A_next, mtx_shape, cs = effective_nonsquare_rmat_approximate( theta, E_to_generate, A_shape, noise_scaling=noise_scaling, batch_size=batch_size, dtype=dtype, custom_samplers=cs, verbose=verbose, ) if remove_selfloops: A_next = np.delete( A_next, np.where(A_next[:, 0] == A_next[:, 1]), axis=0 ) A = np.concatenate((A, A_next), axis=0) if generate_back_edges: A = A[np.sort(np.unique(A, return_index=True, axis=0)[1])] else: A = np.unique(A, axis=0) perm = np.random.permutation(A.shape[0]) A = A[perm] E_already_generated = A.shape[0] if return_node_ids == 2: return A[:E, :], np.unique(A[:E, :][:, 0]), np.unique(A[:E, :][:, 1]), mtx_shape, cs if return_node_ids == 1: return A[:E, :], np.unique(A[:E, :]), mtx_shape, cs return A[:E, :], mtx_shape, cs def cupy_unique_axis0(array): # https://stackoverflow.com/questions/58662085/is-there-a-cupy-version-supporting-axis-option-in-cupy-unique-function-any sortarr = array[cp.lexsort(array.T[::-1])] mask = cp.empty(array.shape[0], dtype=cp.bool_) mask[0] = True mask[1:] = cp.any(sortarr[1:] != sortarr[:-1], axis=1) return sortarr[mask] def unique_axis0(ar: NDArray) -> NDArray: """ Uniform way of calling operator.unique(ar, axis=0). axis != None is not supported in cupy yet. This function provides a workaround for one of the cases. 
""" operator = infer_operator(ar) if operator == cp: return cupy_unique_axis0(ar) else: return np.unique(ar, axis=0) def generate_gpu_rmat( a, b, c, d, r_scale, c_scale, n_edges, noise=0.5, is_directed=False, has_self_loop=False, return_node_ids=0, ): if not is_directed and r_scale != c_scale: raise ValueError('undirected generation works only for square adj matrix') if not is_directed: n_edges = n_edges // 2 gen_graph = None HEURISTIC = 1.2 edges_to_generate = int(HEURISTIC * n_edges) theta_len = max(r_scale, c_scale) base_theta = [a, b, c, d] if noise > 0: full_theta = [] for i in range(theta_len): noise_uniform = noise * np.random.uniform( -1, 1, size=len(base_theta) ) noise_to_add = np.multiply(base_theta, noise_uniform) theta_n = base_theta + noise_to_add theta_n = theta_n / np.sum(theta_n) full_theta.append(theta_n) else: full_theta = base_theta * theta_len theta_cpu = np.array(full_theta, dtype=np.float32) theta = cp.asarray(theta_cpu) while gen_graph is None or gen_graph.shape[0] < n_edges: tmp = cp.empty((edges_to_generate, 2), dtype=cp.int32) seed = cp.random.randint(0, high=1_000_000, size=None, dtype=int) rmat(tmp, theta, r_scale, c_scale, seed=seed) # Remove self loops if not has_self_loop: tmp = tmp[tmp[:, 0] != tmp[:, 1]] # Keep only one sided edges if not is_directed: tmp = tmp[tmp[:, 0] <= tmp[:, 1]] if gen_graph is None: # Remove duplicates gen_graph = cupy_unique_axis0(tmp) else: gen_graph = cp.concatenate((gen_graph, tmp), axis=0) # Remove duplicates gen_graph = cupy_unique_axis0(gen_graph) gen_graph = gen_graph[:n_edges] if not is_directed: gen_graph_backward = cp.empty((n_edges, 2), dtype=cp.int32) gen_graph_backward[:, 0] = gen_graph[:, 1] gen_graph_backward[:, 1] = gen_graph[:, 0] gen_graph = cp.concatenate((gen_graph, gen_graph_backward), axis=0) gen_graph = cupy_unique_axis0( gen_graph ) if not has_self_loop: gen_graph = gen_graph[gen_graph[:, 0] != gen_graph[:, 1]] if return_node_ids == 2: return cp.asnumpy(gen_graph), cp.asnumpy(cp.unique(gen_graph[:, 0])), cp.asnumpy(cp.unique(gen_graph[:, 1])) if return_node_ids == 1: return cp.asnumpy(gen_graph), cp.asnumpy(cp.unique(gen_graph)) return cp.asnumpy(gen_graph) def generate_theta(base_theta, noise, theta_len, is_directed): if noise > 0: full_theta = [] for i in range(theta_len): noise_uniform = noise * np.random.uniform( -1, 1, size=len(base_theta) ) noise_to_add = np.multiply(base_theta, noise_uniform) theta_n = base_theta + noise_to_add if not is_directed: theta_n[2] = theta_n[1] theta_n = theta_n / np.sum(theta_n) full_theta.append(theta_n) else: full_theta = [base_theta] * theta_len return full_theta def prepare_chunks(full_theta, r_scale, c_scale, gpu_bytes_to_use, edges_to_generate): if r_scale > 32 or c_scale > 32: bytes_per_edge = 8 max_id = 9223372036854775807 # int64 max else: bytes_per_edge = 4 max_id = 2147483647 # int32 max bytes_to_generate = edges_to_generate * 2 * bytes_per_edge skip_theta = 0 # approximation while (bytes_to_generate >> 2 * skip_theta) > gpu_bytes_to_use \ or (bytes_to_generate >> 2 * skip_theta) > max_id: skip_theta += 1 if skip_theta == 0: return [], np.array([edges_to_generate]), full_theta, 0, r_scale, c_scale # chunk size is limited by the smaller side of the rectangular graph while abs(r_scale - c_scale) > skip_theta: skip_theta += 1 def repeat(a, scale): if scale == 1: return a return np.repeat(np.repeat(a, scale, axis=0), scale, axis=1) def tile(a, scale): if scale == 1: return a return np.tile(a, (scale, scale)) def prepare_prefixes(skip_theta): if skip_theta > 0: 
prefix_theta = full_theta[:skip_theta] gen_theta_len = max(r_scale, c_scale) - skip_theta prefix_edges = np.ones((1 << skip_theta, 1 << skip_theta), dtype=np.float64) prefixes = np.zeros((2, 1 << skip_theta, 1 << skip_theta), dtype=np.int32) for theta_idx, theta in enumerate(prefix_theta): pref_src = np.array([[0, 0], [1, 1]]) << theta_idx pref_dst = np.array([[0, 1], [0, 1]]) << theta_idx theta = np.array(theta, dtype=np.float64).reshape(2, 2) repeat_scale = 1 << (skip_theta - theta_idx - 1) tile_scale = 1 << theta_idx prefix_edges = prefix_edges * tile(repeat(theta, repeat_scale), tile_scale) prefixes[0] = prefixes[0] + tile(repeat(pref_src, repeat_scale), tile_scale) prefixes[1] = prefixes[1] + tile(repeat(pref_dst, repeat_scale), tile_scale) if r_scale != c_scale: # probabilities in the rectangular matrix should sum up to 1.0 r_len = 2 ** (r_scale - gen_theta_len) c_len = 2 ** (c_scale - gen_theta_len) prefix_edges[:r_len, :c_len] = prefix_edges[:r_len, :c_len] / prefix_edges[:r_len, :c_len].sum() prefixes[int(r_scale > c_scale), :r_len, :c_len] = \ prefixes[int(r_scale > c_scale), :r_len, :c_len] >> abs(r_scale - c_scale) prefix_edges = np.ceil(prefix_edges * edges_to_generate).astype(np.int32).reshape(-1) prefixes = prefixes.reshape(2, -1) else: prefixes = [] prefix_edges = np.array([edges_to_generate]) return prefixes, prefix_edges prefixes, prefix_edges = prepare_prefixes(skip_theta) while prefix_edges.max() * 2 * bytes_per_edge > gpu_bytes_to_use: skip_theta += 1 prefixes, prefix_edges = prepare_prefixes(skip_theta) generation_theta = full_theta[skip_theta:] return prefixes, prefix_edges, generation_theta, skip_theta, len(generation_theta), len(generation_theta) def _generate_gpu_chunk_rmat( chunk_info, prefixes, prefix_edges, has_self_loop, is_directed, generation_theta, r_log2_nodes, c_log2_nodes, r_pref_len, c_pref_len, row_len, gpus, dtype='int32', return_node_ids=0, memmap_kwargs: Optional = None, chunk_save_path_format: Optional[str] = None): chunk_id, chunk_end = chunk_info chunk_size = prefix_edges[chunk_id] if gpus > 1: gpu_id = int(multiprocessing.current_process()._identity[0]) % gpus else: gpu_id = 0 theta_cpu = np.array(generation_theta, dtype=np.float32) edge_list = None is_diagonal_chunk = ((chunk_id // row_len) == (chunk_id % row_len)) use_memmap = memmap_kwargs is not None if use_memmap: memmap_outfile = np.load(file=memmap_kwargs['filename'], mmap_mode='r+') with cp.cuda.Device(gpu_id): theta = cp.asarray(theta_cpu) while edge_list is None or edge_list.shape[0] < prefix_edges[chunk_id]: tmp = cp.empty((prefix_edges[chunk_id], 2), dtype=dtype) seed = cp.random.randint(0, high=1_000_000, size=None, dtype=int) rmat(tmp, theta, r_log2_nodes, c_log2_nodes, seed=seed) if not has_self_loop and is_diagonal_chunk: tmp = tmp[tmp[:, 0] != tmp[:, 1]] # undirected diagonal_case if not is_directed and is_diagonal_chunk: tmp = tmp[tmp[:, 0] <= tmp[:, 1]] tmp = cupy_unique_axis0(tmp) if edge_list is None: edge_list = tmp else: edge_list = cp.concatenate((edge_list, tmp), axis=0) del tmp edge_list = cupy_unique_axis0(edge_list) if len(prefix_edges) > 1: edge_list[:, 0] = (edge_list[:, 0] << r_pref_len) + prefixes[0][chunk_id] edge_list[:, 1] = (edge_list[:, 1] << c_pref_len) + prefixes[1][chunk_id] edge_list = edge_list[:prefix_edges[chunk_id]] if return_node_ids == 2: src_nodes_ids = cp.asnumpy(cp.unique(edge_list[:, 0])) dst_nodes_ids = cp.asnumpy(cp.unique(edge_list[:, 1])) if return_node_ids == 1: nodes_ids = cp.asnumpy(cp.unique(edge_list)) result = cp.asnumpy(edge_list) if 
use_memmap:
        memmap_outfile[chunk_end-chunk_size:chunk_end] = result

    del edge_list

    if chunk_save_path_format is not None:
        dump_generated_graph(chunk_save_path_format.format(chunk_id=chunk_id), result)
        result = len(result)

    if use_memmap:
        result = None

    if return_node_ids == 2:
        return result, src_nodes_ids, dst_nodes_ids
    if return_node_ids == 1:
        return result, nodes_ids
    return result


def generate_gpu_chunked_rmat(
    a, b, c, d,
    r_scale, c_scale,
    n_edges,
    noise=0.5,
    is_directed=False,
    has_self_loop=False,
    gpus=None,
    return_node_ids=0,
    save_path: Optional[str] = None,
    verbose: bool = False,
):
    if not is_directed and r_scale != c_scale:
        raise ValueError('undirected generation works only for a square adj matrix')

    base_theta = [a, b, c, d]
    theta_len = max(r_scale, c_scale)
    full_theta = generate_theta(base_theta, noise, theta_len, is_directed)

    if gpus is None:
        gpus = MemoryManager().get_available_gpus()

    gpu_bytes_to_use = MemoryManager().get_min_available_across_gpus_memory(gpus=gpus)
    gpu_bytes_to_use = math.floor(gpu_bytes_to_use * 0.10)

    prefixes, prefix_edges, generation_theta, prefix_len, r_log2_nodes, c_log2_nodes = \
        prepare_chunks(full_theta, r_scale, c_scale, gpu_bytes_to_use, n_edges)

    chunk_ids = list(range(len(prefix_edges)))
    row_len = 1 << prefix_len

    r_pref_len = r_scale - len(generation_theta)
    c_pref_len = c_scale - len(generation_theta)

    if not is_directed:
        # generate a triangular adj matrix
        chunk_ids = [i for i in chunk_ids if (i // row_len) <= (i % row_len)]
        # reduce the diagonal chunks
        for i in range(prefix_len * 2):
            prefix_edges[i * row_len + i] = prefix_edges[i * row_len + i] // 2

    if r_scale != c_scale:
        chunk_ids = [
            i for i in chunk_ids
            if (i // row_len) < 2 ** r_pref_len and (i % row_len) < 2 ** c_pref_len
        ]

    is_single_chunk = len(chunk_ids) == 1

    memmap_kwargs = None
    chunk_save_path_format = None
    use_memmap = False

    if save_path and os.path.isdir(save_path):
        chunk_save_path_format = os.path.join(save_path, 'chunk_{chunk_id}.npy')
    elif save_path and save_path.endswith('.npy'):
        use_memmap = True
        memmap_shape = (sum(prefix_edges[chunk_ids]), 2)
        memmap_dtype = np.uint64 if theta_len > 32 else np.uint32
        memmap_kwargs = dict(filename=save_path)
        memmap_outfile = np.lib.format.open_memmap(
            save_path, dtype=memmap_dtype, shape=memmap_shape, mode='w+')

    dtype = cp.int64 if theta_len > 32 else cp.int32

    _generate_gpu_chunk_rmat_p = partial(
        _generate_gpu_chunk_rmat,
        prefixes=prefixes,
        prefix_edges=prefix_edges,
        has_self_loop=has_self_loop,
        is_directed=is_directed,
        generation_theta=generation_theta,
        r_log2_nodes=r_log2_nodes,
        c_log2_nodes=c_log2_nodes,
        r_pref_len=r_pref_len,
        c_pref_len=c_pref_len,
        row_len=row_len,
        dtype=dtype,
        return_node_ids=return_node_ids,
        chunk_save_path_format=chunk_save_path_format,
        memmap_kwargs=memmap_kwargs,
        gpus=1 if is_single_chunk else gpus,
    )

    if is_single_chunk:
        chunk_res = _generate_gpu_chunk_rmat_p((chunk_ids[0], prefix_edges[chunk_ids[0]]))
        if return_node_ids == 2:
            result, src_node_ids, dst_node_ids = chunk_res
        elif return_node_ids == 1:
            result, node_ids = chunk_res
        else:
            result = chunk_res
        if use_memmap:
            result = memmap_outfile
    else:
        multiprocessing.set_start_method('spawn', force=True)
        sub_res_lists = []

        if return_node_ids == 2:
            src_node_ids_presence = np.full(2 ** r_scale, False)
            dst_node_ids_presence = np.full(2 ** c_scale, False)
        elif return_node_ids == 1:
            node_ids_presence = np.full(2 ** theta_len, False)

        with multiprocessing.Pool(processes=gpus) as pool:
            chunk_res = pool.imap_unordered(
                _generate_gpu_chunk_rmat_p,
                zip(chunk_ids, np.cumsum(prefix_edges[chunk_ids])),
                chunksize=(len(chunk_ids) + gpus - 1) // gpus,
            )

            if verbose:
                chunk_res = tqdm(chunk_res, total=len(chunk_ids))

            if return_node_ids == 2:
                for res, src_n_ids, dst_n_ids in chunk_res:
                    sub_res_lists.append(res)
                    src_node_ids_presence[src_n_ids] = True
                    dst_node_ids_presence[dst_n_ids] = True
            elif return_node_ids == 1:
                for res, n_ids in chunk_res:
                    sub_res_lists.append(res)
                    node_ids_presence[n_ids] = True
            else:
                sub_res_lists = list(chunk_res)

        if use_memmap:
            result = memmap_outfile
        elif chunk_save_path_format is None:
            result = np.concatenate(sub_res_lists)
        else:
            result = int(np.sum(sub_res_lists))

        if return_node_ids == 2:
            src_node_ids, = np.where(src_node_ids_presence)
            dst_node_ids, = np.where(dst_node_ids_presence)
        elif return_node_ids == 1:
            node_ids, = np.where(node_ids_presence)

    if return_node_ids == 2:
        return result, src_node_ids, dst_node_ids
    if return_node_ids == 1:
        return result, node_ids
    return result


def get_degree_distribution(vertices, gpu=False, operator=None):
    operator = operator or (cp if gpu else np)
    _, degrees = operator.unique(vertices, return_counts=True)
    degree_values, degree_counts = operator.unique(degrees, return_counts=True)
    return degree_values, degree_counts


class BaseLogger:
    """ Base logger class

    Args:
        logdir (str): path to the logging directory
    """

    def __init__(self, logdir: str = "tmp"):
        self.logdir = logdir
        os.makedirs(self.logdir, exist_ok=True)
        current_date_and_time = datetime.now()
        self.logname = f'{current_date_and_time.strftime("%Y_%m_%d_%H_%M_%S")}.txt'
        self.logpath = os.path.join(self.logdir, self.logname)
        self.setup_logger()
        self.log("Initialized logger")

    def setup_logger(self):
        """ Sets up the logger """
        logging.basicConfig(
            filename=self.logpath,
            filemode="a",
            format="%(asctime)s| %(message)s",
            datefmt="%Y/%m/%d %H:%M:%S",
            level=logging.DEBUG,
        )

    def log(self, msg: str):
        """ Logs a message in debug mode

        Args:
            msg (str): message to be printed
        """
        logging.debug(msg)


def _reshuffle(X: NDArray, mask: NDArray, max_node_id: int) -> None:
    """ Shuffles the dst nodes of the edges selected by `mask`.
    Preserves the degree distribution and keeps the edge list sorted.
    """
    operator = infer_operator(X)

    if not operator.any(mask):
        return

    target = X[mask, 1]
    operator.random.shuffle(target)
    X[mask, 1] = target

    src_node_mask = operator.zeros(max_node_id + 1, dtype=operator.bool_)
    src_node_mask[X[mask, 0]] = True

    to_sort_mask = src_node_mask[X[:, 0]]
    to_sort = X[to_sort_mask]
    to_sort = to_sort[operator.lexsort(to_sort.T[::-1])]
    X[to_sort_mask] = to_sort


def _find_correct_edges(
    X: NDArray,
    self_loops: bool = False,
    assume_sorted: bool = False,
) -> Tuple[NDArray, NDArray]:
    """ Finds duplicates and self loops in an edge list. """
    operator = infer_operator(X)

    if not assume_sorted:
        X = X[operator.lexsort(X.T[::-1])]

    mask = operator.empty(X.shape[0], dtype=operator.bool_)
    mask[0] = True
    mask[1:] = operator.any(X[1:] != X[:-1], axis=1)

    if not self_loops:
        mask &= X[:, 0] != X[:, 1]

    return X, mask


def postprocess_edge_list(X: NDArray, n_reshuffle: int = 0, self_loops: bool = False) -> NDArray:
    """ Removes multi-edges and (optionally) self-loops.
    If n_reshuffle > 0 is specified, edges are shuffled between nodes so that
    the degree distribution is preserved and fewer edges will be removed.
    Assumes the node set is reindexed from min_id > 0 to max_id ~ N.
    """
    max_node_id = X.max().item()
    X, mask = _find_correct_edges(X, self_loops=self_loops)
    for _ in range(n_reshuffle):
        _reshuffle(X, ~mask, max_node_id)
        X, mask = _find_correct_edges(X, self_loops=self_loops, assume_sorted=True)
    return X[mask]
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph/utils.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import json
import pickle
from abc import ABC
from typing import List, Optional, Set, Tuple

import numpy as np

from syngen.generator.graph.utils import BaseLogger
from syngen.generator.graph.seeder import BaseSeeder


class BaseGenerator(abc.ABC):
    """ BaseGenerator class """

    JSON_ASSERTION = "Expected file to be json"

    @classmethod
    def get_generators(cls, include_parents=True):
        """ Recursively find subclasses

        Args:
            include_parents (bool): whether to include classes that are parents
                of other subclasses (default: `True`)

        Returns:
            generators: dictionary with all the subclasses
        """
        generators = dict()
        for child in cls.__subclasses__():
            children = child.get_generators(include_parents)
            generators.update(children)
            if include_parents or not children:
                if abc.ABC not in child.__bases__ and BaseGenerator not in child.__bases__:
                    generators[child.__name__] = child
        return generators

    def save(self, path):
        raise NotImplementedError()

    @classmethod
    def load(cls, path):
        raise NotImplementedError()


class BaseGraphGenerator(BaseGenerator, ABC):
    """ Base class for all graph generators

    Args:
        *args: optional positional args
        **kwargs: optional key-word args
    """

    def __init__(
        self,
        seed: Optional[int] = None,
        logdir: str = "./logs",
        gpu: bool = True,
        verbose: bool = False,
        *args,
        **kwargs,
    ):
        self._fit_results = None
        self.seeder = BaseSeeder(seed)
        self.seeder.reseed()
        self.logger = BaseLogger(logdir)
        self.logger.log(f"Using seed: {self.seeder.seed}")
        self.gpu = gpu
        self.verbose = verbose

    def fit(
        self, graph: List[Tuple[int, int]], is_directed: bool, *args, **kwargs
    ):
        """ Fits the generator on the graph

        Args:
            graph (List[Tuple[int, int]]): graph to be fitted on
            is_directed (bool): flag indicating whether the graph is directed
            *args: optional positional args
            **kwargs: optional key-word args
        """
        raise NotImplementedError()

    def generate(
        self,
        num_nodes: int,
        num_edges: int,
        is_directed: bool,
        *args,
        return_node_ids: bool = False,
        **kwargs,
    ) -> np.ndarray:
        """ Generates a graph with approximately `num_nodes` nodes and exactly `num_edges` edges

        Args:
            num_nodes (int): approximate number of nodes to be generated
            num_edges (int): exact number of edges to be generated
            is_directed (bool): flag indicating whether the generated graph has to be directed
            return_node_ids (bool): flag indicating whether the generator has to return node_ids as the second output
            *args: optional positional args
            **kwargs: optional key-word args
        """
        raise NotImplementedError()

    def set_fit_results(self, fit_results):
        self._fit_results = fit_results

    def get_fit_results(self):
        return self._fit_results

    def save_fit_results(self, save_path: str = "./fit_results.json"):
        """ Stores fitted results into a json file

        Args:
            save_path (str): path to the json file with the fitted result
        """
        assert (
            self._fit_results
        ), "There are no fit results to be saved, call fit method first or load the results from the file"
        assert save_path.endswith(".json"), self.JSON_ASSERTION
        with open(save_path, "w") as fjson:
            json.dump(self._fit_results, fjson)

    def load_fit_results(self, load_path: str = "./fit_results.json"):
        """ Loads fitted results from a json file

        Args:
            load_path (str): path to the json file with the fitted result
        """
        assert load_path.endswith(".json"), self.JSON_ASSERTION
        with open(load_path, "r") as fjson:
            self._fit_results = json.load(fjson)

    def save(self, path):
        with open(path, 'wb') as file_handler:
            pickle.dump(self, file_handler, protocol=pickle.HIGHEST_PROTOCOL)

    @classmethod
    def load(cls, path):
        with open(path, 'rb') as file_handler:
            model = pickle.load(file_handler)
        return model

    @staticmethod
    def add_args(parser):
        return parser


class BaseBipartiteGraphGenerator(BaseGenerator, ABC):
    """ Base class for all bipartite graph generators

    Args:
        *args: optional positional args
        **kwargs: optional key-word args
    """

    def __init__(
        self,
        seed: Optional[int] = None,
        logdir: str = "./logs",
        gpu: bool = True,
        verbose: bool = False,
        *args,
        **kwargs,
    ):
        self._fit_src_dst_results = None
        self._fit_dst_src_results = None
        self.seeder = BaseSeeder(seed)
        self.seeder.reseed()
        self.logger = BaseLogger(logdir)
        self.logger.log(f"Using seed: {self.seeder.seed}")
        self.gpu = gpu
        self.verbose = verbose

    def fit(
        self,
        graph: List[Tuple[int, int]],
        src_set: Set[int],
        dst_set: Set[int],
        is_directed: bool,
        transform_graph: bool,
        *args,
        **kwargs,
    ):
        """ Fits the generator on the graph

        Args:
            graph (List[Tuple[int, int]]): graph to be fitted on
            src_set (Set[int]): set of source nodes
            dst_set (Set[int]): set of destination nodes
            is_directed (bool): flag indicating whether the graph is directed
            *args: optional positional args
            **kwargs: optional key-word args
        """
        raise NotImplementedError()

    def generate(
        self,
        num_nodes_src_set: int,
        num_nodes_dst_set: int,
        num_edges_src_dst: int,
        num_edges_dst_src: int,
        is_directed: bool,
        return_node_ids: bool = False,
        transform_graph: bool = True,
        *args,
        **kwargs,
    ):
        """ Generates a graph with approximately `num_nodes_src_set`/`num_nodes_dst_set` nodes
        and exactly `num_edges_src_dst`/`num_edges_dst_src` edges

        Args:
            num_nodes_src_set (int): approximate number of source nodes to be generated
            num_nodes_dst_set (int): approximate number of destination nodes to be generated
            num_edges_src_dst (int): exact number of source->destination edges to be generated
            num_edges_dst_src (int): exact number of destination->source edges to be generated
            is_directed (bool): flag indicating whether the generated graph has to be directed
            return_node_ids (bool): flag indicating whether the generator has to return node_ids as the second output
            *args: optional positional args
            **kwargs: optional key-word args
        """
        raise NotImplementedError()

    def set_fit_results(self, fit_results):
        self._fit_src_dst_results, self._fit_dst_src_results = fit_results

    def get_fit_results(self):
        return self._fit_src_dst_results, self._fit_dst_src_results

    def save_fit_results(self, save_path: str = "./fit_results.json"):
        """ Stores fitted results into a json file

        Args:
            save_path (str): path to the json file with the fitted result
        """
        assert (
            self._fit_src_dst_results or self._fit_dst_src_results
        ), "There are no fit results to be saved, call fit method first or load the results from the file"
        assert save_path.endswith(".json"), self.JSON_ASSERTION
        wrapped_results = {
            "fit_src_dst_results": self._fit_src_dst_results,
            "fit_dst_src_results": self._fit_dst_src_results,
        }
        with open(save_path, "w") as fjson:
            json.dump(wrapped_results, fjson)

    def load_fit_results(self, load_path: str = "./fit_results.json"):
        """ Loads fitted results from a json file

        Args:
            load_path (str): path to the json file with the fitted result
        """
        assert load_path.endswith(".json"), self.JSON_ASSERTION
        with open(load_path, "r") as fjson:
            wrapped_results = json.load(fjson)
        assert (
            "fit_src_dst_results" in wrapped_results
            and "fit_dst_src_results" in wrapped_results
        ), "Required keys fit_src_dst_results and fit_dst_src_results not found in the json"
        self._fit_src_dst_results = wrapped_results["fit_src_dst_results"]
        self._fit_dst_src_results = wrapped_results["fit_dst_src_results"]

    def save(self, path):
        with open(path, 'wb') as file_handler:
            pickle.dump(self, file_handler, protocol=pickle.HIGHEST_PROTOCOL)

    @classmethod
    def load(cls, path):
        with open(path, 'rb') as file_handler:
            model = pickle.load(file_handler)
        return model

    @staticmethod
    def add_args(parser):
        return parser
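
# Illustrative sketch (not part of the original file): a minimal concrete
# generator built on BaseGraphGenerator. `UniformEdgeGenerator` and its
# uniform-sampling logic are hypothetical stand-ins, shown only to make the
# fit/generate contract above concrete.
class UniformEdgeGenerator(BaseGraphGenerator):
    def fit(self, graph, is_directed, *args, **kwargs):
        # nothing to learn for a uniform model; record a placeholder
        self._fit_results = {"model": "uniform"}

    def generate(self, num_nodes, num_edges, is_directed, *args,
                 return_node_ids=False, **kwargs):
        # sample both edge endpoints uniformly at random
        edges = np.random.randint(0, num_nodes, size=(num_edges, 2))
        if return_node_ids:
            return edges, np.unique(edges)
        return edges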
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph/base_graph_generator.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import List, Optional, Set, Tuple

import numpy as np

from syngen.generator.graph.fitter import RMATFitter
from syngen.generator.graph.base_graph_generator import BaseGraphGenerator
from syngen.generator.graph.utils import (
    effective_nonsquare_rmat_exact,
    generate_gpu_rmat,
    generate_gpu_chunked_rmat,
)


class RMATGenerator(BaseGraphGenerator):
    """ Graph generator based on RMAT that generates non-partite graphs

    Args:
        seed (int): seed to reproduce the results. If None, a random seed will be used.
        logdir (str): directory to store the logging results.
        fitter (RMATFitter): RMATFitter to be used.
    """

    def __init__(
        self,
        seed: Optional[int] = None,
        logdir: str = "./logs",
        gpu: bool = True,
        fitter: Optional[RMATFitter] = None,
        **kwargs,
    ):
        super().__init__(seed, logdir, gpu)
        self.fitter = fitter or RMATFitter()

    def fit(self, *args, **kwargs):
        """ Fits the generator on the graph """
        self._fit_results = self.fitter.fit(*args, **kwargs)
        self.logger.log(f"Fit results: {self._fit_results}")

    def _generate_part(
        self,
        fit_results: Tuple[float, float, float, float],
        part_shape: Tuple[int, int],
        num_edges: int,
        has_self_loop: bool,
        is_directed: bool,
        noise: float,
        batch_size: int,
        return_node_ids: bool,
        save_path: Optional[str],
    ):
        if self.gpu:
            return self._generate_part_gpu(
                fit_results=fit_results,
                part_shape=part_shape,
                num_edges=num_edges,
                has_self_loop=has_self_loop,
                is_directed=is_directed,
                noise=noise,
                return_node_ids=return_node_ids,
                save_path=save_path,
            )
        else:
            return self._generate_part_cpu(
                fit_results=fit_results,
                part_shape=part_shape,
                num_edges=num_edges,
                has_self_loop=has_self_loop,
                is_directed=is_directed,
                noise=noise,
                batch_size=batch_size,
                return_node_ids=return_node_ids,
            )

    def _generate_part_cpu(
        self,
        fit_results: Tuple[float, float, float, float],
        part_shape: Tuple[int, int],
        num_edges: int,
        has_self_loop: bool,
        is_directed: bool,
        noise: float,
        batch_size: int,
        return_node_ids: bool,
    ):
        a, b, c, d = fit_results
        theta = np.array([[a, b], [c, d]])
        theta /= a + b + c + d

        res = effective_nonsquare_rmat_exact(
            theta,
            num_edges,
            part_shape,
            noise_scaling=noise,
            batch_size=batch_size,
            dtype=np.int64,
            custom_samplers=None,
            generate_back_edges=not is_directed,
            remove_selfloops=not has_self_loop,
            return_node_ids=return_node_ids,
            verbose=self.verbose,
        )
        if return_node_ids:
            return res[0], res[1]
        return res[0]

    def _generate_part_gpu(
        self,
        fit_results: Tuple[float, float, float, float],
        part_shape: Tuple[int, int],
        num_edges: int,
        has_self_loop: bool,
        is_directed: bool,
        noise: float,
        return_node_ids: bool,
        save_path: Optional[str],
        _chunked: bool = True,
    ):
        a, b, c, d = fit_results
        theta = np.array([a, b, c, d])
        theta /= a + b + c + d
        a, b, c, d = theta
        r_scale, c_scale = part_shape

        if _chunked:
            res = generate_gpu_chunked_rmat(
                a, b, c, d,
                r_scale=r_scale,
                c_scale=c_scale,
                n_edges=num_edges,
                noise=noise,
                is_directed=is_directed,
                has_self_loop=has_self_loop,
                return_node_ids=1 if return_node_ids else 0,
                save_path=save_path,
                verbose=self.verbose,
            )
        else:
            res = generate_gpu_rmat(
                a, b, c, d,
                r_scale=r_scale,
                c_scale=c_scale,
                n_edges=num_edges,
                noise=noise,
                is_directed=is_directed,
                has_self_loop=has_self_loop,
                return_node_ids=1 if return_node_ids else 0,
            )
        if return_node_ids:
            return res[0], res[1]
        return res

    def generate(
        self,
        num_nodes: int,
        num_edges: int,
        is_directed: bool,
        has_self_loop: bool,
        noise: float = 0.5,
        batch_size: int = 1_000_000,
        return_node_ids: bool = False,
        save_path: Optional[str] = None,
        *args,
        **kwargs,
    ):
        """ Generates a graph with approximately `num_nodes` nodes and exactly `num_edges` edges

        Args:
            num_nodes (int): approximate number of nodes to be generated
            num_edges (int): exact number of edges to be generated
            is_directed (bool): flag indicating whether the generated graph has to be directed
            has_self_loop (bool): flag indicating whether to generate self loops
            noise (float): noise for RMAT generation to get a better degree distribution
            batch_size (int): size of the edge chunk that will be generated in one generation step (CPU parameter)
            return_node_ids (bool): flag indicating whether the generator has to return node_ids as the second output
            save_path (str): path to store the graph. If specified, the method returns the number of edges in the graph

        Returns:
            new_graph (np.array[int, int]): generated graph
        """
        assert num_nodes > 0, "Wrong number of nodes"
        assert num_edges > 0, "Wrong number of edges"

        max_edges = (
            num_nodes * num_nodes
            if has_self_loop
            else num_nodes * (num_nodes - 1)
        )
        if not is_directed:
            # an undirected simple graph admits half as many distinct edges
            max_edges = max_edges / 2

        assert (
            num_edges < max_edges
        ), "Configuration of nodes and edges cannot form any graph"

        assert (
            self._fit_results
        ), "There are no fit results, call fit method first or load the seeding matrix from the file"

        log2_nodes = math.ceil(math.log2(num_nodes))
        part_shape = (log2_nodes, log2_nodes)

        res = self._generate_part(
            fit_results=self._fit_results,
            part_shape=part_shape,
            num_edges=num_edges,
            has_self_loop=has_self_loop,
            is_directed=is_directed,
            noise=noise,
            batch_size=batch_size,
            return_node_ids=return_node_ids,
            save_path=save_path,
        )
        if return_node_ids:
            return res[0], res[1]
        return res
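
# Illustrative usage sketch (not part of the original file): fitting an
# RMATGenerator on an existing edge list and sampling a synthetic graph.
# The random edge list and the sizes below are assumptions for the example only.
if __name__ == "__main__":
    generator = RMATGenerator(seed=42, gpu=False)
    observed_edges = np.random.randint(0, 1024, size=(10_000, 2))
    generator.fit(observed_edges, is_directed=True)  # estimates (a, b, c, d)
    synthetic = generator.generate(
        num_nodes=1024,
        num_edges=10_000,
        is_directed=True,
        has_self_loop=False,
    )
    print(synthetic.shape)  # a (10_000, 2) edge list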
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph/rmat.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
from typing import Optional

import numpy as np
from scipy.optimize import minimize

from syngen.utils.types import NDArray
from syngen.generator.graph.utils import get_degree_distribution, move_ndarray_to_host
from syngen.utils.utils import infer_operator

MAXK = 1000


class RMATFitter(object):
    def __init__(self, fast=True, random=False):
        self._loglik = self._fast_loglik if fast else self._original_loglik
        self.random = random

    def _get_p_directed_graph(self, dd, verbose=False):
        num_nodes = dd[:, 1].sum()
        n_exp2 = int(np.ceil(np.log2(num_nodes)))
        E = (dd[:, 0] * dd[:, 1]).sum()
        mx = min(dd[-1, 0], MAXK)

        logeck = np.zeros(shape=(mx + 1), dtype=np.float64)
        tmp = 0
        for k in range(1, mx + 1):
            logeck[k] = tmp + np.log(E - k + 1) - np.log(k)
            tmp = logeck[k]

        lognci = np.zeros(shape=(n_exp2 + 1), dtype=np.float64)
        tmp = 0
        for i in range(1, n_exp2 + 1):
            lognci[i] = tmp + np.log(n_exp2 - i + 1) - np.log(i)
            tmp = lognci[i]

        x0 = np.array([0.5], dtype=np.float64)
        self.optimization_steps = []
        fun = lambda x: self._loglik(x, E, n_exp2, dd, logeck, lognci, MAXK)
        res = minimize(
            fun,
            x0,
            method="Nelder-Mead",
            bounds=[(1e-4, 1.0 - 1e-4)],
            options={"disp": verbose, "fatol": 1e-4},
        )
        return res.x[0]

    def _original_loglik(self, p, E, n_exp, count, logeck, lognci, k_cost_threshold):
        if p <= 0.0 or p >= 1.0:
            return 1e100
        q = p
        a = 0.75 * p
        b = p - a
        c = q - a
        if (a + b + c) >= 1.0:
            return 1e100

        Sx = 0.0
        Sx2 = 0.0
        Sx3 = 0.0
        Sx4 = 0.0
        Sy = 0.0
        Sxy = 0.0
        Sx2y = 0.0

        numX = count[-1, 0]
        totObs = 0.0
        prevY = 0.0

        for m in range(1, numX + 1):
            x = np.log(m)
            if m <= MAXK:
                current_sum = np.exp(
                    logeck[m]
                    + np.log(p) * (n_exp * m)
                    + np.log(1 - p ** n_exp) * (E - m)
                )
                for i in range(1, n_exp + 1):
                    current_sum = current_sum + np.exp(
                        logeck[m]
                        + lognci[i]
                        + np.log(p) * (m * (n_exp - i))
                        + np.log(1.0 - p) * (m * i)
                        + np.log(1.0 - p ** (n_exp - i) * (1.0 - p) ** i) * (E - m)
                    )
            else:
                logecm = (
                    E * np.log(E) - m * np.log(m) - (E - m) * np.log(E - m)
                )
                current_sum = np.exp(
                    logecm
                    + np.log(p) * (n_exp * m)
                    + np.log(1 - p ** n_exp) * (E - m)
                )
                for i in range(1, n_exp + 1):
                    current_sum = current_sum + np.exp(
                        logecm
                        + lognci[i]
                        + np.log(p) * (m * (n_exp - i))
                        + np.log(1.0 - p) * (m * i)
                        + np.log(1.0 - p ** (n_exp - i) * (1.0 - p) ** i) * (E - m)
                    )

            y = np.log(current_sum)
            y = max(0, y)
            interpY = y

            while interpY > 0 and (m == 1 or x > np.log(m - 1)):
                Sx = Sx + x
                Sx2 = Sx2 + x * x
                Sx3 = Sx3 + x * x * x
                Sx4 = Sx4 + x * x * x * x
                Sy = Sy + interpY
                Sxy = Sxy + x * interpY
                Sx2y = Sx2y + x * x * interpY
                x = x - (np.log(numX) - np.log(numX - 1))
                if prevY <= 0:
                    interpY = 0
                else:
                    interpY = interpY - (interpY - prevY) / (
                        np.log(m) - np.log(m - 1)
                    ) * (np.log(numX) - np.log(numX - 1))
                totObs = totObs + 1

            prevY = y

        res = np.linalg.inv(
            np.array([[totObs, Sx, Sx2], [Sx, Sx2, Sx3], [Sx2, Sx3, Sx4]])
        ) @ np.array([Sy, Sxy, Sx2y])
        ParabolaA = res[0]
        ParabolaB = res[1]
        ParabolaC = res[2]

        l = np.array([0.0], dtype=np.float64)
        for m in range(1, len(count) + 1):
            k = np.log(count[m - 1, 1])
            expectedLogY = (
                ParabolaA
                + ParabolaB * np.log(count[m - 1, 0])
                + ParabolaC * np.log(count[m - 1, 0]) * np.log(count[m - 1, 0])
            )
            l = l + (k - expectedLogY) * (k - expectedLogY)

        self.optimization_steps.append((p[0], l[0]))
        return l

    def _fast_loglik(self, p, E, n_exp, count, logeck, lognci, k_cost_threshold):
        if p <= 0.0 or p >= 1.0:
            return 1e100
        q = p
        a = 0.75 * p
        b = p - a
        c = q - a
        if (a + b + c) >= 1.0:
            return 1e100

        l = np.array([0.0], dtype=np.float64)
        for j in range(len(count)):
            m = count[j, 0]
            ck = np.log(count[j, 1])
            if ck > np.log(k_cost_threshold):
                if m <= MAXK:
                    current_sum = np.exp(
                        logeck[m]
                        + np.log(p) * (n_exp * m)
                        + np.log(1 - p ** n_exp) * (E - m)
                    )
                    for i in range(1, n_exp + 1):
                        current_sum = current_sum + np.exp(
                            logeck[m]
                            + lognci[i]
                            + np.log(p) * (m * (n_exp - i))
                            + np.log(1 - p) * (m * i)
                            + np.log(1 - p ** (n_exp - i) * (1 - p) ** i) * (E - m)
                        )
                else:
                    logecm = (
                        E * np.log(E) - m * np.log(m) - (E - m) * np.log(E - m)
                    )
                    current_sum = np.exp(
                        logecm
                        + np.log(p) * (n_exp * m)
                        + np.log(1 - p ** n_exp) * (E - m)
                    )
                    for i in range(1, n_exp + 1):
                        current_sum = current_sum + np.exp(
                            logecm
                            + lognci[i]
                            + np.log(p) * (m * (n_exp - i))
                            + np.log(1 - p) * (m * i)
                            + np.log(1 - p ** (n_exp - i) * (1 - p) ** i) * (E - m)
                        )

                y = np.log(current_sum)
                y = max(0, y)
                l = l + (np.exp(ck) - np.exp(y)) * (np.exp(ck) - np.exp(y))

        self.optimization_steps.append((p[0], l[0]))
        return l

    def _check_optimization_history(self):
        optimization_steps = np.array(self.optimization_steps)
        function_values = np.unique(optimization_steps[:, 1])
        if len(function_values) <= 1:
            warnings.warn(
                "the optimization function is constant for the RMATFitter(fast=True). "
                "Please, use RMATFitter(fast=False) instead."
            )
        self.optimization_steps = []

    def fit(self, graph: Optional[NDArray] = None, is_directed=True):
        if self.random:
            return 0.25, 0.25, 0.25, 0.25

        operator = infer_operator(graph)

        degree_values, degree_counts = get_degree_distribution(graph[:, 0], operator=operator)
        out_dd = operator.stack([degree_values, degree_counts], axis=1)
        out_dd = move_ndarray_to_host(out_dd)

        if is_directed:
            degree_values, degree_counts = get_degree_distribution(graph[:, 1], operator=operator)
            in_dd = operator.stack([degree_values, degree_counts], axis=1)
            in_dd = move_ndarray_to_host(in_dd)

        p = self._get_p_directed_graph(out_dd)
        self._check_optimization_history()

        if is_directed:
            q = self._get_p_directed_graph(in_dd)
            self._check_optimization_history()
        else:
            q = p

        a = 0.75 * (p + q) / 2
        b = p - a
        c = q - a
        assert (a + b + c) < 1.0, "Cannot get correct RMat fit!"
        d = 1.0 - (a + b + c)
        return a, b, c, d
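
# Illustrative worked example (not part of the original file): how the final
# RMAT parameters follow from the two fitted probabilities p and q, mirroring
# the closing lines of RMATFitter.fit above. The values of p and q are made up.
if __name__ == "__main__":
    p, q = 0.6, 0.5
    a = 0.75 * (p + q) / 2   # a = 0.4125
    b = p - a                # b = 0.1875, so a + b = p (row sum)
    c = q - a                # c = 0.0875, so a + c = q (column sum)
    d = 1.0 - (a + b + c)    # d = 0.3125, probabilities sum to 1
    print(a, b, c, d)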
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph/fitter.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
from abc import ABC, abstractmethod
from typing import Optional

from syngen.utils.types import MetaData
from syngen.configuration import SynGenDatasetFeatureSpec

logger = logging.getLogger(__name__)
log = logger


class BasePreprocessing(ABC):
    """ Base class for all preprocessing transforms.

    Args:
        source_path: path to the raw dataset
        destination_path: path to store the dataset in SynGen format
        download: if True, tries to automatically download the dataset
    """

    def __init__(
        self,
        source_path: str,
        destination_path: Optional[str] = None,
        download: bool = False,
        **kwargs,
    ):
        self.source_path = source_path
        self.destination_path = destination_path or os.path.join(source_path, 'syngen_preprocessed')

        if download:
            self.download()
        assert self._check_files()

    def _prepare_feature_list(self, tabular_data, cat_columns, cont_columns):
        feature_list = [
            {
                MetaData.NAME: feat_name,
                MetaData.DTYPE: str(tabular_data[feat_name].dtype),
                MetaData.FEATURE_TYPE: MetaData.CONTINUOUS,
            }
            for feat_name in cont_columns
        ]
        feature_list.extend([
            {
                MetaData.NAME: feat_name,
                MetaData.DTYPE: str(tabular_data[feat_name].dtype),
                MetaData.FEATURE_TYPE: MetaData.CATEGORICAL,
            }
            for feat_name in cat_columns
        ])
        return feature_list

    @abstractmethod
    def transform(self, gpu=False, use_cache=False) -> SynGenDatasetFeatureSpec:
        raise NotImplementedError()

    @abstractmethod
    def download(self):
        raise NotImplementedError()

    @abstractmethod
    def _check_files(self) -> bool:
        raise NotImplementedError()

    @classmethod
    def add_cli_args(cls, parser):
        return parser
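
# Illustrative sketch (not part of the original file): the kind of feature list
# _prepare_feature_list produces for a tiny pandas frame. The column names,
# values, and the string form of the MetaData keys shown below are assumptions
# for the example only.
#
#   import pandas as pd
#   df = pd.DataFrame({"amount": [1.0, 2.5], "is_fraud": [0, 1]})
#   # _prepare_feature_list(df, cat_columns=["is_fraud"], cont_columns=["amount"])
#   # -> [{'name': 'amount',   'dtype': 'float64', 'feature_type': 'continuous'},
#   #     {'name': 'is_fraud', 'dtype': 'int64',   'feature_type': 'categorical'}]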
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/base_preprocessing.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
from typing import Optional

import cudf
import cupy as cp
import numpy as np
import pandas as pd
from ogb.nodeproppred import NodePropPredDataset

from syngen.configuration import SynGenDatasetFeatureSpec
from syngen.preprocessing.base_preprocessing import BasePreprocessing
from syngen.utils.io_utils import dump_dataframe
from syngen.utils.types import MetaData


class OGBN_MAG_Preprocessing(BasePreprocessing):
    """ The OGBN_MAG_Preprocessing class includes the transformation operation for a subset
    of the Microsoft Academic Graph (MAG). It is a heterogeneous network that contains four
    types of entities: papers (736,389 nodes), authors (1,134,649 nodes), institutions
    (8,740 nodes), and fields of study (59,965 nodes), as well as four types of directed
    relations connecting two types of entities: an author is "affiliated with" an
    institution, an author "writes" a paper, a paper "cites" a paper, and a paper
    "has a topic of" a field of study.

    For more information, please check https://ogb.stanford.edu/docs/nodeprop/
    """

    def __init__(
        self,
        source_path: str,
        destination_path: Optional[str] = None,
        download: bool = False,
        **kwargs,
    ):
        super().__init__(source_path, destination_path, download, **kwargs)

    def download(self):
        NodePropPredDataset(name="ogbn-mag", root=self.source_path)

    def _check_files(self) -> bool:
        return True

    def transform(self, gpu=False, use_cache=False):
        tabular_operator = cudf if gpu else pd
        operator = cp if gpu else np

        if use_cache and os.path.exists(self.destination_path):
            return SynGenDatasetFeatureSpec.instantiate_from_preprocessed(self.destination_path)

        shutil.rmtree(self.destination_path, ignore_errors=True)
        os.makedirs(self.destination_path)

        dataset = NodePropPredDataset(name="ogbn-mag", root=self.source_path)[0]
        data = dataset[0]
        labels = dataset[1]["paper"]

        graph_metadata = {
            MetaData.NODES: [],
            MetaData.EDGES: [],
        }

        connections = {}

        for e, edges in data["edge_index_dict"].items():
            structural_data = pd.DataFrame(edges.T, columns=[MetaData.SRC, MetaData.DST])
            connections[e[1]] = tabular_operator.DataFrame({
                "src_id": edges[0, :],
                "dst_id": edges[1, :],
            })
            edata = data["edge_reltype"][e]
            edge_type = {
                MetaData.NAME: e[1],
                MetaData.COUNT: len(structural_data),
                MetaData.SRC_NODE_TYPE: e[0],
                MetaData.DST_NODE_TYPE: e[2],
                MetaData.DIRECTED: False,
                MetaData.FEATURES: [{
                    MetaData.NAME: 'feat',
                    MetaData.DTYPE: str(edata.dtype),
                    MetaData.FEATURE_TYPE: MetaData.CATEGORICAL,
                }],
                MetaData.FEATURES_PATH: f"{e[1]}_features.parquet",
                MetaData.STRUCTURE_PATH: f"{e[1]}_list.parquet",
            }
            dump_dataframe(
                tabular_operator.DataFrame(edata, columns=['feat']),
                os.path.join(self.destination_path, edge_type[MetaData.FEATURES_PATH]))
            dump_dataframe(
                structural_data,
                os.path.join(self.destination_path, edge_type[MetaData.STRUCTURE_PATH]))
            graph_metadata[MetaData.EDGES].append(edge_type)

        # paper node type
        continuous_column_names = [
            "feat_" + str(i) for i in range(data["node_feat_dict"]["paper"].shape[1])
        ]

        paper_features_dataframe = tabular_operator.DataFrame(
            data["node_feat_dict"]["paper"],
            columns=continuous_column_names,
        ).astype("float32")
        paper_features_dataframe["year"] = tabular_operator.DataFrame(data["node_year"]["paper"]).astype("int32")
        paper_features_dataframe["venue"] = tabular_operator.DataFrame(labels).astype("int32")

        paper_node_type = {
            MetaData.NAME: "paper",
            MetaData.COUNT: data["num_nodes_dict"]['paper'],
            MetaData.FEATURES: [
                {
                    MetaData.NAME: name,
                    MetaData.DTYPE: str(dtype),
                    MetaData.FEATURE_TYPE: MetaData.CATEGORICAL
                    if str(dtype).startswith('int') else MetaData.CONTINUOUS,
                }
                for name, dtype in paper_features_dataframe.dtypes.items()
            ],
            MetaData.FEATURES_PATH: "paper.parquet",
        }
        dump_dataframe(paper_features_dataframe,
                       os.path.join(self.destination_path, paper_node_type[MetaData.FEATURES_PATH]))
        graph_metadata[MetaData.NODES].append(paper_node_type)

        # author node type
        paper_features_dataframe["paper_id"] = operator.arange(paper_features_dataframe.shape[0])
        author_feat = connections["writes"].merge(
            paper_features_dataframe, left_on="dst_id", right_on="paper_id", how="left"
        ).groupby("src_id", sort=True).mean()
        author_features_dataframe = author_feat[continuous_column_names]

        author_node_type = {
            MetaData.NAME: "author",
            MetaData.COUNT: data["num_nodes_dict"]['author'],
            MetaData.FEATURES: [
                {
                    MetaData.NAME: name,
                    MetaData.DTYPE: str(dtype),
                    MetaData.FEATURE_TYPE: MetaData.CONTINUOUS,
                }
                for name, dtype in author_features_dataframe.dtypes.items()
            ],
            MetaData.FEATURES_PATH: "author.parquet",
        }
        dump_dataframe(author_features_dataframe,
                       os.path.join(self.destination_path, author_node_type[MetaData.FEATURES_PATH]))
        graph_metadata[MetaData.NODES].append(author_node_type)

        # institution node type
        author_features_dataframe["author_id"] = operator.arange(author_features_dataframe.shape[0])
        institution_feat = connections["affiliated_with"].merge(
            author_features_dataframe, left_on="src_id", right_on="author_id"
        ).groupby("dst_id", sort=True).mean()
        institution_dataframe = institution_feat[continuous_column_names]

        institution_node_type = {
            MetaData.NAME: "institution",
            MetaData.COUNT: data["num_nodes_dict"]['institution'],
            MetaData.FEATURES: [
                {
                    MetaData.NAME: name,
                    MetaData.DTYPE: str(dtype),
                    MetaData.FEATURE_TYPE: MetaData.CONTINUOUS,
                }
                for name, dtype in institution_dataframe.dtypes.items()
            ],
            MetaData.FEATURES_PATH: "institution.parquet",
        }
        dump_dataframe(institution_dataframe,
                       os.path.join(self.destination_path, institution_node_type[MetaData.FEATURES_PATH]))
        graph_metadata[MetaData.NODES].append(institution_node_type)

        # field_of_study node type
        field_of_study_feat = connections["has_topic"].merge(
            paper_features_dataframe, left_on="src_id", right_on="paper_id"
        ).groupby("dst_id", sort=True).mean()
        field_of_study_dataframe = field_of_study_feat[continuous_column_names]

        field_of_study_node_type = {
            MetaData.NAME: "field_of_study",
            MetaData.COUNT: data["num_nodes_dict"]['field_of_study'],
            MetaData.FEATURES: [
                {
                    MetaData.NAME: name,
                    MetaData.DTYPE: str(dtype),
                    MetaData.FEATURE_TYPE: MetaData.CONTINUOUS,
                }
                for name, dtype in field_of_study_dataframe.dtypes.items()
            ],
            MetaData.FEATURES_PATH: "field_of_study.parquet",
        }
        dump_dataframe(field_of_study_dataframe,
                       os.path.join(self.destination_path, field_of_study_node_type[MetaData.FEATURES_PATH]))
        graph_metadata[MetaData.NODES].append(field_of_study_node_type)

        with open(os.path.join(self.destination_path, 'graph_metadata.json'), 'w') as f:
            json.dump(graph_metadata, f, indent=4)

        graph_metadata[MetaData.PATH] = self.destination_path
        return SynGenDatasetFeatureSpec(graph_metadata)
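
# Illustrative sketch (not part of the original file): the rough shape of the
# graph_metadata JSON written above, with MetaData keys rendered as plain
# strings and entries abbreviated. This is an assumption-based outline, not
# verbatim output.
#
#   {
#     "nodes": [
#       {"name": "paper", "count": 736389, "features": [...], "features_path": "paper.parquet"},
#       {"name": "author", ...},
#       {"name": "institution", ...},
#       {"name": "field_of_study", ...}
#     ],
#     "edges": [
#       {"name": "writes", "src_node_type": "author", "dst_node_type": "paper", ...},
#       ...
#     ]
#   }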
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/datasets/ogbn_mag.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
from typing import Optional

import numpy as np
import pandas as pd
from ogb.lsc import MAG240MDataset

from syngen.preprocessing.base_preprocessing import BasePreprocessing
from syngen.configuration import SynGenDatasetFeatureSpec
from syngen.utils.io_utils import dump_dataframe
from syngen.utils.types import MetaData


class MAG240mPreprocessing(BasePreprocessing):

    def __init__(
        self,
        source_path: str,
        destination_path: Optional[str] = None,
        download: bool = False,
        skip_node_features=False,
        **kwargs,
    ):
        super().__init__(source_path, destination_path, download, **kwargs)
        self.include_node_features = not skip_node_features

    def download(self):
        MAG240MDataset(root=self.source_path)

    def _check_files(self) -> bool:
        return True

    def transform(self, gpu=False, use_cache=False):
        if gpu:
            raise ValueError("MAG240m preprocessing does not support GPU at the moment")

        if use_cache and os.path.exists(self.destination_path):
            return SynGenDatasetFeatureSpec.instantiate_from_preprocessed(self.destination_path)

        shutil.rmtree(self.destination_path, ignore_errors=True)
        os.makedirs(self.destination_path)

        dataset = MAG240MDataset(root=self.source_path)

        graph_metadata = {
            MetaData.NODES: [],
            MetaData.EDGES: [],
        }

        # paper node type
        features = []
        features_path = None
        if self.include_node_features:
            features_path = 'paper_tabular_features'
            os.makedirs(os.path.join(self.destination_path, features_path))

            column_names = ["feat_" + str(i) for i in range(0, dataset.num_paper_features)]
            feat_memmap = dataset.paper_feat
            features = [
                {
                    MetaData.NAME: name,
                    MetaData.DTYPE: str(feat_memmap.dtype),
                    MetaData.FEATURE_TYPE: MetaData.CONTINUOUS,
                    MetaData.FEATURE_FILE: 'paper_feats.npy',
                }
                for name in column_names
            ]
            np.save(os.path.join(self.destination_path, features_path, 'paper_feats.npy'), feat_memmap)

            features.append({
                MetaData.NAME: 'year',
                MetaData.DTYPE: "int32",
                MetaData.FEATURE_TYPE: MetaData.CATEGORICAL,
                MetaData.FEATURE_FILE: 'year_label.npy',
            })
            features.append({
                MetaData.NAME: 'label',
                MetaData.DTYPE: "int32",
                MetaData.FEATURE_TYPE: MetaData.CATEGORICAL,
                MetaData.FEATURE_FILE: 'year_label.npy',
            })

            year_label_df = pd.DataFrame()
            year_label_df['year'] = dataset.all_paper_year
            year_label_df['label'] = np.nan_to_num(dataset.all_paper_label, nan=-2)
            np.save(os.path.join(self.destination_path, features_path, 'year_label.npy'), year_label_df.values)
            del year_label_df

        paper_node_type = {
            MetaData.NAME: "paper",
            MetaData.COUNT: dataset.num_papers,
            MetaData.FEATURES: features,
            MetaData.FEATURES_PATH: features_path,
        }
        graph_metadata[MetaData.NODES].append(paper_node_type)

        # author node type
        author_node_type = {
            MetaData.NAME: "author",
            MetaData.COUNT: dataset.num_authors,
            MetaData.FEATURES_PATH: None,
        }
        graph_metadata[MetaData.NODES].append(author_node_type)

        # institution node type
        institution_node_type = {
            MetaData.NAME: "institution",
            MetaData.COUNT: dataset.num_institutions,
            MetaData.FEATURES_PATH: None,
        }
        graph_metadata[MetaData.NODES].append(institution_node_type)

        for (src_node_type, dst_node_type), edge_name in dataset.__rels__.items():
            edges = dataset.edge_index(src_node_type, dst_node_type)
            structural_data = pd.DataFrame(edges.T, columns=[MetaData.SRC, MetaData.DST])
            edge_type = {
                MetaData.NAME: edge_name,
                MetaData.COUNT: len(structural_data),
                MetaData.SRC_NODE_TYPE: src_node_type,
                MetaData.DST_NODE_TYPE: dst_node_type,
                MetaData.DIRECTED: False,
                MetaData.FEATURES: [],
                MetaData.FEATURES_PATH: None,
                MetaData.STRUCTURE_PATH: f"{edge_name}_list.parquet",
            }
            dump_dataframe(structural_data,
                           os.path.join(self.destination_path, edge_type[MetaData.STRUCTURE_PATH]))
            graph_metadata[MetaData.EDGES].append(edge_type)

        with open(os.path.join(self.destination_path, 'graph_metadata.json'), 'w') as f:
            json.dump(graph_metadata, f, indent=4)

        graph_metadata[MetaData.PATH] = self.destination_path
        return SynGenDatasetFeatureSpec(graph_metadata)

    @classmethod
    def add_cli_args(cls, parser):
        parser.add_argument(
            "-snf", "--skip-node-features",
            action='store_true',
            help='Prepares only the structural part of the MAG240m dataset'
        )
        return parser
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/datasets/ogbn_mag240m.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .cora import CORAPreprocessing
from .epinions import EpinionsPreprocessing
from .ogbn_mag import OGBN_MAG_Preprocessing
from .ogbn_mag240m import MAG240mPreprocessing
from .ieee import IEEEPreprocessing
from .tabformer import TabFormerPreprocessing

DATASETS = {
    'cora': CORAPreprocessing,
    'epinions': EpinionsPreprocessing,
    'ogbn_mag': OGBN_MAG_Preprocessing,
    'ogbn_mag240m': MAG240mPreprocessing,
    'ieee': IEEEPreprocessing,
    'tabformer': TabFormerPreprocessing,
}
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/datasets/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import logging
import shutil
import subprocess
from typing import List, Union, Optional

import numpy as np
import pandas as pd

from syngen.configuration import SynGenDatasetFeatureSpec
from syngen.preprocessing.base_preprocessing import BasePreprocessing
from syngen.utils.types import MetaData

logger = logging.getLogger(__name__)
log = logger


class CORAPreprocessing(BasePreprocessing):

    def __init__(
        self,
        source_path: str,
        destination_path: Optional[str] = None,
        download: bool = False,
        **kwargs,
    ):
        """ preprocessing for https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz """
        super().__init__(source_path, destination_path, download, **kwargs)

    def transform(self, gpu=False, use_cache=False):
        assert not gpu, "CORA preprocessing does not support cudf preprocessing"

        if use_cache and os.path.exists(self.destination_path):
            return SynGenDatasetFeatureSpec.instantiate_from_preprocessed(self.destination_path)

        tabular_operator = pd
        operator = np

        examples = {}
        with open(os.path.join(self.source_path, 'cora.content'), "r") as cora_content:
            for line in cora_content:
                entries = line.rstrip("\n").split("\t")
                # entries contains [ID, Word1, Word2, ..., Label]; "Words" are 0/1 values.
                words = list(map(int, entries[1:-1]))
                example_id = int(entries[0])
                label = entries[-1]
                features = {
                    "id": example_id,
                    "label": label,
                }
                for i, w in enumerate(words):
                    features[f"w_{i}"] = w
                examples[example_id] = features

        tabular_data = tabular_operator.DataFrame.from_dict(
            examples, orient="index"
        ).reset_index(drop=True)

        # `words` still holds the last parsed row here; every row has the same width
        node_features = [
            {
                MetaData.NAME: f"w_{i}",
                MetaData.DTYPE: 'int64',
                MetaData.FEATURE_TYPE: MetaData.CATEGORICAL,
            }
            for i in range(len(words))
        ]
        node_features.extend([
            {
                MetaData.NAME: name,
                MetaData.DTYPE: 'int64',
                MetaData.FEATURE_TYPE: MetaData.CATEGORICAL,
            }
            for name in ["label"]
        ])

        for c in tabular_data.columns:
            tabular_data[c] = tabular_data[c].astype("category").cat.codes.astype(int)
        tabular_data = tabular_data.set_index('id')

        structural_data = tabular_operator.read_csv(os.path.join(self.source_path, "cora.cites"))
        structural_data.columns = ["src", "dst"]
        for c in ["src", "dst"]:
            structural_data[c] = structural_data[c].astype(int)

        paper_ids = operator.unique(operator.concatenate([
            structural_data["src"].values,
            structural_data["dst"].values,
        ]))
        mapping = operator.empty(int(paper_ids.max()) + 1, dtype=int)
        mapping[paper_ids] = operator.arange(len(paper_ids))
        for c in ["src", "dst"]:
            structural_data[c] = mapping[structural_data[c]]

        graph_metadata = {
            MetaData.NODES: [
                {
                    MetaData.NAME: "paper",
                    MetaData.COUNT: len(tabular_data),
                    MetaData.FEATURES: node_features,
                    MetaData.FEATURES_PATH: "paper.parquet",
                },
            ],
            MetaData.EDGES: [{
                MetaData.NAME: "cite",
                MetaData.COUNT: len(structural_data),
                MetaData.SRC_NODE_TYPE: "paper",
                MetaData.DST_NODE_TYPE: "paper",
                MetaData.DIRECTED: False,
                MetaData.FEATURES: [],
                MetaData.FEATURES_PATH: None,
                MetaData.STRUCTURE_PATH: "cite_edge_list.parquet",
            }]
        }

        shutil.rmtree(self.destination_path, ignore_errors=True)
        os.makedirs(self.destination_path)

        tabular_data.to_parquet(os.path.join(self.destination_path, "paper.parquet"))
        structural_data.to_parquet(os.path.join(self.destination_path, "cite_edge_list.parquet"))

        with open(os.path.join(self.destination_path, 'graph_metadata.json'), 'w') as f:
            json.dump(graph_metadata, f, indent=4)

        graph_metadata[MetaData.PATH] = self.destination_path
        return SynGenDatasetFeatureSpec(graph_metadata)

    def download(self):
        log.info("downloading CORA dataset...")
        cmds = [
            fr"mkdir -p {self.source_path}",
            fr"wget 'https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz' -P {self.source_path}",
            fr"tar -xf {self.source_path}/cora.tgz -C {self.source_path}",
            fr"sed -i 's/\t/,/g' {self.source_path}/cora/cora.cites",
            fr"sed -i '1s/^/src,dst\n/' {self.source_path}/cora/cora.cites",
            fr"mv {self.source_path}/cora/* {self.source_path}/.",
            fr"rm -r {self.source_path}/cora",
        ]
        for cmd in cmds:
            try:
                subprocess.check_output(cmd, shell=True)
            except subprocess.CalledProcessError as e:
                raise Exception(e.output)

    def _check_files(self):
        files = ['cora.cites', 'cora.content']
        return all(os.path.exists(os.path.join(self.source_path, file)) for file in files)
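
# Illustrative usage sketch (not part of the original file): the common
# download/transform flow shared by the preprocessing classes. The paths used
# here are assumptions for the example only.
if __name__ == "__main__":
    preprocessing = CORAPreprocessing(source_path="/tmp/cora_raw", download=True)
    feature_spec = preprocessing.transform(gpu=False, use_cache=True)
    # feature_spec now describes the SynGen-format dataset written to disk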
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/datasets/cora.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import logging
import os
import shutil
import tarfile
from typing import Optional
from urllib.request import urlopen

import cudf
import cupy as cp
import numpy as np
import pandas as pd

from syngen.configuration import SynGenDatasetFeatureSpec
from syngen.preprocessing.base_preprocessing import BasePreprocessing
from syngen.utils.types import MetaData

logger = logging.getLogger(__name__)
log = logger


class EpinionsPreprocessing(BasePreprocessing):
    ITEM_SPACE_ARCHIVE_URL = (
        "http://konect.cc/files/download.tsv.epinions-rating.tar.bz2"
    )
    SOCIAL_SPACE_ARCHIVE_URL = (
        "http://konect.cc/files/download.tsv.epinions.tar.bz2"
    )

    def __init__(
        self,
        source_path: str,
        destination_path: Optional[str] = None,
        download: bool = False,
        **kwargs,
    ):
        """ preprocessing for http://www.trustlet.org/wiki/Extended_Epinions_dataset """
        self.ratings_file = os.path.join(source_path, 'epinions-rating', 'out.epinions-rating')
        self.trust_file = os.path.join(source_path, 'epinions', 'out.epinions')
        super().__init__(source_path, destination_path, download, **kwargs)

    def transform(self, gpu=False, use_cache=False):
        if use_cache and os.path.exists(self.destination_path):
            return SynGenDatasetFeatureSpec.instantiate_from_preprocessed(self.destination_path)

        operator = cp if gpu else np
        tabular_operator = cudf if gpu else pd

        item_space_data = tabular_operator.read_csv(
            self.ratings_file,
            sep=" ",
            names=["userId", "itemId", "rating", "timestamp"],
            skiprows=1,
        )
        social_space_data = tabular_operator.read_csv(
            self.trust_file,
            sep=" ",
            names=["userId", "friendId", "trust", "timestamp"],
            skiprows=1,
        )
        social_space_data = social_space_data[social_space_data["trust"] == 1]

        min_item_id = int(item_space_data['itemId'].min())
        item_space_data['itemId'] = item_space_data['itemId'] - min_item_id

        min_user_id = min(
            int(item_space_data['userId'].min()),
            int(social_space_data['userId'].min()),
            int(social_space_data['friendId'].min()),
        )
        item_space_data['userId'] = item_space_data['userId'] - min_user_id
        social_space_data['userId'] = social_space_data['userId'] - min_user_id
        social_space_data['friendId'] = social_space_data['friendId'] - min_user_id

        graph_metadata = {
            MetaData.NODES: [
                {
                    MetaData.NAME: "user",
                    MetaData.COUNT: int(item_space_data['userId'].max()),
                    MetaData.FEATURES: [],
                    MetaData.FEATURES_PATH: None,
                },
                {
                    MetaData.NAME: "item",
                    MetaData.COUNT: int(item_space_data['itemId'].max()),
                    MetaData.FEATURES: [],
                    MetaData.FEATURES_PATH: None,
                }
            ],
            MetaData.EDGES: [
                {
                    MetaData.NAME: "user-item",
                    MetaData.COUNT: len(item_space_data),
                    MetaData.SRC_NODE_TYPE: "user",
                    MetaData.DST_NODE_TYPE: "item",
                    MetaData.DIRECTED: False,
                    MetaData.FEATURES: [
                        {
                            MetaData.NAME: "rating",
                            MetaData.DTYPE: str(item_space_data["rating"].dtype),
                            MetaData.FEATURE_TYPE: MetaData.CATEGORICAL,
                        }
                    ],
                    MetaData.FEATURES_PATH: "user-item.parquet",
                    MetaData.STRUCTURE_PATH: "user-item_edge_list.parquet",
                },
                {
                    MetaData.NAME: "user-user",
                    MetaData.COUNT: len(social_space_data),
                    MetaData.SRC_NODE_TYPE: "user",
                    MetaData.DST_NODE_TYPE: "user",
                    MetaData.DIRECTED: False,
                    MetaData.FEATURES: [],
                    MetaData.FEATURES_PATH: None,
                    MetaData.STRUCTURE_PATH: "user-user_edge_list.parquet",
                }
            ]
        }

        shutil.rmtree(self.destination_path, ignore_errors=True)
        os.makedirs(self.destination_path)

        item_space_data[['rating']] \
            .to_parquet(os.path.join(self.destination_path, "user-item.parquet"))
        item_space_data[['userId', 'itemId']] \
            .rename(columns={'userId': MetaData.SRC, 'itemId': MetaData.DST}) \
            .to_parquet(os.path.join(self.destination_path, "user-item_edge_list.parquet"))
        social_space_data[['userId', 'friendId']] \
            .rename(columns={'userId': MetaData.SRC, 'friendId': MetaData.DST}) \
            .to_parquet(os.path.join(self.destination_path, "user-user_edge_list.parquet"))

        with open(os.path.join(self.destination_path, 'graph_metadata.json'), 'w') as f:
            json.dump(graph_metadata, f, indent=4)

        graph_metadata[MetaData.PATH] = self.destination_path
        return SynGenDatasetFeatureSpec(graph_metadata)

    def download(self):
        if not os.path.exists(self.source_path):
            os.makedirs(self.source_path)

        if not os.path.exists(self.ratings_file):
            with tarfile.open(fileobj=urlopen(self.ITEM_SPACE_ARCHIVE_URL), mode="r|bz2") as tar:
                tar.extractall(self.source_path)

        if not os.path.exists(self.trust_file):
            with tarfile.open(fileobj=urlopen(self.SOCIAL_SPACE_ARCHIVE_URL), mode="r|bz2") as tar:
                tar.extractall(self.source_path)

    def _check_files(self):
        files = [self.ratings_file, self.trust_file]
        return all(os.path.exists(file) for file in files)
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/datasets/epinions.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import json
import logging
import shutil
from typing import Optional

import cudf
import cupy as cp
import numpy as np
import pandas as pd

from syngen.configuration import SynGenDatasetFeatureSpec
from syngen.preprocessing.base_preprocessing import BasePreprocessing
from syngen.utils.types import MetaData

logger = logging.getLogger(__name__)
log = logger


class IEEEPreprocessing(BasePreprocessing):
    """ preprocessing for https://www.kaggle.com/competitions/ieee-fraud-detection """

    def __init__(
        self,
        source_path: str,
        destination_path: Optional[str] = None,
        download: bool = False,
        **kwargs,
    ):
        super().__init__(source_path, destination_path, download, **kwargs)

    def transform(self, gpu=False, use_cache=False):
        if use_cache and os.path.exists(self.destination_path):
            return SynGenDatasetFeatureSpec.instantiate_from_preprocessed(self.destination_path)

        operator = cp if gpu else np
        tabular_operator = cudf if gpu else pd

        data = tabular_operator.read_csv(os.path.join(self.source_path, 'data.csv'))
        data = data.fillna(0)

        cont_columns = [
            'TransactionDT', 'TransactionAmt',
            'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10',
            'C11', 'C12', 'C14',
            'V279', 'V280', 'V284', 'V285', 'V286', 'V287', 'V290', 'V291',
            'V292', 'V293', 'V294', 'V295', 'V297', 'V298', 'V299', 'V302',
            'V303', 'V304', 'V305', 'V306', 'V307', 'V308', 'V309', 'V310',
            'V311', 'V312', 'V316', 'V317', 'V318', 'V319', 'V320', 'V321',
        ]
        cat_columns = ["isFraud"]

        for col in ('user_id', 'product_id', *cat_columns):
            data[col] = data[col].astype("category").cat.codes
            data[col] = data[col].astype(int)

        structural_data = data[['user_id', 'product_id']]
        tabular_data = data[[*cat_columns, *cont_columns]]

        edge_features = self._prepare_feature_list(tabular_data, cat_columns, cont_columns)

        graph_metadata = {
            MetaData.NODES: [
                {
                    MetaData.NAME: "user",
                    MetaData.COUNT: int(structural_data['user_id'].max()),
                    MetaData.FEATURES: [],
                    MetaData.FEATURES_PATH: None,
                },
                {
                    MetaData.NAME: "product",
                    MetaData.COUNT: int(structural_data['product_id'].max()),
                    MetaData.FEATURES: [],
                    MetaData.FEATURES_PATH: None,
                }
            ],
            MetaData.EDGES: [
                {
                    MetaData.NAME: "user-product",
                    MetaData.COUNT: len(structural_data),
                    MetaData.SRC_NODE_TYPE: "user",
                    MetaData.DST_NODE_TYPE: "product",
                    MetaData.DIRECTED: False,
                    MetaData.FEATURES: edge_features,
                    MetaData.FEATURES_PATH: "user-product.parquet",
                    MetaData.STRUCTURE_PATH: "user-product_edge_list.parquet",
                }
            ]
        }

        shutil.rmtree(self.destination_path, ignore_errors=True)
        os.makedirs(self.destination_path)

        tabular_data.to_parquet(os.path.join(self.destination_path, "user-product.parquet"))
        structural_data.to_parquet(os.path.join(self.destination_path, "user-product_edge_list.parquet"))

        with open(os.path.join(self.destination_path, 'graph_metadata.json'), 'w') as f:
            json.dump(graph_metadata, f, indent=4)

        graph_metadata[MetaData.PATH] = self.destination_path
        return SynGenDatasetFeatureSpec(graph_metadata)

    def download(self):
        raise NotImplementedError(
            "IEEE dataset does not support automatic downloading. Please run /workspace/scripts/get_datasets.sh"
        )

    def _check_files(self) -> bool:
        files = ['data.csv']
        return all(os.path.exists(os.path.join(self.source_path, file)) for file in files)
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/datasets/ieee.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import os
import json
import logging
import shutil
from typing import Optional

import cudf
import cupy as cp
import numpy as np
import pandas as pd

from syngen.utils.types import DataFrameType
from syngen.configuration import SynGenDatasetFeatureSpec
from syngen.preprocessing.base_preprocessing import BasePreprocessing
from syngen.utils.types import MetaData


class TabFormerPreprocessing(BasePreprocessing):
    """ preprocessing for https://github.com/IBM/TabFormer """

    def __init__(
        self,
        source_path: str,
        destination_path: Optional[str] = None,
        download: bool = False,
        **kwargs,
    ):
        super().__init__(source_path, destination_path, download, **kwargs)

    @staticmethod
    def nanNone(X: DataFrameType) -> DataFrameType:
        return X.where(X.notnull(), "None")

    @staticmethod
    def amountEncoder(X: DataFrameType) -> DataFrameType:
        return (
            X.str.slice(start=1)
            .astype(float)
            .clip(lower=1.0)
            .map(lambda x: math.log(x))
        )

    def transform(self, gpu=False, use_cache=False) -> SynGenDatasetFeatureSpec:
        if use_cache and os.path.exists(self.destination_path):
            return SynGenDatasetFeatureSpec.instantiate_from_preprocessed(self.destination_path)

        operator = cp if gpu else np
        tabular_operator = cudf if gpu else pd

        data = tabular_operator.read_csv(os.path.join(self.source_path, 'card_transaction.v2.csv'))
        data.columns = [
            i.lower().replace(" ", "_") for i in data.columns.tolist()
        ]
        data = data.rename(
            columns={"is_fraud?": "is_fraud", "errors?": "errors", "merchant_name": "merchant_id"}
        )
        data['card_id'] = data['user'] + data['card']
        data.drop(columns=['user', 'card'], inplace=True)

        data["errors"] = data["errors"].fillna(0)
        data["use_chip"] = self.nanNone(data["use_chip"])
        data["amount"] = self.amountEncoder(data["amount"])

        cont_columns = ["amount"]
        cat_columns = ["use_chip", "errors", "is_fraud"]

        for col in ("card_id", "merchant_id", *cat_columns):
            data[col] = data[col].astype("category").cat.codes
            data[col] = data[col].astype(int)

        structural_data = data[['card_id', 'merchant_id']]
        tabular_data = data[[*cat_columns, *cont_columns]]

        edge_features = self._prepare_feature_list(tabular_data, cat_columns, cont_columns)

        graph_metadata = {
            MetaData.NODES: [
                {
                    MetaData.NAME: "card",
                    MetaData.COUNT: int(structural_data['card_id'].max()),
                    MetaData.FEATURES: [],
                    MetaData.FEATURES_PATH: None,
                },
                {
                    MetaData.NAME: "merchant",
                    MetaData.COUNT: int(structural_data['merchant_id'].max()),
                    MetaData.FEATURES: [],
                    MetaData.FEATURES_PATH: None,
                }
            ],
            MetaData.EDGES: [
                {
                    MetaData.NAME: "transaction",
                    MetaData.COUNT: len(structural_data),
                    MetaData.SRC_NODE_TYPE: "card",
                    MetaData.DST_NODE_TYPE: "merchant",
                    MetaData.DIRECTED: False,
                    MetaData.FEATURES: edge_features,
                    MetaData.FEATURES_PATH: "transaction.parquet",
                    MetaData.STRUCTURE_PATH: "transaction_edge_list.parquet",
                }
            ]
        }

        shutil.rmtree(self.destination_path, ignore_errors=True)
        os.makedirs(self.destination_path)

        tabular_data.to_parquet(os.path.join(self.destination_path, "transaction.parquet"))
        structural_data.to_parquet(os.path.join(self.destination_path, "transaction_edge_list.parquet"))

        with open(os.path.join(self.destination_path, 'graph_metadata.json'), 'w') as f:
            json.dump(graph_metadata, f, indent=4)

        graph_metadata[MetaData.PATH] = self.destination_path
        return SynGenDatasetFeatureSpec(graph_metadata)

    def download(self):
        raise NotImplementedError(
            "TabFormer dataset does not support automatic downloading. Please run /workspace/scripts/get_datasets.sh"
        )

    def _check_files(self) -> bool:
        files = ['card_transaction.v2.csv']
        return all(os.path.exists(os.path.join(self.source_path, file)) for file in files)
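
# Illustrative worked example (not part of the original file): what
# amountEncoder does to a raw amount string, step by step. The dollar value
# is made up for the example.
#
#   "$59.99" --slice(1)--> "59.99" --float--> 59.99 --clip(lower=1.0)--> 59.99
#            --log--> 4.0942...   (amounts below $1 are clipped, so log(1) = 0 is the floor)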
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/datasets/tabformer.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# flake8: noqa
from .tabular_metrics import TabularMetrics
from .utils import load_data
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/tabular/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Dict, List, Tuple, Union

import pandas as pd

try:
    import ipywidgets as widgets
    from IPython import get_ipython
    from IPython.core.display import HTML, Markdown, display
except ImportError:
    print("IPython not installed.")


def load_data(
    path_real: str,
    path_fake: str,
    real_sep: str = ",",
    fake_sep: str = ",",
    drop_columns: List = None,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """ Load data from a real and a synthetic data csv.

    This function makes sure that the loaded data has the same columns
    with the same data types.

    Args:
        path_real: string path to csv with real data
        path_fake: string path to csv with synthetic data
        real_sep: separator of the real csv
        fake_sep: separator of the fake csv
        drop_columns: names of columns to drop.

    Return:
        Tuple with DataFrame containing the real data and DataFrame containing the synthetic data.
    """
    real = pd.read_csv(path_real, sep=real_sep, low_memory=False)
    fake = pd.read_csv(path_fake, sep=fake_sep, low_memory=False)

    if set(fake.columns.tolist()).issubset(set(real.columns.tolist())):
        real = real[fake.columns]
    elif drop_columns is not None:
        real = real.drop(drop_columns, axis=1)
        try:
            fake = fake.drop(drop_columns, axis=1)
        except KeyError:
            print(f"Some of {drop_columns} were not found in the fake columns.")
        assert len(fake.columns.tolist()) == len(
            real.columns.tolist()
        ), f"Real and fake do not have same nr of columns: {len(fake.columns)} and {len(real.columns)}"
        fake.columns = real.columns
    else:
        fake.columns = real.columns

    for col in fake.columns:
        fake[col] = fake[col].astype(real[col].dtype)
    return real, fake


def dict_to_df(data: Dict[str, Any]):
    return pd.DataFrame(
        {"result": list(data.values())}, index=list(data.keys())
    )


class EvaluationResult(object):
    def __init__(
        self, name, content, prefix=None, appendix=None, notebook=False
    ):
        self.name = name
        self.prefix = prefix
        self.content = content
        self.appendix = appendix
        self.notebook = notebook

    def show(self):
        if self.notebook:
            output = widgets.Output()
            with output:
                display(Markdown(f"## {self.name}"))
                if self.prefix:
                    display(Markdown(self.prefix))
                display(self.content)
                if self.appendix:
                    display(Markdown(self.appendix))
            return output
        else:
            print(f"\n{self.name}")
            if self.prefix:
                print(self.prefix)
            print(self.content)
            if self.appendix:
                print(self.appendix)
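
# Illustrative usage sketch (not part of the original file): aligning a real
# and a synthetic CSV before computing metrics. The file names and the dropped
# column are assumptions for the example only.
if __name__ == "__main__":
    real, fake = load_data("real.csv", "fake.csv", drop_columns=["row_id"])
    # both frames now share the same columns, order, and dtypes
    print(real.dtypes.equals(fake.dtypes))  # True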
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/tabular/utils.py
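A minimal usage sketch for the loader above (not part of the repository); the CSV paths and the "id" column are hypothetical placeholders.

# Hedged example: aligning a real and a synthetic CSV with load_data.
from syngen.analyzer.tabular.utils import dict_to_df, load_data

real_df, fake_df = load_data(
    "data/real.csv",        # hypothetical path to the original table
    "generated/fake.csv",   # hypothetical path to the synthetic table
    drop_columns=["id"],    # hypothetical identifier column to exclude
)
# load_data casts the fake columns to the real dtypes, so this should print True.
print(real_df.dtypes.equals(fake_df.dtypes))
print(dict_to_df({"rows_real": len(real_df), "rows_fake": len(fake_df)}))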
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import warnings from collections import Counter from itertools import combinations from typing import Dict, List, Optional import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from dython.nominal import associations, numerical_encoding from scipy import stats from scipy.spatial import distance from scipy.special import kl_div from sklearn.decomposition import PCA from syngen.utils.types import DataFrameType, ColumnType warnings.simplefilter(action="ignore", category=pd.errors.PerformanceWarning) matplotlib._log.disabled = True logger = logging.getLogger() logger.setLevel(logging.CRITICAL) class TabularMetrics(object): def __init__( self, real: DataFrameType, fake: DataFrameType, categorical_columns: Optional[List] = [], nrows: Optional[int] = None, seed: Optional[int] = 123, verbose: bool = False, debug: bool = False, ): """ Args: real (DataFrameType): the original dataset fake (DataFrameType): the generated dataset categorical_columns (list): list of categorical columns in tabular data nrows (int): number of rows to use for evaluation (default: None), will use the minimum of real/fake data length seed (int): sets the random seed for reproducibility. (default: 123) verbose (bool): print intermediate results (default: False) debug (bool): debug mode (default: False) """ assert all(c in fake.columns for c in real.columns) and len( real.columns ) == len(fake.columns), r"Real and fake have different columns." self.real = real self.fake = fake[real.columns] self.nrows = nrows self.seed = seed self.verbose = verbose self.debug = debug self.categorical_columns = categorical_columns self.numerical_columns = [ column for column in real.columns if column not in categorical_columns ] # Make sure columns and their order are the same. if len(real.columns) == len(fake.columns): fake = fake[real.columns.tolist()] assert ( real.columns.tolist() == fake.columns.tolist() ), "Columns in real and fake dataframe are not the same" # Make sure the number of samples is equal in both datasets. if nrows is None: self.nrows = min(len(self.real), len(self.fake)) elif len(fake) >= nrows and len(real) >= nrows: self.nrows = nrows else: raise Exception( f"Make sure nrows < len(fake/real). 
len(real): {len(real)}, len(fake): {len(fake)}"
            )

        self.real = self.real.sample(self.nrows)
        self.fake = self.fake.sample(self.nrows)

        self.real.loc[:, self.categorical_columns] = (
            self.real.loc[:, self.categorical_columns]
            .fillna("[NAN]")
            .astype(str)
        )
        self.fake.loc[:, self.categorical_columns] = (
            self.fake.loc[:, self.categorical_columns]
            .fillna("[NAN]")
            .astype(str)
        )

        self.real.loc[:, self.numerical_columns] = self.real.loc[
            :, self.numerical_columns
        ].fillna(self.real[self.numerical_columns].mean())
        self.fake.loc[:, self.numerical_columns] = self.fake.loc[
            :, self.numerical_columns
        ].fillna(self.fake[self.numerical_columns].mean())

    def kl_divergence(self) -> float:
        def get_frequencies(real, synthetic):
            f_obs, f_exp = [], []
            real, synthetic = Counter(real), Counter(synthetic)
            for value in synthetic:
                if value not in real:
                    warnings.warn(
                        f"Unexpected value {value} in synthetic data."
                    )
                    real[value] += 1e-6  # Regularization to prevent NaN.
            for value in real:
                f_obs.append(synthetic[value] / sum(synthetic.values()))
                f_exp.append(real[value] / sum(real.values()))
            return f_obs, f_exp

        numerical_columns = self.numerical_columns

        # - continuous columns
        cont_scores = []
        for columns in combinations(numerical_columns, r=2):
            columns = list(columns)
            rd_cont = self.real[columns].copy()
            rd_cont[pd.isna(rd_cont)] = 0.0
            column1, column2 = rd_cont.columns[:2]
            real, xedges, yedges = np.histogram2d(
                rd_cont[column1], rd_cont[column2]
            )
            fake, _, _ = np.histogram2d(
                self.fake[column1], self.fake[column2], bins=[xedges, yedges]
            )
            f_obs, f_exp = fake.flatten() + 1e-5, real.flatten() + 1e-5
            f_obs, f_exp = f_obs / np.sum(f_obs), f_exp / np.sum(f_exp)
            score = 1 / (1 + np.sum(kl_div(f_obs, f_exp)))
            cont_scores.append(score)

        # - discrete columns
        categorical_columns = self.categorical_columns
        cat_scores = []
        for columns in combinations(categorical_columns, r=2):
            columns = list(columns)
            real = self.real[columns].itertuples(index=False)
            fake = self.fake[columns].itertuples(index=False)
            f_obs, f_exp = get_frequencies(real, fake)
            score = 1 / (1 + np.sum(kl_div(f_obs, f_exp)))
            cat_scores.append(score)

        return np.nanmean(cont_scores + cat_scores)

    def correlation_correlation(
        self, comparison_metric: str = "pearsonr"
    ) -> float:
        """
        Computes the column-wise correlation of each dataset, and outputs the
        `comparison_metric` score between the datasets.

        Args:
            comparison_metric (str): metric to be used to compare between the
                datasets, see `scipy.stats`

        Returns:
            corr (float): correlation score
        """
        comparison_metric = getattr(stats, comparison_metric)
        total_metrics = pd.DataFrame()
        for ds_name in ["real", "fake"]:
            ds = getattr(self, ds_name)
            corr_df = associations(
                ds,
                nominal_columns=self.categorical_columns,
                nom_nom_assoc='theil',
                compute_only=True,
            )
            values = corr_df['corr'].values
            values = values[~np.eye(values.shape[0], dtype=bool)].reshape(
                values.shape[0], -1
            )
            total_metrics[ds_name] = values.flatten()

        corr, _ = comparison_metric(
            total_metrics["real"], total_metrics["fake"]
        )
        if self.debug:
            print("\nColumn correlation between datasets:")
            print(total_metrics.to_string())
        return corr

    def statistical_correlation(self, comparison_metric="spearmanr") -> float:
        """
        Computes correlation between basic statistics of each dataset for each column.

        Args:
            comparison_metric (str): metric to be used to compare between the
                datasets, see `scipy.stats`

        Returns:
            corr (float): correlation score
        """
        total_metrics = pd.DataFrame()
        comparison_metric = getattr(stats, comparison_metric)
        discrete_values = {
            c: self.real[c].unique() for c in self.categorical_columns
        }

        for ds_name in ["real", "fake"]:
            ds = getattr(self, ds_name)
            metrics = {}
            num_ds = ds.loc[:, self.numerical_columns]

            for idx, value in num_ds.mean().items():
                metrics[f"mean_{idx}"] = value
            for idx, value in num_ds.median().items():
                metrics[f"median_{idx}"] = value
            for idx, value in num_ds.std().items():
                metrics[f"std_{idx}"] = value
            for idx, value in num_ds.var().items():
                metrics[f"variance_{idx}"] = value

            for cc in self.categorical_columns:
                cdf = ds[cc]
                v = cdf.value_counts(normalize=True)
                unique_vals = set(v.index)
                # Key the counts by column name as well, so identical category
                # values in different columns do not overwrite each other.
                for d in discrete_values[cc]:
                    if d not in unique_vals:
                        metrics[f"count_{cc}_{d}"] = 0.0
                    else:
                        metrics[f"count_{cc}_{d}"] = v[d]

            total_metrics[ds_name] = list(metrics.values())
        total_metrics.index = list(metrics.keys())

        if self.debug:
            print("\nBasic statistical attributes:")
            print(total_metrics.to_string())
        corr, _ = comparison_metric(
            total_metrics["real"], total_metrics["fake"]
        )
        return corr

    def plot_cumsums(self, nr_cols=4, fname=None):
        """
        Plot the cumulative sums for all columns in the real and fake dataset.
        Height of each row scales with the length of the labels. Each plot
        contains the values of a real column and the corresponding fake column.

        Args:
            fname: If not none, saves the plot with this file name.
""" nr_charts = len(self.real.columns) nr_rows = max(1, nr_charts // nr_cols) nr_rows = nr_rows + 1 if nr_charts % nr_cols != 0 else nr_rows max_len = 0 # Increase the length of plots if the labels are long if not self.real.select_dtypes(include=["object"]).empty: lengths = [] for d in self.real.select_dtypes(include=["object"]): lengths.append( max( [ len(x.strip()) for x in self.real[d].unique().tolist() ] ) ) max_len = max(lengths) row_height = 6 + (max_len // 30) fig, ax = plt.subplots( nr_rows, nr_cols, figsize=(16, row_height * nr_rows) ) fig.suptitle("Cumulative Sums per feature", fontsize=16) if nr_rows == 1 and nr_cols == 1: axes = [ax] else: axes = ax.flatten() for i, col in enumerate(self.real.columns): r = self.real[col] f = self.fake.iloc[:, self.real.columns.tolist().index(col)] self.cdf(r, f, col, "Cumsum", ax=axes[i]) plt.tight_layout(rect=[0, 0.02, 1, 0.98]) if fname is not None: plt.savefig(fname) plt.show() def plot_mean_std(self, ax=None, fname=None) -> None: """ Plot the means and standard deviations of each dataset. Args: ax: Axis to plot on. If none, a new figure is made. fname: If not none, saves the plot with this file name. """ real = self.real fake = self.fake if ax is None: fig, ax = plt.subplots(1, 2, figsize=(10, 5)) fig.suptitle( "Absolute Log Mean and STDs of numeric data\n", fontsize=16 ) ax[0].grid(True) ax[1].grid(True) real = real.select_dtypes(include=np.number).reset_index() fake = fake.select_dtypes(include=np.number).reset_index() real_mean = np.log(np.add(abs(real.mean()).values, 1e-5)) fake_mean = np.log(np.add(abs(fake.mean()).values, 1e-5)) min_mean = min(real_mean) - 1 max_mean = max(real_mean) + 1 line = np.arange(min_mean, max_mean) sns.lineplot(x=line, y=line, ax=ax[0]) sns.scatterplot(x=real_mean, y=fake_mean, ax=ax[0]) ax[0].set_title("Means of real and fake data") ax[0].set_xlabel("real data mean (log)") ax[0].set_ylabel("fake data mean (log)") real_std = np.log(np.add(real.std().values, 1e-5)) fake_std = np.log(np.add(fake.std().values, 1e-5)) min_std = min(real_std) - 1 max_std = max(real_std) + 1 line = np.arange(min_std, max_std) sns.lineplot(x=line, y=line, ax=ax[1]) sns.scatterplot(x=real_std, y=fake_std, ax=ax[1]) ax[1].set_title("Stds of real and fake data") ax[1].set_xlabel("real data std (log)") ax[1].set_ylabel("fake data std (log)") if fname is not None: plt.savefig(fname) if ax is None: plt.show() def convert_numerical(self, real, fake): """ Convert categorical columns to numerical """ for c in self.categorical_columns: if real[c].dtype == "object": real[c] = pd.factorize(real[c], sort=True)[0] fake[c] = pd.factorize(fake[c], sort=True)[0] return real, fake def cdf( self, real_data, fake_data, xlabel: str = "Values", ylabel: str = "Cumulative Sum", ax=None, ) -> None: """ Plot continous density function on optionally given ax. If no ax, cdf is plotted and shown. Args: xlabel: Label to put on the x-axis ylabel: Label to put on the y-axis ax: The axis to plot on. If ax=None, a new figure is created. 
""" x1 = np.sort(real_data) x2 = np.sort(fake_data) y = np.arange(1, len(real_data) + 1) / len(real_data) ax = ax if ax else plt.subplots()[1] axis_font = {"size": "14"} ax.set_xlabel(xlabel, **axis_font) ax.set_ylabel(ylabel, **axis_font) ax.grid() ax.plot(x1, y, marker="o", linestyle="none", label="Real", ms=8) ax.plot(x2, y, marker="o", linestyle="none", label="Fake", alpha=0.5) ax.tick_params(axis="both", which="major", labelsize=8) ax.legend(loc="upper center", bbox_to_anchor=(0.5, 1.1), ncol=3) import matplotlib.ticker as mticker # If labels are strings, rotate them vertical if isinstance(real_data, pd.Series) and real_data.dtypes == "object": ticks_loc = ax.get_xticks() r_unique = real_data.sort_values().unique() if len(r_unique) > len(ticks_loc): import pdb; pdb.set_trace() ticks_loc = ticks_loc[: len(r_unique)] ax.xaxis.set_major_locator(mticker.FixedLocator(ticks_loc)) ax.set_xticklabels(r_unique, rotation="vertical") if ax is None: plt.show() def plot_correlation_difference( self, plot_diff: bool = True, cat_cols: list = None, annot=False, fname=None, ) -> None: """ Plot the association matrices for the `real` dataframe, `fake` dataframe and plot the difference between them. Has support for continuous and categorical data types. All Object and Category dtypes are considered to be categorical columns if `cat_cols` is not passed. - Continuous - Continuous: Uses Pearson's correlation coefficient - Continuous - Categorical: Uses so called correlation ratio (https://en.wikipedia.org/wiki/Correlation_ratio) for both continuous - categorical and categorical - continuous. - Categorical - Categorical: Uses Theil's U, an asymmetric correlation metric for Categorical associations Args: plot_diff: Plot difference if True, else not cat_cols: List of Categorical columns boolean annot: Whether to annotate the plot with numbers indicating the associations. """ real = self.real fake = self.fake cmap = sns.diverging_palette(220, 10, as_cmap=True) if cat_cols is None: cat_cols = real.select_dtypes(["object", "category"]) if plot_diff: fig, ax = plt.subplots(1, 3, figsize=(24, 7)) else: fig, ax = plt.subplots(1, 2, figsize=(20, 8)) real_corr = associations( real, nominal_columns=cat_cols, plot=False, nom_nom_assoc='theil', mark_columns=True, annot=annot, ax=ax[0], cmap=cmap, )["corr"] fake_corr = associations( fake, nominal_columns=cat_cols, plot=False, nom_nom_assoc='theil', mark_columns=True, annot=annot, ax=ax[1], cmap=cmap, )["corr"] if plot_diff: diff = abs(real_corr - fake_corr) sns.set(style="white") sns.heatmap( diff, ax=ax[2], cmap=cmap, vmax=0.3, square=True, annot=annot, center=0, linewidths=0.5, cbar_kws={"shrink": 0.5}, fmt=".2f", ) titles = ( ["Real", "Fake", "Difference"] if plot_diff else ["Real", "Fake"] ) for i, label in enumerate(titles): title_font = {"size": "18"} ax[i].set_title(label, **title_font) plt.tight_layout() if fname is not None: plt.savefig(fname) plt.show() def plot_pca(self, fname=None): """ Plot the first two components of a PCA of real and fake data. Args: fname: If not none, saves the plot with this file name. 
""" real, fake = self.convert_numerical(self.real, self.fake) pca_r = PCA(n_components=2) pca_f = PCA(n_components=2) real_t = pca_r.fit_transform(real) fake_t = pca_f.fit_transform(fake) fig, ax = plt.subplots(1, 2, figsize=(12, 6)) fig.suptitle("First two components of PCA", fontsize=16) sns.scatterplot(ax=ax[0], x=real_t[:, 0], y=real_t[:, 1]) sns.scatterplot(ax=ax[1], x=fake_t[:, 0], y=fake_t[:, 1]) ax[0].set_title("Real data") ax[1].set_title("Fake data") if fname is not None: plt.savefig(fname) plt.show() def visual_evaluation(self, save_dir=None, **kwargs): """ Plots mean, std, cumulative sum, correlation difference and PCA Args: save_dir: directory path to save images kwargs: any key word argument for matplotlib. """ if save_dir is None: self.plot_mean_std() self.plot_cumsums() self.plot_correlation_difference( plot_diff=True, cat_cols=self.categorical_columns, **kwargs ) self.plot_pca() else: save_dir = Path(save_dir) save_dir.mkdir(parents=True, exist_ok=True) self.plot_mean_std(fname=save_dir / "mean_std.png") self.plot_cumsums(fname=save_dir / "cumsums.png") self.plot_correlation_difference( plot_diff=True, cat_cols=self.categorical_columns, fname=save_dir / "correlation_difference.png", **kwargs, ) self.plot_pca(fname=save_dir / "pca.png") def evaluate( self, comparison_metric: str = "pearsonr" ) -> Dict[str, float]: """ evaluate synthetic data Args: comparison_metric (str): metric to be used to compare between the datasets see `scipy.stats` Returns: results (dict<str, float>): dictionary containing computed metrics, <key> := metric_name, <value>:= score """ statistical_correlation = self.statistical_correlation( comparison_metric ) kl_divergence = self.kl_divergence() correlation_correlation = self.correlation_correlation() results = { "statistical_correlation": statistical_correlation, "kl_divergence": kl_divergence, "correlation_correlation": correlation_correlation, } return results def dd_feat_heatmap( data, feat_name_col_info: Dict[str, ColumnType], src_col: str = "src", dst_col: str = "dst", ): src_degree = ( data.groupby(src_col, as_index=False) .count()[[src_col, dst_col]] .rename(columns={dst_col: "src_degree"}) ) # - normalized src_degree src_degree_vals = src_degree["src_degree"].values normalized_src_degree = src_degree_vals / np.sum(src_degree_vals) src_degree.loc[:, "src_degree"] = normalized_src_degree # - normalized dst_degree dst_degree = ( data.groupby(dst_col, as_index=False) .count()[[src_col, dst_col]] .rename(columns={src_col: "dst_degree"}) ) dst_degree_vals = dst_degree["dst_degree"].values normalized_dst_degree = dst_degree_vals / np.sum(dst_degree_vals) dst_degree.loc[:, "dst_degree"] = normalized_dst_degree # - merge data = data.merge(src_degree, how="outer", on=src_col) data = data.merge(dst_degree, how="outer", on=dst_col) # - normalize continuous columns for feat, col_info in feat_name_col_info.items(): col_type = col_info["type"] min_ = col_info["min"] max_ = col_info["max"] if col_type == ColumnType.CONTINUOUS: vals = data[feat].values data.loc[:, feat] = (vals - min_) / (max_ - min_) # - plot heat maps def heat_map(x, y): heatmap, xedges, yedges = np.histogram2d(x, y, bins=10) extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]] return heatmap.T, extent heat_maps = [] for feat in feat_name_col_info: heatmap, _ = heat_map(data["src_degree"].values, data[feat].values) heat_maps.append(heatmap) return heat_maps def compute_dd_feat_js( real, fake, feat_name_col_info: Dict[str, ColumnType], src_col: str = "src", dst_col: str = "dst", ): col_info 
= {} for col_name, col_type in feat_name_col_info.items(): if col_type == ColumnType.CONTINUOUS: min_ = real[col_name].min() max_ = real[col_name].max() col_info[col_name] = {"type": col_type, "min": min_, "max": max_} elif col_type == ColumnType.CATEGORICAL: # - none of the datsets align on categorical for now.. pass real_heatmaps = dd_feat_heatmap( real, col_info, src_col=src_col, dst_col=dst_col ) fake_heatmaps = dd_feat_heatmap( fake, col_info, src_col=src_col, dst_col=dst_col ) heatmaps = list(zip(real_heatmaps, fake_heatmaps)) score = 0.0 for r, f in heatmaps: s = distance.jensenshannon(r, f, axis=1) # - along feats np.nan_to_num(s, copy=False, nan=1.0) s = np.mean(s) score += s return score def get_frequencies(real, synthetic): f_obs, f_exp = [], [] real, synthetic = Counter(real), Counter(synthetic) for value in synthetic: if value not in real: warnings.warn(f"Unexpected value {value} in synthetic data.") real[value] += 1e-6 # Regularization to prevent NaN. for value in real: f_obs.append(synthetic[value] / sum(synthetic.values())) f_exp.append(real[value] / sum(real.values())) return f_obs, f_exp
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/tabular/tabular_metrics.py
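A hedged end-to-end sketch of TabularMetrics (not from the repository); the toy dataframes below are invented so that both the continuous and the categorical KL paths have at least one column pair.

import numpy as np
import pandas as pd
from syngen.analyzer.tabular.tabular_metrics import TabularMetrics

rng = np.random.default_rng(123)
n = 1000
real = pd.DataFrame({
    "age": rng.integers(18, 90, n),
    "income": rng.normal(50_000, 10_000, n),
    "city": rng.choice(["north", "south"], n),
    "segment": rng.choice(["a", "b", "c"], n),
})
# A deliberately easy "synthetic" table: a bootstrap resample of the real one.
fake = real.sample(frac=1.0, replace=True, random_state=7).reset_index(drop=True)

tm = TabularMetrics(real, fake, categorical_columns=["city", "segment"])
# Returns statistical_correlation, kl_divergence and correlation_correlation.
print(tm.evaluate())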
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from functools import partial from typing import Dict import matplotlib import matplotlib.pyplot as plt import numpy as np from scipy.sparse.linalg import eigsh from syngen.analyzer.graph.graph import safeSNAP from syngen.utils.types import ColumnType TMP_NAME = "tmp" def common_plot(f, ax, *graphs, **kwargs): for i, G in enumerate(graphs, 1): f(G, i, ax, **kwargs) if len(graphs) > 1: ax.legend() def parse_file(plot, filename): parsed_filename = f"{plot}.{filename}.tab" with open(parsed_filename, "r") as f: lines = f.read().splitlines() x_values = [] y_values = [] for line in lines: if len(line) and "#" not in line: x, y = line.split() x_values.append(float(x)) y_values.append(float(y)) return x_values, y_values def clear_files(plot, filename): files_to_clean = [ f"./{plot}.{filename}.plt", f"./{plot}.{filename}.png", f"./{plot}.{filename}.tab", ] for file in files_to_clean: try: os.remove(file) except FileNotFoundError: print(f"File {file} attempted to be removed, but not found") def parse_snap_object(snap_object): zipped = [(pair.GetVal1(), pair.GetVal2()) for pair in snap_object] x, y = zip(*zipped) return x, y def get_degree_dist(snapGraph): return parse_snap_object(snapGraph.GetDegCnt()) def get_in_degree_dist(snapGraph): return parse_snap_object(snapGraph.GetInDegCnt()) def get_out_degree_dist(snapGraph): return parse_snap_object(snapGraph.GetOutDegCnt()) def get_clustering_coef_dist(snapGraph): return parse_snap_object(snapGraph.GetClustCf(True, -1)[1]) def get_strongly_connected_component(snapGraph): return parse_snap_object(snapGraph.GetSccSzCnt()) def get_weakly_connected_component(snapGraph): return parse_snap_object(snapGraph.GetWccSzCnt()) @safeSNAP def _add_to_axis_idd(G, i, ax): graph_name = G.name or f"Graph {i}" title = "Log-log in degree distribution" G = G.snapGraph x, y = get_in_degree_dist(G) ax.set_xscale("log") ax.set_xlabel("In degree") ax.set_yscale("log") ax.set_ylabel("Number of nodes") ax.set_title(title) ax.scatter(x, y, label=graph_name, s=5) @safeSNAP def _add_to_axis_odd(G, i, ax): graph_name = G.name or f"Graph {i}" title = "Log-log out degree distribution" G = G.snapGraph x, y = get_out_degree_dist(G) ax.set_xscale("log") ax.set_xlabel("Out degree") ax.set_yscale("log") ax.set_ylabel("Number of nodes") ax.set_title(title) ax.scatter(x, y, label=graph_name, s=5) @safeSNAP def _add_to_axis_dd(G, i, ax): graph_name = G.name or f"Graph {i}" title = "Log-log degree distribution" G = G.snapGraph x, y = get_degree_dist(G) ax.set_xscale("log") ax.set_xlabel("Degree") ax.set_yscale("log") ax.set_ylabel("Number of nodes") ax.set_title(title) ax.scatter(x, y, label=graph_name, s=5) @safeSNAP def _add_to_axis_ccd(G, i, ax): graph_name = G.name or f"Graph {i}" title = "Log-log distribution of clustering coefficient" G = G.snapGraph x, y = get_clustering_coef_dist(G) ax.set_xscale("log") ax.set_xlabel("Degree") ax.set_yscale("symlog") 
ax.set_ylabel("Clustering coefficient") ax.set_title(title) ax.scatter(x, y, label=graph_name, s=5) @safeSNAP def _add_to_axis_scc(G, i, ax): graph_name = G.name or f"Graph {i}" title = "Log-log distribution of sizes of strongly connected components" G = G.snapGraph x, y = get_strongly_connected_component(G) ax.set_xscale("log") ax.set_xlabel("Size of strongly connected component") ax.set_yscale("symlog") ax.set_ylabel("Number of components") ax.set_title(title) ax.scatter(x, y, label=graph_name, s=5) @safeSNAP def _add_to_axis_wcc(G, i, ax): is_directed = G.is_directed weakly_string = " weakly " if is_directed else " " title = ( f"Log-log distribution of sizes of{weakly_string}connected components" ) graph_name = G.name or f"Graph {i}" G = G.snapGraph x, y = get_weakly_connected_component(G) ax.set_xscale("log") ax.set_xlabel(f"Size of{weakly_string}connected component") ax.set_yscale("symlog") ax.set_ylabel("Number of components") ax.set_title(title) ax.scatter(x, y, label=graph_name, s=5) @safeSNAP def _add_to_axis_hp(G, i, ax, hop_plot_iters=128): is_directed = G.is_directed graph_name = G.name or f"Graph {i}" title = "Hop plot" plot = "hop" G = G.snapGraph G.PlotHops(TMP_NAME, "Hop plot", is_directed, hop_plot_iters) num_hops, num_nodes = parse_file(plot=plot, filename=TMP_NAME) num_hops = [int(num_hop) for num_hop in num_hops] parse_file(plot=plot, filename=TMP_NAME) clear_files(plot=plot, filename=TMP_NAME) ax.set_xlabel("Number of hops") ax.set_ylabel("Number of pairs of nodes") ax.set_yscale("log") ax.set_title(title) ax.plot(num_hops, num_nodes, "--", marker="o", label=graph_name) @safeSNAP def _add_to_axis_svr(G, i, ax, num_spectral_values=100): graph_name = G.name or f"Graph {i}" title = "Singular value rank distribution" plot = "sngVal" G = G.snapGraph G.PlotSngValRank(num_spectral_values, TMP_NAME, title) ranks, sin_values = parse_file(plot, filename=TMP_NAME) ranks = [int(rank) for rank in ranks] parse_file(plot=plot, filename=TMP_NAME) clear_files(plot=plot, filename=TMP_NAME) ax.set_xlabel("Rank") ax.set_ylabel("Singular value") ax.set_yscale("log") ax.set_title(title) ax.plot( ranks, sin_values, "--", marker="o", label=graph_name, markersize=5 ) @safeSNAP def _add_to_axis_evr(G, i, ax, num_spectral_values=100): graph_name = G.name or f"Graph {i}" title = "Eigenvalue rank distribution" plot = "eigVal" G = G.snapGraph G.PlotEigValRank(num_spectral_values, TMP_NAME, title) ranks, eig_values = parse_file(plot, filename=TMP_NAME) ranks = [int(rank) for rank in ranks] parse_file(plot=plot, filename=TMP_NAME) clear_files(plot=plot, filename=TMP_NAME) ax.set_xlabel("Rank") ax.set_ylabel("Eigenvalue") ax.set_yscale("log") ax.set_title(title) ax.plot( ranks, eig_values, "--", marker="o", label=graph_name, markersize=5 ) @safeSNAP def _add_to_axis_svd(G, i, ax, num_spectral_values=100): graph_name = G.name or f"Graph {i}" title = "Singular value distribution" plot = "sngDistr" G = G.snapGraph G.PlotSngValDistr(num_spectral_values, TMP_NAME, title) sin_values, counts = parse_file(plot=plot, filename=TMP_NAME) parse_file(plot=plot, filename=TMP_NAME) clear_files(plot=plot, filename=TMP_NAME) ax.set_xlabel("Singular value") ax.set_ylabel("Count") ax.set_yscale("symlog") ax.set_title(title) ax.plot( sin_values, counts, "--", marker="o", label=graph_name, markersize=5 ) @safeSNAP def _add_to_axis_evd(G, i, ax, num_spectral_values=100): graph_name = G.name or f"Graph {i}" title = "Eigenvalue distribution" plot = "eigDistr" G = G.snapGraph G.PlotEigValDistr(num_spectral_values, TMP_NAME, 
title) eig_values, counts = parse_file(plot, filename=TMP_NAME) parse_file(plot=plot, filename=TMP_NAME) clear_files(plot=plot, filename=TMP_NAME) ax.set_xlabel("Eigenvalue") ax.set_ylabel("Count") ax.set_yscale("symlog") ax.set_title(title) ax.plot( eig_values, counts, "--", marker="o", label=graph_name, markersize=5 ) @safeSNAP def _add_to_axis_lsv(G, i, ax): graph_name = G.name or f"Graph {i}" title = "Leading singular vector rank distribution" plot = "sngVecL" G = G.snapGraph G.PlotSngVec(TMP_NAME, title) ranks, components = parse_file(plot, filename=TMP_NAME) ranks = [int(rank) for rank in ranks] parse_file(plot=plot, filename=TMP_NAME) clear_files(plot=plot, filename=TMP_NAME) ax.set_xlabel("Rank") ax.set_ylabel("Component of leading singular vector") ax.set_yscale("log") ax.set_title(title) ax.plot( ranks, components, "--", marker="o", label=graph_name, markersize=5 ) def plot_node_degree_centrality_feat_dist( data, feat_name_col_info: Dict[str, ColumnType], src_col: str = "src", dst_col: str = "dst", ): # - suppress matplotlib debug logger matplotlib_logger = logging.getLogger("matplotlib") matplotlib_logger.setLevel(logging.WARNING) src_degree = ( data.groupby(src_col, as_index=False) .count()[[src_col, dst_col]] .rename(columns={dst_col: "src_degree"}) ) # - normalized src_degree src_degree_vals = src_degree["src_degree"].values normalized_src_degree = (src_degree_vals - np.min(src_degree_vals)) / ( np.max(src_degree_vals) - np.min(src_degree_vals) ) src_degree.loc[:, "src_degree"] = normalized_src_degree # - normalized dst_degree dst_degree = ( data.groupby(dst_col, as_index=False) .count()[[src_col, dst_col]] .rename(columns={src_col: "dst_degree"}) ) dst_degree_vals = dst_degree["dst_degree"].values normalized_dst_degree = (dst_degree_vals - np.min(dst_degree_vals)) / ( np.max(dst_degree_vals) - np.min(dst_degree_vals) ) dst_degree.loc[:, "dst_degree"] = normalized_dst_degree # - merge data = data.merge(src_degree, how="outer", on=src_col) data = data.merge(dst_degree, how="outer", on=dst_col) # - normalize continuous columns for feat, col_info in feat_name_col_info.items(): col_type = col_info["type"] if col_type == ColumnType.CONTINUOUS: vals = data[feat].values min_, max_ = np.min(vals), np.max(vals) data.loc[:, feat] = (vals - min_) / (max_ - min_) # - plot heat maps def heat_map(x, y): heatmap, xedges, yedges = np.histogram2d(x, y, bins=30) extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]] return heatmap.T, extent nr = 1 # - num plots per row fig, axs = plt.subplots(len(feat_name_col_info), nr, figsize=(12, 8)) c = 0 for feat in feat_name_col_info: if nr * len(feat_name_col_info) == 1: heatmap, extent = heat_map( data["src_degree"].values, data[feat].values ) axs.imshow(heatmap, extent=extent, origin="lower") axs.set_xlabel("src_degree") axs.set_ylabel("feat") else: # - src degree dist heatmap, extent = heat_map( data["src_degree"].values, data[feat].values ) axs[c].imshow(heatmap, extent=extent, origin="lower") axs[c].set_xlabel("src_degree") axs[c].set_ylabel("feat") c += nr return fig # Degree distribution plot_degree_distribution = partial(common_plot, _add_to_axis_dd) # In degree distribution plot_in_degree_distribution = partial(common_plot, _add_to_axis_idd) # Out degree distribution plot_out_degree_distribution = partial(common_plot, _add_to_axis_odd) # Hop plot plot_hopplot = partial(common_plot, _add_to_axis_hp) # Clustering coefficient distribution plot_clustering_coef_distribution = partial(common_plot, _add_to_axis_ccd) # Strongly connected component 
distribution plot_strongly_connected_component_distribution = partial( common_plot, _add_to_axis_scc ) # Weakly connected component distribution plot_weakly_connected_component_distribution = partial( common_plot, _add_to_axis_wcc ) # Eigenvalue rank distribution plot_eigenvalue_rank_distribution = partial(common_plot, _add_to_axis_evr) # Singular value rank distribution plot_singular_value_rank_distribution = partial(common_plot, _add_to_axis_svr) # Eigenvalue rank distribution plot_eigenvalue_histogram_distribution = partial(common_plot, _add_to_axis_evd) # Singular value rank distribution plot_singular_value_histogram_distribution = partial( common_plot, _add_to_axis_svd ) # Leading singular vector rank distribution plot_leading_singular_vector_rank = partial(common_plot, _add_to_axis_lsv)
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/graph/plotting.py
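A hedged sketch of driving one of the partials above; "edges.txt" is a hypothetical connection-list file in the format expected by snap.LoadConnList.

import matplotlib.pyplot as plt
from syngen.analyzer.graph.graph import Graph
from syngen.analyzer.graph.plotting import plot_degree_distribution

g = Graph(path="edges.txt", name="my-graph", is_directed=False)  # hypothetical file
fig, ax = plt.subplots(figsize=(8, 6))
plot_degree_distribution(ax, g)  # partial(common_plot, ...) takes (ax, *graphs)
fig.savefig("degree_distribution.png")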
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import snap from syngen.utils.types import MetaData def safeSNAP(f): def wrapper(*args, **kwargs): graph = args[0] graph.maybe_load_snap() return f(*args, **kwargs) return wrapper class Graph(object): def __init__(self, path=None, name=None, load_eagerly=False, is_directed=False, _snap_graph=None): self.path = path self.name = name self.is_directed = is_directed self.snapGraph = _snap_graph if load_eagerly: self.maybe_load_snap() def maybe_load_snap(self): if not self.snapGraph: graph_type = snap.TNGraph if self.is_directed else snap.TUNGraph self.snapGraph = snap.LoadConnList(graph_type, self.path) @staticmethod def instantiate_from_feature_spec(feature_spec, edge_name, graph_name=None): edge_info = feature_spec.get_edge_info(edge_name) is_bipartite = edge_info[MetaData.SRC_NODE_TYPE] != edge_info[MetaData.DST_NODE_TYPE] is_directed = edge_info[MetaData.DIRECTED] graph_type = snap.TNGraph if is_directed else snap.TUNGraph struct_data = feature_spec.get_structural_data(edge_name) if is_bipartite: num_src_nodes = feature_spec.get_node_info(edge_info[MetaData.SRC_NODE_TYPE])[MetaData.COUNT] num_dst_nodes = feature_spec.get_node_info(edge_info[MetaData.DST_NODE_TYPE])[MetaData.COUNT] num_nodes = num_src_nodes + num_dst_nodes else: num_nodes = feature_spec.get_node_info(edge_info[MetaData.SRC_NODE_TYPE])[MetaData.COUNT] snap_graph = graph_type.New(num_nodes, len(struct_data)) for i in range(num_nodes): snap_graph.AddNode(i) for e in struct_data: snap_graph.AddEdge(int(e[0]), int(e[1])) return Graph(_snap_graph=snap_graph, is_directed=is_directed, name=graph_name) @safeSNAP def edge_count(self): return self.snapGraph.GetEdges() @safeSNAP def node_count(self): return self.snapGraph.GetNodes() @safeSNAP def get_edges(self): return [ (EI.GetSrcNId(), EI.GetDstNId()) for EI in self.snapGraph.Edges() ]
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/graph/graph.py
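A hedged sketch of the Graph wrapper above; the edge-list path is a hypothetical placeholder.

from syngen.analyzer.graph.graph import Graph

# Loading is lazy: SNAP parses the file on the first @safeSNAP-decorated call.
g = Graph(path="edges.txt", name="example", is_directed=True)
print(g.node_count(), g.edge_count())  # maybe_load_snap() fires here
print(g.get_edges()[:5])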
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # flake8: noqa from .graph import *
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/graph/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time def timed(F, desc): def inner(*args, **kwargs): start = time.perf_counter() res = F(*args, **kwargs) elapsed = time.perf_counter() - start print(f'"{desc}" took {elapsed:.2f}s') return res return inner
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/graph/utils.py
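A hedged sketch of the timed() helper above: it wraps any callable and prints the elapsed wall-clock time under the given description.

from syngen.analyzer.graph.utils import timed

def slow_sum(n):
    return sum(range(n))

timed_sum = timed(slow_sum, "summing the first 10M integers")
# Also prints something like: "summing the first 10M integers" took 0.25s
print(timed_sum(10_000_000))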
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from operator import itemgetter

import matplotlib.pyplot as plt
import numpy as np

from syngen.analyzer.graph.graph import safeSNAP


def get_normalised_cdf(nodes, cdf_points=100, debug=False):
    unique_nodes, unique_nodes_counts = np.unique(nodes, return_counts=True)
    node_degree, node_degree_counts = np.unique(
        unique_nodes_counts, return_counts=True
    )
    if debug:
        print(
            "unique_nodes,unique_nodes_counts",
            unique_nodes,
            unique_nodes_counts,
        )
        print(
            "node_degree,node_degree_counts", node_degree, node_degree_counts
        )
    node_degree_normalized = (
        node_degree / node_degree[-1]
    )  # they are sorted, so [-1] is max
    node_degree_counts_normalized = node_degree_counts / np.sum(
        node_degree_counts
    )  # to have density
    if debug:
        print(
            "node_degree_normalized,node_degree_counts_normalized",
            node_degree_normalized,
            node_degree_counts_normalized,
        )
        plt.plot(node_degree_normalized, node_degree_counts_normalized)
        plt.yscale("log")
        plt.xscale("log")
        plt.title("DD normalized log-log")
        plt.show()
    F = np.cumsum(node_degree_counts_normalized)
    cdf_points_for_F = (np.logspace(0, 1, num=cdf_points) - 1) / 9
    F_normalized = np.zeros(shape=(cdf_points_for_F.shape[0], 2))
    F_normalized[:, 0] = cdf_points_for_F
    for i, p in enumerate(cdf_points_for_F):
        matching_list = F[node_degree_normalized <= p]
        F_normalized[i, 1] = matching_list[-1] if len(matching_list) else 0.0
    if debug:
        print("F_normalized", F_normalized)
        plt.plot(F_normalized[:, 0], F_normalized[:, 1])
        plt.plot(node_degree_normalized, F)
        plt.yscale("log")
        plt.xscale("log")
        plt.title("Normalized CDF of DD normalized log-log")
        plt.show()
    return F_normalized


# Global stats
@safeSNAP
def get_global_stats(G, *args, **kwargs):
    is_directed = G.is_directed
    G = G.snapGraph
    num_nodes = G.GetNodes()
    num_edges = G.GetEdges()
    density = num_edges / ((num_nodes - 1) * num_nodes) if num_nodes > 1 else 0
    if not is_directed:
        density = 2 * density
    average_degree = num_edges / num_nodes if num_nodes else 0
    self_loops = G.CntSelfEdges()
    zero_degrees = num_nodes - G.CntNonZNodes()
    zero_in_degrees = len(
        [item.GetVal2() for item in G.GetNodeInDegV() if item.GetVal2() == 0]
    )
    zero_out_degrees = len(
        [item.GetVal2() for item in G.GetNodeOutDegV() if item.GetVal2() == 0]
    )
    uniq_bidirectional = G.CntUniqBiDirEdges()
    uniq_undirected = G.CntUniqUndirEdges()
    uniq_directed = G.CntUniqDirEdges()
    return {
        "Nodes": num_nodes,
        "Edges": num_edges,
        "Density": around(density, 4),
        "Average degree": around(average_degree, 2),
        "Zero deg nodes": zero_degrees,
        "Zero in deg nodes": zero_in_degrees,
        "Zero out deg nodes": zero_out_degrees,
        "Self loops": self_loops,
        "Bidirectional edges": uniq_bidirectional,
        "Unique undirected edges": uniq_undirected,
        "Unique directed edges": uniq_directed,
    }


# Connectivity
@safeSNAP
def get_connectivity(G, *args, **kwargs):
    is_directed = G.is_directed
    G = G.snapGraph

    def get_stats(component_dist_snap):
        component_dist = [
            (comp.GetVal1(), comp.GetVal2()) for comp in component_dist_snap
        ]
        if
len(component_dist): largest_component = max(component_dist, key=itemgetter(0))[0] else: largest_component = 0 number_of_components = sum( num_component for size, num_component in component_dist ) percent = 100 * largest_component / G.GetNodes() return number_of_components, percent # Weakly connected components number_of_weak_components, percent_of_weak = get_stats(G.GetWccSzCnt()) is_weakly_connected = number_of_weak_components == 1 if is_directed: # Strongly connected components number_of_strong_components, percent_of_strong = get_stats( G.GetSccSzCnt() ) is_strongly_connected = number_of_strong_components == 1 result = { "Is strongly connected": is_strongly_connected, "Is weakly connected": is_weakly_connected, "Number of strongly connected components": number_of_strong_components, "Percent of nodes in largest strongly connected component": around( percent_of_strong ), "Number of weakly connected components": number_of_weak_components, "Percent of nodes in largest weakly connected component": around( percent_of_weak ), } else: result = { "Is connected": is_weakly_connected, "Number of connected components": number_of_weak_components, "Percent of nodes in largest component": around(percent_of_weak), } return result # Cluster coefficient and triangles @safeSNAP def get_transitivity(G, fast=True, *args, **kwargs): G = G.snapGraph results_dict = {} if fast: samples = min(G.GetNodes(), int(1e3)) results_dict["Clustering coefficient"] = G.GetClustCf(samples) else: cc, ct, op = G.GetClustCfAll()[0] results_dict = { "Clustering coefficient": cc, "Number of closed triangles": ct, "Number of open triangles": op, } return results_dict # Distances info @safeSNAP def get_path_stats(G, *args, **kwargs): is_directed = G.is_directed G = G.snapGraph # Only effective diameter if BFS will be too slow or not accurate # approx_eff_diam = G.GetAnfEffDiam() num_test_nodes = max(100, G.GetNodes() // 1000) approx_eff_diam, _, approx_diam, average_path_length = G.GetBfsEffDiamAll( num_test_nodes, is_directed ) return { "90% effective diameter": around(approx_eff_diam), "Approx. full diameter": approx_diam, "Average shortest path length": around(average_path_length), } # Degree similarity def get_dd_simmilarity_score(edges_original, edges_synthetic, cdf_points=1000): F_normalized_original = get_normalised_cdf( edges_original, cdf_points=cdf_points, debug=False ) F_normalized_synthetic = get_normalised_cdf( edges_synthetic, cdf_points=cdf_points, debug=False ) abs_F = np.abs(F_normalized_original[:, 1] - F_normalized_synthetic[:, 1]) where_non_zero = F_normalized_original[:, 1] != 0 error = np.average( np.divide( abs_F[where_non_zero], F_normalized_original[:, 1][where_non_zero] ) ) # average error of normalized CDFs error = min(error, 1) if error < 0: raise ValueError("Negative values in CDFs!") simmilarity_score = 1.0 - error return simmilarity_score def around(number, decimals=2): return np.around(number, decimals)
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/graph/stats.py
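A hedged sketch of the degree-distribution similarity score above; the node-id arrays are toy heavy-tailed samples, not repository data.

import numpy as np
from syngen.analyzer.graph.stats import get_dd_simmilarity_score

rng = np.random.default_rng(0)
# Each array lists the endpoint node ids of edges; id occurrences define degrees.
edges_original = rng.zipf(2.0, size=10_000)
edges_synthetic = rng.zipf(2.1, size=10_000)
score = get_dd_simmilarity_score(edges_original, edges_synthetic, cdf_points=200)
print(f"similarity in [0, 1]: {score:.3f}")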
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import Counter

import numpy as np
import similaritymeasures


def get_normalised_cdf(nodes, cdf_points=100):
    unique_nodes, unique_nodes_counts = np.unique(nodes, return_counts=True)
    node_degree, node_degree_counts = np.unique(
        unique_nodes_counts, return_counts=True
    )
    node_degree_normalized = (
        node_degree / node_degree[-1]
    )  # they are sorted, so [-1] is max
    node_degree_counts_normalized = node_degree_counts / np.sum(
        node_degree_counts
    )  # to have density
    F = node_degree_counts_normalized
    cdf_points_for_F = np.array(
        F.shape[0] * (np.logspace(0, 1, num=cdf_points + 1) - 1) / 9,
        dtype=np.int32,
    )
    F_normalized = np.zeros(shape=(cdf_points, 2))
    F_normalized[:, 0] = node_degree_normalized[
        np.array(
            (cdf_points_for_F[0:-1] + cdf_points_for_F[1:]) / 2, dtype=np.int32
        )
    ]
    for i in range(cdf_points_for_F.shape[0] - 1):
        beginning = cdf_points_for_F[i]
        end = cdf_points_for_F[i + 1]
        matching_list = F[beginning:end]
        F_normalized[i, 1] = np.mean(matching_list)
        F_normalized[i, 0] = (
            node_degree_normalized[beginning]
            + (
                node_degree_normalized[end - 1]
                - node_degree_normalized[beginning]
            )
            / 2
        )
    return F_normalized


def get_dd_plot2(data):
    out_dd, in_dd = list(zip(*data))
    out_dd, in_dd = list(out_dd), list(in_dd)
    unique_nodes, unique_nodes_counts = np.unique(out_dd, return_counts=True)
    degree_counts = Counter(unique_nodes_counts)
    x_out, y_out = zip(*degree_counts.items())
    unique_nodes, unique_nodes_counts = np.unique(in_dd, return_counts=True)
    degree_counts = Counter(unique_nodes_counts)
    x_in, y_in = zip(*degree_counts.items())
    return (x_in, y_in), (x_out, y_out)


def get_nan_indicies(*values):
    indices = None
    for value in values:
        filtered = np.isnan(value)
        current_nan = filtered[:, 0] + filtered[:, 1]
        indices = current_nan if indices is None else indices + current_nan
    return indices


def remove_nans(*values):
    indices = get_nan_indicies(*values)
    return tuple(F[~indices] for F in values)


def get_frechet_score(
    edges_original, edges_to_compare, cdf_points=1000, log=True
):
    F1_normalized = get_normalised_cdf(edges_original, cdf_points=cdf_points)
    F2_normalized = get_normalised_cdf(edges_to_compare, cdf_points=cdf_points)
    F1, F2 = remove_nans(F1_normalized, F2_normalized)
    if log:
        F1 = np.log(F1)
        F2 = np.log(F2)
    score = similaritymeasures.frechet_dist(F1, F2)
    return score


def get_frechet_score_normalized(
    edges_original,
    edges_to_compare,
    edges_normalize,
    cdf_points=1000,
    log=True,
):
    F1_normalized = get_normalised_cdf(edges_original, cdf_points=cdf_points)
    F2_normalized = get_normalised_cdf(edges_to_compare, cdf_points=cdf_points)
    F3_normalized = get_normalised_cdf(edges_normalize, cdf_points=cdf_points)
    F1, F2, F3 = remove_nans(F1_normalized, F2_normalized, F3_normalized)
    if log:
        F1 = np.log(F1)
        F2 = np.log(F2)
        F3 = np.log(F3)
    score = similaritymeasures.frechet_dist(F1, F2)
    worst_score = similaritymeasures.frechet_dist(F1, F3)
    eps = 1e-6
    if worst_score < eps or score >= worst_score:
        normalized_score = 0
    else:
normalized_score = min(1 - score / worst_score, 1) return normalized_score def get_out_in_dd(edges): out_dd = edges[:, 0] in_dd = edges[:, 1] return out_dd, in_dd def get_frechet_score_directed( edges_original, edges_to_compare, cdf_points=1000, log=True ): original_out_dd, original_in_dd = get_out_in_dd(edges_original) compare_out_dd, compare_in_dd = get_out_in_dd(edges_to_compare) dd_score = get_frechet_score( edges_original, edges_to_compare, cdf_points, log ) out_dd_score = get_frechet_score( original_out_dd, compare_out_dd, cdf_points, log ) in_dd_score = get_frechet_score( original_in_dd, compare_in_dd, cdf_points, log ) return dd_score, out_dd_score, in_dd_score def get_frechet_score_directed_normalized( edges_original, edges_to_compare, edges_normalize, cdf_points=1000, log=True, ): original_out_dd, original_in_dd = get_out_in_dd(edges_original) compare_out_dd, compare_in_dd = get_out_in_dd(edges_to_compare) normalize_out_dd, normalize_in_dd = get_out_in_dd(edges_normalize) dd_normalized_score = get_frechet_score_normalized( edges_original, edges_to_compare, edges_normalize, cdf_points, log ) out_dd_normalized_score = get_frechet_score_normalized( original_out_dd, compare_out_dd, normalize_out_dd, cdf_points, log ) in_dd_normalized_score = get_frechet_score_normalized( original_in_dd, compare_in_dd, normalize_in_dd, cdf_points, log ) return dd_normalized_score, out_dd_normalized_score, in_dd_normalized_score
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/graph/frechet.py
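A hedged sketch of the Frechet-based score above with toy node-id samples; lower distances mean closer degree distributions (needs the similaritymeasures package installed).

import numpy as np
from syngen.analyzer.graph.frechet import get_frechet_score

rng = np.random.default_rng(0)
edges_real = rng.zipf(2.0, size=5_000)
edges_fake = rng.zipf(2.2, size=5_000)
print(get_frechet_score(edges_real, edges_fake, cdf_points=100))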
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import matplotlib.pyplot as plt import pandas as pd from syngen.analyzer.graph.plotting import ( plot_clustering_coef_distribution, plot_degree_distribution, plot_eigenvalue_histogram_distribution, plot_eigenvalue_rank_distribution, plot_hopplot, plot_in_degree_distribution, plot_leading_singular_vector_rank, plot_out_degree_distribution, plot_singular_value_histogram_distribution, plot_singular_value_rank_distribution, plot_strongly_connected_component_distribution, plot_weakly_connected_component_distribution, ) from syngen.analyzer.graph.stats import ( get_connectivity, get_global_stats, get_path_stats, get_transitivity, ) from syngen.analyzer.graph.utils import timed class AnalysisModule: @staticmethod def check_assertions(graphs): assert len(graphs), "Expected at least 1 graph" assert ( len(set([graph.is_directed for graph in graphs])) == 1 ), "All graphs have to be directed or undirected" @staticmethod def maybe_wrap_timer(f, timer, title): return timed(f, title) if timer else f def compare_graph_stats( self, *graphs, global_stats=True, connectivity=True, transitivity=True, path_stats=True, timer=False, fast=True, ): self.check_assertions(graphs) results = [] category_functions = [] if global_stats: category_functions.append(("Global stats", get_global_stats)) if connectivity: category_functions.append(("Connectivity", get_connectivity)) if transitivity: category_functions.append(("Transitivity", get_transitivity)) if path_stats: category_functions.append(("Path stats", get_path_stats)) for category, F in category_functions: start = time.perf_counter() stats = [F(G, fast=fast) for G in graphs] parsed = [ tuple( [category, statistic] + [graph_stats[statistic] for graph_stats in stats] ) for statistic in stats[0] ] results += parsed if timer: elapsed = time.perf_counter() - start print(f'Category "{category}" took {elapsed:.2f}s') names = [ graph.name if graph.name else f"G{i}" for i, graph in enumerate(graphs, 1) ] columns = ["Category", "Statistic"] + names return pd.DataFrame(results, columns=columns) def compare_graph_plots(self, *graphs, hop_plot_iters=128, timer=False): self.check_assertions(graphs) is_directed = graphs[0].is_directed if is_directed: fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) ax1, ax2, ax3, ax4 = ax3, ax4, ax1, ax2 fig.set_size_inches(18, 6 * 2, forward=True) else: fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(18, 6, forward=True) pdd = self.maybe_wrap_timer( plot_degree_distribution, timer, "Degree distribution" ) pidd = self.maybe_wrap_timer( plot_in_degree_distribution, timer, "In degree distribution" ) podd = self.maybe_wrap_timer( plot_out_degree_distribution, timer, "Out degree distribution" ) ph = self.maybe_wrap_timer(plot_hopplot, timer, "Hop plot") if is_directed: pidd(ax3, *graphs) podd(ax4, *graphs) pdd(ax1, *graphs) ph(ax2, *graphs, hop_plot_iters=hop_plot_iters) return fig def compare_graph_dd(self, *graphs, timer=False): 
self.check_assertions(graphs) fig, ax1 = plt.subplots(1, 1) fig.set_size_inches(18.5, 10.5, forward=True) pdd = ( timed(plot_degree_distribution, "Degree distribution") if timer else plot_degree_distribution ) pdd(ax1, *graphs) return fig
DeepLearningExamples-master
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/graph/analyser.py
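A hedged sketch of AnalysisModule (not from the repository); the two edge-list paths are hypothetical files loadable by SNAP.

from syngen.analyzer.graph.analyser import AnalysisModule
from syngen.analyzer.graph.graph import Graph

g_real = Graph(path="real_edges.txt", name="real", is_directed=False)
g_fake = Graph(path="fake_edges.txt", name="fake", is_directed=False)

am = AnalysisModule()
stats_df = am.compare_graph_stats(g_real, g_fake, timer=True)  # pandas DataFrame
print(stats_df.to_string(index=False))
fig = am.compare_graph_plots(g_real, g_fake, hop_plot_iters=64)
fig.savefig("graph_comparison.png")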
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings

import hydra

warnings.filterwarnings("ignore")


@hydra.main(config_path="conf/", config_name="converter_config")
def main(cfg):
    print(cfg)
    cfg.deployment.config.checkpoint = cfg.checkpoint
    hydra.utils.call(cfg, _recursive_=False)


if __name__ == "__main__":
    main()
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/launch_triton_configure.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from hydra import compose, initialize from hydra.core.global_hydra import GlobalHydra from hydra.core.hydra_config import HydraConfig from omegaconf import OmegaConf def get_config(config_name, config_path, override_list=None, return_hydra_config=False): GlobalHydra.instance().clear() initialize(config_path=config_path) cfg = compose(config_name, return_hydra_config=return_hydra_config, overrides=override_list) if return_hydra_config: HydraConfig().cfg = cfg OmegaConf.resolve(cfg) return cfg
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/hydra_utils.py
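A hedged sketch of get_config(); "inference_config" is the config name used by launch_inference.py in this tool, while the override value is a hypothetical placeholder.

from hydra_utils import get_config

cfg = get_config(
    "inference_config",
    "conf/",
    override_list=["checkpoint=outputs/best.pth"],  # hypothetical checkpoint path
)
print(cfg)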
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings

import hydra

warnings.filterwarnings("ignore")


@hydra.main(config_path="conf/", config_name="deployment_config")
def main(cfg):
    print(cfg)
    cfg.deployment.config.checkpoint = cfg.checkpoint
    hydra.utils.call(cfg, _recursive_=False)


if __name__ == "__main__":
    main()
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/launch_inference_server.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings

import hydra

warnings.filterwarnings("ignore")


@hydra.main(config_path="conf/", config_name="inference_config")
def main(cfg):
    print(cfg)
    cfg.inference.config.checkpoint = cfg.checkpoint
    hydra.utils.call(cfg, _recursive_=False)


if __name__ == "__main__":
    main()
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/launch_inference.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings import hydra warnings.filterwarnings("ignore") @hydra.main(config_path="conf/", config_name="preproc_config") def main(cfg): print(cfg) preprocessor = hydra.utils.instantiate(cfg, _recursive_=False) train, valid, test = preprocessor.preprocess() preprocessor.fit_scalers(train) train = preprocessor.apply_scalers(train) valid = preprocessor.apply_scalers(valid) test = preprocessor.apply_scalers(test) train = preprocessor.impute(train) valid = preprocessor.impute(valid) test = preprocessor.impute(test) preprocessor.save_state() preprocessor.save_datasets(train, valid, test) if __name__ == "__main__": main()
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/launch_preproc.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random
import warnings

import torch
import torch.distributed as dist
from numba import cuda
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from hydra.core.hydra_config import HydraConfig
from joblib.externals.loky.backend.context import get_context


def generate_seeds(rng, size):
    """
    Generate a list of random seeds.

    :param rng: random number generator
    :param size: length of the returned list
    """
    seeds = [rng.randint(0, 2 ** 32 - 1) for _ in range(size)]
    return seeds


def broadcast_seeds(seeds, device):
    """
    Broadcasts random seeds to all distributed workers.
    Returns a list of random seeds (broadcasted from the worker with rank 0).

    :param seeds: list of seeds (integers)
    :param device: torch.device
    """
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        seeds_tensor = torch.LongTensor(seeds).to(device)
        torch.distributed.broadcast(seeds_tensor, 0)
        seeds = seeds_tensor.tolist()
    return seeds


def setup_seeds(master_seed, epochs, device):
    """
    Generates seeds from one master_seed.
    Returns (worker_seeds, shuffling_seeds): worker_seeds are later used to
    initialize per-worker random number generators (mostly for dropouts),
    shuffling_seeds are for the RNGs responsible for reshuffling the dataset
    before each epoch.
    Seeds are generated on the worker with rank 0 and broadcasted to all other
    workers.

    :param master_seed: master RNG seed used to initialize other generators
    :param epochs: number of epochs
    :param device: torch.device (used for distributed.broadcast)
    """
    if master_seed == -1:
        # random master seed, random.SystemRandom() uses /dev/urandom on Unix
        master_seed = random.SystemRandom().randint(0, 2 ** 32 - 1)
        if get_rank() == 0:
            # master seed is reported only from the rank=0 worker to avoid
            # confusion; seeds from rank=0 are later broadcasted to other
            # workers
            print(f"Using random master seed: {master_seed}")
    else:
        # master seed was specified from command line
        print(f"Using master seed from command line: {master_seed}")

    # initialize seeding RNG
    seeding_rng = random.Random(master_seed)

    # generate worker seeds, one seed for every distributed worker
    worker_seeds = generate_seeds(seeding_rng, get_world_size())

    # generate seeds for data shuffling, one seed for every epoch
    shuffling_seeds = generate_seeds(seeding_rng, epochs)

    # broadcast seeds from rank=0 to other workers
    worker_seeds = broadcast_seeds(worker_seeds, device)
    shuffling_seeds = broadcast_seeds(shuffling_seeds, device)
    return worker_seeds, shuffling_seeds


def get_world_size():
    return int(os.environ.get("WORLD_SIZE", 1))


def reduce_tensor(tensor, num_gpus, average=False):
    if num_gpus > 1:
        rt = tensor.clone()
        # dist.reduce_op is a long-deprecated alias; dist.ReduceOp is the
        # supported spelling
        dist.all_reduce(rt, op=dist.ReduceOp.SUM)
        if average:
            if rt.is_floating_point():
                rt = rt / num_gpus
            else:
                rt = rt // num_gpus
        return rt
    return tensor


def init_distributed():
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    if world_size > 1:
        dist.init_process_group(backend="nccl", init_method="env://")
        assert dist.is_initialized()
        torch.cuda.set_device(local_rank)
        torch.cuda.synchronize()


def get_rank():
    """
    Gets the distributed rank or returns zero if distributed is not initialized.
    """
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        rank = torch.distributed.get_rank()
    else:
        rank = 0
    return rank


def is_main_process():
    return get_rank() == 0


def init_parallel():
    if is_parallel():
        torch.cuda.set_device(HydraConfig.get().job.num % torch.cuda.device_count())


def is_parallel():
    return HydraConfig.get().launcher.get('n_jobs', 0) > 1 or HydraConfig.get().sweeper.get('n_jobs', 0) > 1


def get_mp_context():
    # Same condition as is_parallel(); reuse it instead of duplicating the check
    if is_parallel():
        return get_context('loky')
    return None


def _pynvml_mem_size(kind="total", index=0):
    import pynvml

    pynvml.nvmlInit()
    size = None
    if kind == "free":
        size = int(pynvml.nvmlDeviceGetMemoryInfo(pynvml.nvmlDeviceGetHandleByIndex(index)).free)
    elif kind == "total":
        size = int(pynvml.nvmlDeviceGetMemoryInfo(pynvml.nvmlDeviceGetHandleByIndex(index)).total)
    else:
        raise ValueError("{0} not a supported option for device_mem_size.".format(kind))
    pynvml.nvmlShutdown()
    return size


def device_mem_size(kind="total"):
    if kind not in ["free", "total"]:
        raise ValueError("{0} not a supported option for device_mem_size.".format(kind))
    try:
        if kind == "free":
            return int(cuda.current_context().get_memory_info()[0])
        else:
            return int(cuda.current_context().get_memory_info()[1])
    except NotImplementedError:
        if kind == "free":
            # Not using NVML "free" memory, because it will not include RMM-managed memory
            warnings.warn("get_memory_info is not supported. Using total device memory from NVML.")
        size = _pynvml_mem_size(kind="total", index=0)
        return size


def get_rmm_size(size):
    return (size // 256) * 256


def calculate_frac(num_rows, num_feat, world_size):
    total_memory = world_size * device_mem_size(kind="total")
    mem_to_use = total_memory * 0.4
    num_rows_to_use = mem_to_use / (num_feat * 6)
    print(num_rows_to_use)
    frac = min(num_rows_to_use / num_rows, 1.0)
    return frac


def create_client(config):
    device_pool_frac = config.cluster.device_pool_frac
    device_size = device_mem_size(kind="total")
    device_pool_size = int(device_pool_frac * device_size)
    dask_space = "/tmp/dask_space/"
    protocol = config.cluster.protocol
    visible_devices = [i for i in range(config.cluster.world_size)]
    if protocol == "ucx":
        cluster = LocalCUDACluster(
            protocol=protocol,
            CUDA_VISIBLE_DEVICES=visible_devices,
            rmm_pool_size=get_rmm_size(device_pool_size),
            local_directory=dask_space,
            device_memory_limit=None,
            enable_tcp_over_ucx=True,
            enable_nvlink=True,
        )
    else:
        cluster = LocalCUDACluster(
            protocol=protocol,
            CUDA_VISIBLE_DEVICES=visible_devices,
            rmm_pool_size=get_rmm_size(device_pool_size),
            local_directory=dask_space,
            device_memory_limit=None,
        )
    client = Client(cluster)
    return client
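

# Editor's note: a minimal usage sketch of the seeding helpers above; it is
# illustrative and not part of the original file. In a single-process run
# broadcast_seeds is a no-op, so no torch.distributed setup is needed, though
# importing this module still assumes the full TSPP environment (dask_cuda etc.).
if __name__ == "__main__":
    worker_seeds, shuffling_seeds = setup_seeds(master_seed=42, epochs=10, device=torch.device("cpu"))
    assert len(worker_seeds) == get_world_size()  # one RNG seed per worker
    assert len(shuffling_seeds) == 10             # one shuffling seed per epoch
    print(worker_seeds, shuffling_seeds[:3])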
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/distributed_utils.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn as nn
import torch.nn.functional as F


class TSPP_criterion_wrapper(nn.Module):
    '''This wrapper unifies the definition of the forward function across different criterions'''

    def __init__(self, criterion, cl_start_horizon=None, cl_update=None):
        super().__init__()
        self.criterion = criterion
        self.base_arguments = set(criterion.forward.__code__.co_varnames)
        self.additional_arguments = {'weights'}
        self.allowed_arguments = self.base_arguments.union(self.additional_arguments)

        # Curriculum learning
        assert bool(cl_start_horizon) == bool(cl_update), \
            "Both cl_start_horizon and cl_update have to be set or unset at the same time"
        self.curr_horizon = cl_start_horizon
        self.horizon_update = cl_update
        self.cl_counter = 0

    def forward(self, preds, labels, weights=None, **kwargs):
        disallowed_kwargs = set(kwargs.keys()) - self.allowed_arguments
        if disallowed_kwargs:
            raise TypeError(f'Invalid keyword arguments {disallowed_kwargs} for {type(self.criterion)}')
        kwargs = {name: arg for name, arg in kwargs.items() if name in self.base_arguments}

        if self.training:
            if self.curr_horizon:
                preds = preds[:, :self.curr_horizon]
                labels = labels[:, :self.curr_horizon]
                weights = weights[:, :self.curr_horizon] if weights is not None else None
                if (self.cl_counter + 1) % self.horizon_update == 0:
                    self.curr_horizon += 1
                self.cl_counter += 1

        # We expect preds to be shaped batch_size x time x num_estimators in 3D case
        # or batch_size x time x num_targets x num_estimators in 4D case
        if len(preds.shape) == 4 and len(labels.shape) == 3:
            labels = labels.unsqueeze(-1)
            if weights is not None:
                weights = weights.unsqueeze(-1)

        loss = self.criterion(preds, labels, **kwargs)
        if weights is not None and weights.numel():
            # Presence of weights is detected on config level. Loss is reduced accordingly
            loss *= weights
            loss = loss.view(-1, *loss.shape[2:]).mean(0)
        return loss


class QuantileLoss(nn.Module):
    def __init__(self, quantiles, reduction='mean'):
        super().__init__()
        self.quantiles = quantiles
        self.reduce = reduction == 'mean'

    def forward(self, predictions, targets, weights=None):
        if not hasattr(self, 'q'):
            self.register_buffer('q', predictions.new(self.quantiles))
        diff = predictions - targets
        losses = (1 - self.q) * F.relu(diff) + self.q * F.relu(-diff)
        if self.reduce:
            losses = losses.view(-1, losses.shape[-1]).mean(0)
        return losses


class GaussianLogLikelihood(nn.Module):
    def __init__(self, reduction='mean'):
        super().__init__()
        self.reduce = reduction == 'mean'

    def forward(self, predictions, targets):
        # Inputs with shape [BS, window, 2] (mean + std)
        # Targets with shape [BS, window, 1]
        mu = predictions[..., 0:1]
        sigma = predictions[..., 1:2]
        distribution = torch.distributions.normal.Normal(mu, sigma)
        likelihood = distribution.log_prob(targets)
        likelihood = -likelihood.view(targets.shape[0], targets.shape[1])
        loss = torch.unsqueeze(likelihood, -1)
        if self.reduce:
            loss = loss.mean(0)
        return loss
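

# Editor's note: a hedged, minimal demonstration of QuantileLoss above; it is
# not part of the original file. Shapes follow the comments in
# TSPP_criterion_wrapper: predictions are [batch, time, num_estimators] with
# one estimator per quantile, and targets broadcast over the last axis.
if __name__ == "__main__":
    quantiles = [0.1, 0.5, 0.9]
    criterion = QuantileLoss(quantiles)
    predictions = torch.randn(8, 24, len(quantiles))  # [BS, window, n_quantiles]
    targets = torch.randn(8, 24, 1)                   # broadcasts over the quantile axis
    loss = criterion(predictions, targets)
    print(loss.shape)  # torch.Size([3]) - one pinball loss per quantile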
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/criterion.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
import os

import hydra
from omegaconf import OmegaConf
import torch

import conf.conf_utils
from distributed_utils import is_main_process, init_distributed, init_parallel
from training.utils import set_seed, get_optimization_objectives
from loggers.log_helper import log_parameters

warnings.filterwarnings("ignore")


@hydra.main(config_path="conf", config_name="train_config")
def main(config):
    trainer_type = config.trainer._target_
    set_seed(config.get("seed", None))
    model = hydra.utils.instantiate(config.model)
    train, valid, test = hydra.utils.call(config.dataset)
    evaluator = hydra.utils.instantiate(config.evaluator, test_data=test)
    if 'CTLTrainer' in trainer_type:
        init_parallel()
        init_distributed()
        model = model.to(device=config.model.config.device)
        trainer = hydra.utils.instantiate(
            config.trainer,
            optimizer={'params': model.parameters()},
            model=model,
            train_dataset=train,
            valid_dataset=valid,
        )
        log_parameters(trainer.logger, config)
        trainer.train()
        # Only rank 0 evaluates; initialize here so non-main ranks don't hit a
        # NameError when computing objectives below
        eval_metrics = {}
        if is_main_process():
            checkpoint = torch.load("best_checkpoint.zip", map_location=evaluator.device)
            model.load_state_dict(checkpoint["model_state_dict"])
            preds, labels, ids, weights = evaluator.predict(model)
            eval_metrics = evaluator.evaluate(preds, labels, ids, weights)
            trainer.logger.log(step=[], data=eval_metrics, verbosity=0)
            trainer.logger.flush()
        del train, valid, test, model, trainer
        torch.cuda.empty_cache()
        objectives = get_optimization_objectives(config, eval_metrics)
        return objectives
    elif 'XGBTrainer' in trainer_type or "StatTrainer" in trainer_type:
        del config.trainer.criterion
        trainer = hydra.utils.instantiate(
            config.trainer,
            model=model,
            train_dataset=train,
            valid_dataset=valid,
        )
        trainer.train()
        preds, labels, ids, weights = evaluator.predict(model)
        eval_metrics = evaluator.evaluate(preds, labels, ids, weights)
        trainer.logger.log(step=[], data=eval_metrics, verbosity=0)
        objectives = get_optimization_objectives(config, eval_metrics)
        return objectives
    else:
        raise AttributeError(f"Unsupported trainer provided: {trainer_type}")


if __name__ == "__main__":
    main()
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/launch_training.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import atexit
import time
from collections import OrderedDict
from threading import Thread
from queue import Queue
from functools import partial
from typing import Callable

from torch.utils.tensorboard import SummaryWriter
from dllogger import Backend

from distributed_utils import is_parallel


class AverageMeter:
    def __init__(self):
        self.reset()

    def reset(self):
        self.updated = False
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, value):
        self.updated = True
        if isinstance(value, (tuple, list)):
            val = value[0]
            n = value[1]
        else:
            val = value
            n = 1
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    @property
    def value(self):
        return self.avg


class PerformanceMeter:
    def __init__(self):
        self.reset()

    def reset(self):
        self.updated = False
        self.start = time.time()
        self.n = 0

    def update(self, val=1):
        self.updated = True
        self.n += val

    @property
    def value(self):
        return self.n / self.elapsed_time

    @property
    def elapsed_time(self):
        return time.time() - self.start


class AggregatorBackend(Backend):
    def __init__(self, verbosity, agg_dict):
        super().__init__(verbosity=verbosity)
        self.metrics = OrderedDict({k: v() for k, v in agg_dict.items()})
        # OrderedDict (unlike plain dict) accepts instance attributes, so the
        # flushed flag can ride along with the metrics
        self.metrics.flushed = True
        self.step = 0
        self.epoch = 0
        self.start_time = time.time()

    @property
    def log_level(self):
        return self._log_level

    def metadata(self, timestamp, elapsedtime, metric, metadata):
        pass

    def _reset_perf_meter(self, name):
        # The values of self.metrics are single meters, not collections, so
        # check the meter directly (the original iterated over it, which would
        # raise a TypeError)
        agg = self.metrics[name]
        if isinstance(agg, PerformanceMeter):
            agg.reset()

    def reset_perf_meters(self):
        # This method allows us to reset performance metrics in case we want to
        # exclude the first couple of iterations from performance measurement
        for name in self.metrics.keys():
            self._reset_perf_meter(name)

    def log(self, timestamp, elapsedtime, step, data):
        self.step = step
        if self.step == []:
            self.metrics.flushed = True
        if "epoch" in data.keys():
            self.epoch = data["epoch"]
        for k, v in data.items():
            if k not in self.metrics.keys():
                continue
            self.metrics.flushed = False
            self.metrics[k].update(v)

    def flush(self):
        if self.metrics.flushed:
            return
        result_string = "Epoch {} | step {} |".format(self.epoch, self.step)
        for name, agg in self.metrics.items():
            if not agg.updated:
                continue
            if isinstance(agg, AverageMeter):
                _name = "avg " + name
            elif isinstance(agg, PerformanceMeter):
                _name = name + "/s"
            result_string += _name + " {:.3f} |".format(agg.value)
            agg.reset()
        result_string += "walltime {:.3f} |".format(time.time() - self.start_time)
        self.metrics.flushed = True
        print(result_string)


class TensorBoardBackend(Backend):
    def __init__(self, verbosity, log_dir='.'):
        super().__init__(verbosity=verbosity)
        self.summary_writer = SummaryWriter(log_dir=os.path.join(log_dir, "TB_summary"),
                                            flush_secs=120,
                                            max_queue=200)
        atexit.register(self.summary_writer.close)

    @property
    def log_level(self):
        return self._log_level

    def metadata(self, timestamp, elapsedtime, metric, metadata):
        pass

    def log(self, timestamp, elapsedtime, step, data):
        if not isinstance(step, int):
            return
        for k, v in data.items():
            self.summary_writer.add_scalar(k, v, step)

    def flush(self):
        pass
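

# Editor's note: an illustrative sketch (not part of the original file) of the
# two meters above, which back the AggregatorBackend: AverageMeter keeps a
# count-weighted running mean, PerformanceMeter a rate since creation/reset.
if __name__ == "__main__":
    meter = AverageMeter()
    meter.update((2.0, 10))  # a (value, count) pair: 10 samples with mean 2.0
    meter.update(4.0)        # a bare value counts once
    print(meter.value)       # (2.0 * 10 + 4.0) / 11 = ~2.18

    perf = PerformanceMeter()
    for _ in range(5):
        perf.update(32)      # e.g. 5 batches of 32 samples
    print(perf.value)        # samples per second since reset()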
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/loggers/backends.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import json

import pandas as pd

import dllogger
from dllogger import JSONStreamBackend, Logger, StdOutBackend

from .backends import AggregatorBackend, TensorBoardBackend, AverageMeter
from omegaconf import OmegaConf
from distributed_utils import is_main_process


def jsonlog_2_df(path, keys):
    with open(path, 'r') as f:
        log = [json.loads(l[4:]) for l in f.readlines()]
    log = [l for l in log if l['type'] == 'LOG' and isinstance(l['step'], (int, list))]
    assert log[-1]['step'] == [], "Logfile is corrupted"
    log[-1]['step'] = log[-2]['step']  # Every log ends with step == []
    log = [
        {
            **{k: v for k, v in l.items() if not isinstance(v, dict)},
            **(l['data'] if 'data' in l else {}),
            'timestamp': float(l['timestamp']) * 1000,
        }
        for l in log
    ]
    log = [{k: v for k, v in l.items() if k in keys} for l in log]
    df = pd.DataFrame(log)
    df = df.groupby('step').mean()
    return df


def empty_step_format(step):
    return ""


def empty_prefix_format(timestamp):
    return ""


def no_string_metric_format(metric, metadata, value):
    unit = metadata["unit"] if "unit" in metadata.keys() else ""
    format = "{" + metadata["format"] + "}" if "format" in metadata.keys() else "{}"
    if metric == "String":
        return "{} {}".format(format.format(value) if value is not None else value, unit)
    return "{} : {} {}".format(metric, format.format(value) if value is not None else value, unit)


def setup_logger(config, resume_training=False):
    log_filename = config.get("log_filename", "log.json")
    if is_main_process():
        backends = [
            TensorBoardBackend(verbosity=dllogger.Verbosity.VERBOSE),
            JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=log_filename, append=True),
            AggregatorBackend(verbosity=dllogger.Verbosity.VERBOSE, agg_dict={"loss": AverageMeter}),
            StdOutBackend(
                verbosity=dllogger.Verbosity.DEFAULT,
                step_format=empty_step_format,
                metric_format=no_string_metric_format,
                prefix_format=empty_prefix_format,
            ),
        ]
        logger = Logger(backends=backends)
    else:
        logger = Logger(backends=[])
    container_setup_info = get_framework_env_vars()
    logger.log(step="PARAMETER", data=container_setup_info, verbosity=dllogger.Verbosity.VERBOSE)
    if not resume_training:
        logger.metadata("loss", {"unit": "nat", "GOAL": "MINIMIZE", "STAGE": "TRAIN"})
        logger.metadata("val_loss", {"unit": "nat", "GOAL": "MINIMIZE", "STAGE": "VAL"})
    return logger


def restart_logger(config, logger):
    """A utility function to neatly close every backend holding resources"""
    for b in logger.backends:
        if hasattr(b, 'close'):
            b.close()
    return setup_logger(config, resume_training=True)


def log_parameters(logger, config):
    model_config = flatten_config(config.model)
    trainer_config = flatten_config(config.trainer)
    additional_fields = {'seed': config.seed}
    logger.log(step="PARAMETER",
               data={**model_config, **trainer_config, **additional_fields},
               verbosity=dllogger.Verbosity.VERBOSE)


def flatten_config(config):
    config = OmegaConf.to_container(config, resolve=True)
    if '_target_' in config:
        del config['_target_']
    if 'config' in config:
        c = config['config']
        config = {**c, **config}
        del config['config']
    config = pd.json_normalize(config, sep='.')
    config = config.to_dict(orient='records')[0]
    return config


def get_framework_env_vars():
    return {
        "NVIDIA_PYTORCH_VERSION": os.environ.get("NVIDIA_PYTORCH_VERSION"),
        "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION"),
        "CUBLAS_VERSION": os.environ.get("CUBLAS_VERSION"),
        "NCCL_VERSION": os.environ.get("NCCL_VERSION"),
        "CUDA_DRIVER_VERSION": os.environ.get("CUDA_DRIVER_VERSION"),
        "CUDNN_VERSION": os.environ.get("CUDNN_VERSION"),
        "CUDA_VERSION": os.environ.get("CUDA_VERSION"),
        "NVIDIA_PIPELINE_ID": os.environ.get("NVIDIA_PIPELINE_ID"),
        "NVIDIA_BUILD_ID": os.environ.get("NVIDIA_BUILD_ID"),
        "NVIDIA_TF32_OVERRIDE": os.environ.get("NVIDIA_TF32_OVERRIDE"),
    }
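

# Editor's note: a small, hedged illustration (not in the original file) of
# what flatten_config produces. Run with `python -m loggers.log_helper` from
# the repository root so the relative import above resolves; the _target_
# value below is a made-up example.
if __name__ == "__main__":
    _cfg = OmegaConf.create(
        {"_target_": "models.some_model.SomeModel", "config": {"hidden_size": 128, "dropout": 0.1}}
    )
    # The nested "config" block is hoisted, _target_ dropped, and the rest
    # dot-flattened via pandas.json_normalize
    print(flatten_config(_cfg))  # -> {'hidden_size': 128, 'dropout': 0.1}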
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/loggers/log_helper.py
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/distributed_launcher/__init__.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

from setuptools import find_namespace_packages, setup

setup(
    name="hydra-torch-dist-launcher",
    version="0.1",
    author="Jan Baczek",
    author_email="[email protected]",
    description="Torch distributed launcher plugin",
    packages=find_namespace_packages(include=["hydra_plugins.*"]),
    classifiers=[
        # The original listed "MIT Apache License, Version 2.0", which is not
        # a valid trove classifier; the project is Apache-2.0 licensed
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    install_requires=["hydra-core==1.1.1"],
    include_package_data=True,
)
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/distributed_launcher/setup.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

from typing import Optional, Sequence

from hydra.types import HydraContext
from hydra.core.utils import JobReturn
from hydra.plugins.launcher import Launcher
from hydra.types import TaskFunction
from omegaconf import DictConfig


class TorchDistributedLauncher(Launcher):
    def __init__(self, **kwargs) -> None:
        self.config: Optional[DictConfig] = None
        self.task_function: Optional[TaskFunction] = None
        self.hydra_context: Optional[HydraContext] = None

    def setup(
        self,
        *,
        hydra_context: HydraContext,
        task_function: TaskFunction,
        config: DictConfig,
    ) -> None:
        from . import _core

        return _core.setup(launcher=self, hydra_context=hydra_context, task_function=task_function, config=config)

    def launch(
        self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
    ) -> Sequence[JobReturn]:
        from . import _core

        return _core.launch(launcher=self, job_overrides=job_overrides, initial_job_idx=initial_job_idx)
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/distributed_launcher/hydra_plugins/distributed_launcher.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

from dataclasses import dataclass
from typing import Optional

from hydra.core.config_store import ConfigStore


@dataclass
class LauncherConfig:
    _target_: str = (
        "hydra_plugins.distributed_launcher.TorchDistributedLauncher"
    )
    min_nodes: int = 1
    max_nodes: int = 1
    nproc_per_node: int = 8
    rdzv_id: str = 'none'
    role: str = 'default'
    rdzv_endpoint: str = '127.0.0.1:29500'
    rdzv_backend: str = 'static'
    rdzv_timeout: int = -1
    max_restarts: int = 0
    monitor_interval: int = 5
    # The original had `log_dir = None` with no annotation; without one a
    # dataclass attribute is a plain class variable, not a config field
    log_dir: Optional[str] = None
    redirects: str = '0'
    tee: str = '0'


ConfigStore.instance().store(
    group="hydra/launcher", name="torchrun", node=LauncherConfig
)
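

# Editor's note (not part of the original file): once this plugin is
# installed, the launcher registered above is selected with a Hydra override.
# The entrypoint and overrides below are illustrative, e.g.:
#
#   python launch_training.py --multirun hydra/launcher=torchrun \
#       hydra.launcher.nproc_per_node=8
#
# Any LauncherConfig field can be overridden the same way, for instance
# hydra.launcher.rdzv_endpoint=127.0.0.1:29501.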
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/distributed_launcher/hydra_plugins/config.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import logging
from functools import partial
from pathlib import Path
from typing import Sequence

from omegaconf import DictConfig, open_dict
from hydra.types import HydraContext
from hydra.core.singleton import Singleton
from hydra.core.hydra_config import HydraConfig
from hydra.types import TaskFunction
from hydra.core.utils import (
    JobReturn,
    configure_log,
    filter_overrides,
    run_job,
    setup_globals,
    env_override,
)

from torch.distributed.launcher.api import LaunchConfig, launch_agent
from torch.distributed.elastic.multiprocessing import Std

from .distributed_launcher import TorchDistributedLauncher

log = logging.getLogger(__name__)


def setup(
    launcher: TorchDistributedLauncher,
    *,
    hydra_context: HydraContext,
    task_function: TaskFunction,
    config: DictConfig,
) -> None:
    launcher.config = config
    launcher.hydra_context = hydra_context
    launcher.task_function = task_function

    c = config.hydra.launcher
    launcher.launch_config = LaunchConfig(
        min_nodes=c.min_nodes,
        max_nodes=c.max_nodes,
        nproc_per_node=c.nproc_per_node,
        run_id=c.rdzv_id,
        role=c.role,
        rdzv_endpoint=c.rdzv_endpoint,
        rdzv_backend=c.rdzv_backend,
        rdzv_configs={'rank': 0},
        max_restarts=c.max_restarts,
        monitor_interval=c.monitor_interval,
        # Works only with fork. Spawn and forkserver require pickling, which
        # doesn't work inside a wrapped function
        start_method='fork',
        redirects=Std.from_str(c.redirects),
        tee=Std.from_str(c.tee),
        log_dir=c.get('log_dir'),
    )


def launch(
    launcher: TorchDistributedLauncher,
    job_overrides: Sequence[Sequence[str]],
    initial_job_idx: int,
) -> Sequence[JobReturn]:
    """
    :param job_overrides: a List of List<String>, where each inner list is the arguments for one job run.
    :param initial_job_idx: Initial job idx in batch.
    :return: an array of return values from run_job with indexes corresponding to the input list indexes.
    """
    setup_globals()
    assert launcher.config is not None
    assert launcher.hydra_context is not None
    assert launcher.task_function is not None

    configure_log(launcher.config.hydra.hydra_logging, launcher.config.hydra.verbose)
    sweep_dir = Path(str(launcher.config.hydra.sweep.dir))
    sweep_dir.mkdir(parents=True, exist_ok=True)
    runs = []

    for idx, overrides in enumerate(job_overrides):
        idx = initial_job_idx + idx
        lst = " ".join(filter_overrides(overrides))
        log.info(f"\t#{idx} : {lst}")
        sweep_config = launcher.hydra_context.config_loader.load_sweep_config(
            launcher.config, list(overrides)
        )
        with open_dict(sweep_config):
            # The job id typically comes from the underlying scheduler
            # (SLURM_JOB_ID, for instance). In that case it will not be
            # available here, because we are still in the main process;
            # instead it should be populated remotely before calling the
            # task_function.
            sweep_config.hydra.job.id = f"job_id_for_{idx}"
            sweep_config.hydra.job.num = idx
        HydraConfig.instance().set_config(sweep_config)
        launcher.singleton_state = Singleton.get_state()

        def _task_function(task_function, singleton_state, task_cfg):
            return launch_agent(launcher.launch_config,
                                wrapped_task_function,
                                [task_function, launcher.singleton_state, task_cfg],
                                )

        _task_function = partial(_task_function, launcher.task_function, launcher.singleton_state)

        ret = run_job(
            hydra_context=launcher.hydra_context,
            task_function=_task_function,
            config=sweep_config,
            job_dir_key="hydra.sweep.dir",
            job_subdir_key="hydra.sweep.subdir",
        )
        # We assume that the main process has rank 0
        ret.return_value = ret.return_value[0]
        runs.append(ret)
        configure_log(launcher.config.hydra.hydra_logging, launcher.config.hydra.verbose)
    return runs


def wrapped_task_function(task_function, singleton_state, task_cfg):
    Singleton.set_state(singleton_state)
    env_set = HydraConfig.instance().cfg.hydra.job.env_set
    with env_override(env_set):
        ret = task_function(task_cfg)
    return ret
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/distributed_launcher/hydra_plugins/_core.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time

import dllogger

from callbacks.callbacks import Callback, CallbackContainer
from distributed_utils import is_main_process
from training.utils import round_dict
from training.checkpoint_utils import save_checkpoint


class CTLCallbackContainer(CallbackContainer):
    """
    Base class for CTLTrainer callbacks storage.
    """

    def __init__(self, trainer, callbacks):
        self.callbacks = callbacks
        self.trainer = trainer
        self._init_trainers()
        self.logs = {}
        super().__init__()

    def _init_trainers(self):
        for callback in self.callbacks:
            callback.trainer = self.trainer

    def on_train_begin(self, logs=None):
        if logs is None:
            logs = {}
        for callback in self.callbacks:
            callback.on_train_begin(logs)

    def on_train_end(self, logs=None):
        if logs is None:
            logs = {}
        for callback in self.callbacks:
            callback.on_train_end(logs)

    def on_epoch_begin(self, epoch, logs=None):
        if logs is None:
            logs = {}
        for callback in self.callbacks:
            callback.on_epoch_begin(epoch, logs)

    def on_epoch_end(self, epoch, logs=None):
        if logs is None:
            logs = {}
        for callback in self.callbacks:
            callback.on_epoch_end(epoch, logs)

    def on_valid_begin(self, epoch, logs=None):
        if logs is None:
            logs = {}
        for callback in self.callbacks:
            callback.on_valid_begin(epoch, logs)

    def on_valid_end(self, epoch, logs=None):
        if logs is None:
            logs = {}
        for callback in self.callbacks:
            callback.on_valid_end(epoch, logs)

    def on_batch_begin(self, batch, logs=None):
        if logs is None:
            logs = {}
        for callback in self.callbacks:
            callback.on_batch_begin(batch, logs)

    def on_batch_end(self, batch, logs=None):
        if logs is None:
            logs = {}
        for callback in self.callbacks:
            callback.on_batch_end(batch, logs)

    def on_evaluate_end(self, logs=None):
        if logs is None:
            logs = {}
        for callback in self.callbacks:
            callback.on_evaluate_end(logs)

    def on_evaluate_begin(self, logs=None):
        if logs is None:
            logs = {}
        for callback in self.callbacks:
            callback.on_evaluate_begin(logs)


class CTLCallback(Callback):
    """
    Base class for building new CTLTrainer callbacks.
    """

    def __init__(self):
        self.trainer = None
        super().__init__()

    @property
    def trainer(self):
        return self._trainer

    @trainer.setter
    def trainer(self, trainer):
        self._trainer = trainer

    def on_train_begin(self, logs=None):
        pass

    def on_train_end(self, logs=None):
        pass

    def on_epoch_begin(self, epoch, logs=None):
        pass

    def on_epoch_end(self, epoch, logs=None):
        pass

    def on_valid_begin(self, epoch, logs=None):
        pass

    def on_valid_end(self, epoch, logs=None):
        pass

    def on_batch_begin(self, batch, logs=None):
        pass

    def on_batch_end(self, batch, logs=None):
        pass

    def on_evaluate_begin(self, logs=None):
        pass

    def on_evaluate_end(self, logs=None):
        pass


class LoggingCallback(CTLCallback):
    def on_train_begin(self, logs=None):
        self.trainer.logger.log(
            step='event',
            data={"String": "Training with {} epochs".format(self.trainer.config.get("num_epochs", 1))},
            verbosity=dllogger.Verbosity.DEFAULT,
        )

    def on_train_end(self, logs=None):
        self.trainer.logger.log(step='event', data={"String": "Training Stopped"}, verbosity=dllogger.Verbosity.DEFAULT)

    def on_epoch_begin(self, epoch, logs=None):
        self.trainer.logger.log(step='event', data={"String": "Epoch {}".format(epoch)}, verbosity=dllogger.Verbosity.DEFAULT)

    def on_batch_end(self, batch, logs=None):
        if self.trainer.config.log_interval > 0 and self.trainer.global_step % self.trainer.config.log_interval == 0:
            self.trainer.logger.flush()

    def on_valid_begin(self, epoch, logs=None):
        self.trainer.logger.log(
            step='event', data={"String": "Calculating Validation Metrics"}, verbosity=dllogger.Verbosity.DEFAULT
        )

    def on_valid_end(self, epoch, logs=None):
        self.trainer.logger.log(
            step='event',
            data={"String": "Epoch {} Validation Metrics: {}".format(epoch, round_dict(logs))},
            verbosity=dllogger.Verbosity.DEFAULT,
        )

    def on_epoch_end(self, epoch, logs=None):
        self.trainer.logger.flush()

    def on_evaluate_begin(self, logs=None):
        self.trainer.logger.log(
            step='event', data={"String": "Beginning Metric Evaluation"}, verbosity=dllogger.Verbosity.DEFAULT
        )

    def on_evaluate_end(self, logs=None):
        self.trainer.logger.log(
            step='event', data={"String": "Evaluation Metrics: {}".format(round_dict(logs))}, verbosity=dllogger.Verbosity.DEFAULT
        )
        self.trainer.logger.log(step=[], data=logs, verbosity=dllogger.Verbosity.DEFAULT)


class EarlyStopping(CTLCallback):
    def __init__(self, metric="val_loss", min_delta=0, patience=5, max_divergence=None, divergence_patience=1):
        self.metric = metric
        self.min_delta = min_delta
        self.patience = patience
        self.max_divergence = max_divergence
        self.divergence_patience = divergence_patience
        self.divergence_stopped_epochs = 0
        self.stopped_epochs = 0
        self.best_loss = None
        super().__init__()

    def on_epoch_end(self, epoch, logs=None):
        epoch_loss = logs.get(self.metric, None)
        if epoch_loss is None:
            return

        if self.best_loss is None:
            self.best_loss = epoch_loss
            return

        if self.max_divergence and ((epoch_loss - self.best_loss) > self.max_divergence):
            self.divergence_stopped_epochs += 1
            self.stopped_epochs += 1
            if self.divergence_stopped_epochs >= self.divergence_patience:
                self.trainer._stop_training = True
                self.trainer.logger.log(
                    step='event',
                    data={"String": "Applying early stopping as divergence threshold reached"},
                    verbosity=dllogger.Verbosity.DEFAULT,
                )
        elif (epoch_loss + self.min_delta) < self.best_loss:
            self.best_loss = epoch_loss
            self.stopped_epochs = 0
            self.divergence_stopped_epochs = 0
        else:
            self.stopped_epochs += 1
            self.divergence_stopped_epochs = 0

        if self.stopped_epochs >= self.patience:
            self.trainer._stop_training = True
            self.trainer.logger.log(
                step='event',
                data={"String": "Applying early stopping"},
                verbosity=dllogger.Verbosity.DEFAULT,
            )


class SaveBestCheckpoint(CTLCallback):
    def __init__(self, metric="val_loss"):
        self.metric = metric
        self.best_loss = None
        super().__init__()

    def on_epoch_end(self, epoch, logs=None):
        epoch_loss = logs.get(self.metric, None)
        if epoch_loss is None:
            return
        if self.best_loss is None or epoch_loss < self.best_loss:
            self.best_loss = epoch_loss
            if is_main_process():
                save_checkpoint(self.trainer, checkpoint_dir=self.trainer.log_path, filename="best_checkpoint.zip")


class SaveCheckpoint(CTLCallback):
    def __init__(self):
        super().__init__()

    def on_epoch_end(self, epoch, logs=None):
        if is_main_process():
            save_checkpoint(self.trainer, checkpoint_dir=self.trainer.log_path, filename="last_checkpoint.zip")


class MeanAccumulator:
    def __init__(self):
        self.sum = 0
        self.count = 0

    def consume(self, value):
        self.sum += value
        self.count += 1

    @property
    def value(self):
        if self.count == 0:
            return 0
        return self.sum / self.count


class ThroughputBenchmark(CTLCallback):
    def __init__(self, warmup_epochs=0):
        self.warmup_epochs = warmup_epochs
        self.train_throughput = MeanAccumulator()
        self.valid_throughput = MeanAccumulator()
        self.epoch_train_start = None
        self.epoch_train_end = None
        super().__init__()

    def on_train_end(self, logs=None):
        if self.train_throughput.value > 0:
            logs["Train it/s"] = self.train_throughput.value
            logs["Valid it/s"] = self.valid_throughput.value

    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_train_start = time.time()

    def on_valid_end(self, epoch, logs=None):
        if epoch >= self.warmup_epochs:
            train_epoch_time = self.epoch_train_end - self.epoch_train_start
            valid_epoch_time = time.time() - self.epoch_train_end
            train_iter_per_sec = self.trainer.train_dataset_len / train_epoch_time
            valid_iter_per_sec = self.trainer.valid_dataset_len / valid_epoch_time
            logs["Train epoch it/s"] = train_iter_per_sec
            logs["Valid epoch it/s"] = valid_iter_per_sec
            self.train_throughput.consume(train_iter_per_sec)
            self.valid_throughput.consume(valid_iter_per_sec)

    def on_valid_begin(self, epoch, logs=None):  # renamed from `batch`: the container passes the epoch index here
        self.epoch_train_end = time.time()
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/callbacks/ctl_callbacks.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pandas as pd
from omegaconf import OmegaConf
from hydra.experimental.callback import Callback

from loggers.log_helper import jsonlog_2_df


class MergeLogs(Callback):
    def on_multirun_end(self, config, **kwargs):
        OmegaConf.resolve(config)

        ALLOWED_KEYS = ['timestamp', 'elapsed_time', 'step', 'loss', 'val_loss', 'MAE', 'MSE', 'RMSE', 'P50', 'P90']
        dfs = []
        for p, sub_dirs, files in os.walk(config.hydra.sweep.dir):
            if 'log.json' in files:
                path = os.path.join(p, 'log.json')
                df = jsonlog_2_df(path, ALLOWED_KEYS)
                dfs.append(df)

        # Transpose dataframes: one joint frame per metric, one column per run
        plots = {}
        for c in dfs[0].columns:
            joint_plots = pd.DataFrame({i: df[c] for i, df in enumerate(dfs)})
            metrics = {}
            metrics['mean'] = joint_plots.mean(axis=1)
            metrics['std'] = joint_plots.std(axis=1)
            metrics['mean_m_std'] = metrics['mean'] - metrics['std']
            metrics['mean_p_std'] = metrics['mean'] + metrics['std']
            metrics_df = pd.DataFrame(metrics)
            plots[c] = metrics_df[~metrics_df.isna().all(axis=1)]  # Drop rows which contain only NaNs

        timestamps = plots.pop('timestamp')['mean']
        timestamps = (timestamps * 1000).astype(int)
        # is_monotonic is deprecated in newer pandas; is_monotonic_increasing
        # is the equivalent, version-stable spelling
        if not timestamps.is_monotonic_increasing:
            raise ValueError('Timestamps are not monotonic')
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/callbacks/hydra_callbacks.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class Callback(object):
    """
    Base class for building new callbacks.
    """

    def __init__(self):
        pass


class CallbackContainer(object):
    """
    Base class for callbacks storage.
    """

    def __init__(self):
        pass
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/callbacks/callbacks.py
# Copyright 2021-2022 NVIDIA Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright 2019 Ross Wightman
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Exponential Moving Average (EMA) of model updates """

import logging
from copy import deepcopy

import torch
import torch.nn as nn

_logger = logging.getLogger(__name__)


class ModelEmaV2(nn.Module):
    """ Model Exponential Moving Average V2

    Keep a moving average of everything in the model state_dict (parameters and buffers).
    V2 of this module is simpler, it does not match params/buffers based on name but simply
    iterates in order. It works with torchscript (JIT of full model).
    """

    def __init__(self, model, decay=0.999, device=None):
        super(ModelEmaV2, self).__init__()
        # make a copy of the model for accumulating moving average of weights
        self.module = deepcopy(model)
        self.module.eval()
        self.decay = decay
        self.device = device  # perform ema on different device from model if set
        if self.device is not None:
            self.module.to(device=device)

    def update(self, model):
        update_fn = lambda ema_v, model_v: self.decay * ema_v + (1.0 - self.decay) * model_v
        with torch.no_grad():
            for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
                if self.device is not None:
                    model_v = model_v.to(device=self.device)
                ema_v.copy_(update_fn(ema_v, model_v))

    def set(self, model):
        with torch.no_grad():
            for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
                if self.device is not None:
                    model_v = model_v.to(device=self.device)
                ema_v.copy_(model_v)

    def forward(self, x):
        return self.module(x)
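

# Editor's note: a minimal, hedged usage sketch, not part of the original
# file. In TSPP the CTLTrainer calls ema.update(model) after each optimizer
# step and validates/saves with ema.module instead of the raw model.
if __name__ == "__main__":
    model = nn.Linear(4, 1)
    ema = ModelEmaV2(model, decay=0.99)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(10):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema.update(model)          # EMA weights trail the live weights
    print(ema.module.weight.data)  # the smoothed copy used for evaluation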
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/training/ema.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dgl
import torch
import numpy as np


def round_dict(input_data, decimal=4):
    rounded_data = {
        key: (np.around(value, decimal) if isinstance(value, (np.floating, float)) else value)
        for key, value in input_data.items()
    }
    return rounded_data


def to_device(batch, device=None):
    if isinstance(batch, torch.Tensor):
        return batch.to(device=device)
    if isinstance(batch, dict):
        return {k: t.to(device=device) if t.numel() else None for k, t in batch.items()}
    if isinstance(batch, dgl.DGLGraph):
        return batch.to(device=device)
    elif batch is None:
        return None


def set_seed(seed):
    if seed is None:
        return
    if not isinstance(seed, int):
        raise ValueError(f"Seed has to be an integer or None, but got type {type(seed)}")
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)


def get_optimization_objectives(config, metrics):
    # v != v is a NaN check: NaN metrics are mapped to +inf so the sweeper
    # treats failed runs as the worst possible outcome
    objectives = tuple(
        v if v == v else float('inf')
        for k, v in metrics.items()
        if k in config.get('optuna_objectives', [])
    )
    if len(objectives) == 1:
        return objectives[0]
    elif not objectives:
        return None
    return objectives
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/training/utils.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import json
import shutil

import dllogger
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from hydra.utils import get_original_cwd
from omegaconf import OmegaConf

from loggers.log_helper import restart_logger


def save_checkpoint(trainer, filename="checkpoint.zip", checkpoint_dir="."):
    if trainer.ema:
        module_to_save = trainer.ema.module
    elif isinstance(trainer.model, DDP):
        module_to_save = trainer.model.module
    else:
        module_to_save = trainer.model
    state = {
        "epoch": trainer.epoch + 1,
        "global_step": trainer.global_step,
        "model_state_dict": module_to_save.state_dict(),
        "optimizer_state_dict": trainer.optimizer.state_dict(),
    }
    checkpoint_path = os.path.join(checkpoint_dir, filename)
    trainer.logger.log(step='event', data={"String": f"Saving checkpoint to {filename}"}, verbosity=dllogger.Verbosity.DEFAULT)
    torch.save(state, checkpoint_path)


def maybe_restore_checkpoint(trainer, checkpoint_path):
    if checkpoint_path and os.path.isfile(checkpoint_path):
        trainer.logger.log(
            step='event',
            data={"String": f"Restoring checkpoint from {checkpoint_path}"},
            verbosity=dllogger.Verbosity.DEFAULT,
        )
        checkpoint = torch.load(checkpoint_path, map_location=trainer.device)
        trainer.model.load_state_dict(checkpoint["model_state_dict"])
        trainer.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        trainer.global_step = checkpoint["global_step"]
        trainer.epoch = checkpoint["epoch"]


def trim_json_log(log_path):
    """
    Loads dllogger's json log and returns its lines without unfinished epochs.
    Does not modify the logfile.
    """
    if os.path.isfile(log_path):
        with open(log_path, 'r') as f:
            lines = f.readlines()
        # In case the log file is newly created
        if not lines:
            return lines
        for i, l in enumerate(reversed(lines)):
            d = json.loads(l[4:])
            if d.get('step') == []:
                return lines
            if 'data' in d and 'String' in d['data'] and 'Epoch' in d['data']['String']:
                break
        lines = lines[:-i - 1]
        return lines
    return []


def detect_duplicated_run():
    """
    Returns the list of paths of runs with the same config as the current one.
    """
    # This is meant to be called in a trainer class, which means that this doesn't have access to the top level config
    current_config = OmegaConf.load('.hydra/config.yaml')
    rel = os.path.relpath(os.getcwd(), get_original_cwd())
    rel = next(x for x in rel.split(os.path.sep))
    result_dir = os.path.join(get_original_cwd(), rel)
    duplicated = []
    for p, s, f in os.walk(result_dir):
        if '.hydra' in s:
            c = OmegaConf.load(os.path.join(p, '.hydra/config.yaml'))
            if hash(c) == hash(current_config):
                duplicated.append(p)
    # Don't take into account runs that ended before any checkpoint had been
    # saved, or the current run (at this point hydra's config has already been saved)
    duplicated = [p for p in duplicated if os.path.exists(os.path.join(p, 'last_checkpoint.zip'))]
    return duplicated


def get_most_advanced_run(paths, logfile_name):
    adv = 0
    path = ''
    for p in paths:
        log_path = os.path.join(p, logfile_name)
        log_lines = trim_json_log(log_path)
        if len(log_lines) > adv:
            adv = len(log_lines)
            path = p
    return path


def maybe_continue_run(trainer):
    duplicates = detect_duplicated_run()
    if not duplicates:
        return

    logfile_name = trainer.config.get('logfile_name', 'log.json')
    unfinished_run_path = get_most_advanced_run(duplicates, logfile_name)
    checkpoint_path = os.path.join(unfinished_run_path, 'last_checkpoint.zip')
    best_checkpoint_path = os.path.join(unfinished_run_path, 'best_checkpoint.zip')
    maybe_restore_checkpoint(trainer, checkpoint_path)
    log_lines = trim_json_log(os.path.join(unfinished_run_path, logfile_name))
    # Reinitialize the logger. This will cause it to append to the copied log file.
    with open(logfile_name, 'w') as f:
        f.writelines(log_lines)
    trainer.logger = restart_logger(trainer.config, trainer.logger)
    trainer.logger.log(
        step='event',
        data={"String": f"Resuming run: {unfinished_run_path}"},
        verbosity=dllogger.Verbosity.DEFAULT,
    )
    shutil.copyfile(checkpoint_path, 'last_checkpoint.zip')
    shutil.copyfile(best_checkpoint_path, 'best_checkpoint.zip')
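

# Editor's note: an illustrative sketch (not in the original file) of what
# trim_json_log consumes. dllogger JSON lines carry a 4-character "DLLL"
# prefix, hence the l[4:] slices above; the sample records below are made up
# but parse the same way. Run with `python -m training.checkpoint_utils` from
# the repository root, assuming the full TSPP environment.
if __name__ == "__main__":
    sample = [
        'DLLL{"timestamp": "1.0", "type": "LOG", "step": 0, "data": {"String": "Epoch 0"}}\n',
        'DLLL{"timestamp": "2.0", "type": "LOG", "step": 1, "data": {"loss": 0.5}}\n',
        'DLLL{"timestamp": "3.0", "type": "LOG", "step": 2, "data": {"String": "Epoch 1"}}\n',
    ]
    with open("sample_log.json", "w") as f:
        f.writelines(sample)
    # The trailing "Epoch 1" marker has no finished records after it, so it is
    # dropped and only the first two lines survive
    print(trim_json_log("sample_log.json"))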
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/training/checkpoint_utils.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from abc import ABC import dgl import dllogger import hydra import numpy as np import torch import torch.nn as nn import importlib try: from apex import amp except ImportError: print("Nvidia apex not available. Can't use apex Automatic Mixed Precision (AMP) for training.\ Please check: https://github.com/NVIDIA/apex for installation") from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader, DistributedSampler from callbacks.ctl_callbacks import CTLCallbackContainer from data.datasets import TSBaseDataset, get_collate_fn from distributed_utils import reduce_tensor, get_mp_context from loggers.log_helper import setup_logger from training.ema import ModelEmaV2 from criterion import TSPP_criterion_wrapper from training.checkpoint_utils import maybe_continue_run from training.utils import to_device class Trainer(ABC): def train(self): return class CTLTrainer(Trainer): def __init__( self, model: nn.Module, train_dataset: TSBaseDataset, valid_dataset: TSBaseDataset, optimizer, criterion, callbacks, config, ): self.config = config self._stop_training = False self.metrics = {} callbacks = callbacks.values() self.callbacks = CTLCallbackContainer(self, callbacks) self.world_size = int(os.environ.get('WORLD_SIZE', 1)) self.local_rank = int(os.environ.get("LOCAL_RANK", 0)) self.device = next(model.parameters()).device self.valid_dataset_len = len(valid_dataset) self.train_dataset_len = len(train_dataset) self.train_sampler = None self.valid_sampler = None self.example_length = config.example_length self.encoder_length = config.encoder_length if self.world_size > 1: # XXX: is the seed argument here needed for reproducibility? 
# It should be set in launch_training.py with other seeds self.train_sampler = DistributedSampler( train_dataset, self.world_size, seed=config.get("seed", 1), drop_last=True ) self.valid_sampler = DistributedSampler( valid_dataset, self.world_size, seed=config.get("seed", 1), drop_last=False ) self.logger = setup_logger(self.config) self.optimizer = optimizer self.amp_enabled = self.config.get("amp", False) if not importlib.util.find_spec("apex"): self.amp_enabled = False self.model = model self.global_step = 0 self.epoch = 0 if not self.config.get('force_rerun'): maybe_continue_run(self) if config.get("ema", False): self.ema = ModelEmaV2(model, decay=self.config.get('ema_decay', 0.999), device=self.device) else: self.ema = None if self.amp_enabled: self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level="O2", loss_scale="dynamic") if self.world_size > 1: self.model = DDP(self.model, device_ids=[self.local_rank], output_device=self.local_rank, find_unused_parameters=True) mp_context = get_mp_context() self.train_dataloader = DataLoader( train_dataset, batch_size=self.config.batch_size, num_workers=self.config.num_workers, sampler=self.train_sampler, shuffle=True if self.train_sampler is None else False, pin_memory=True, collate_fn=get_collate_fn(config.model_type, config.encoder_length), multiprocessing_context=mp_context ) self.valid_dataloader = DataLoader( valid_dataset, batch_size=self.config.batch_size, num_workers=self.config.num_workers, sampler=self.valid_sampler, pin_memory=True, collate_fn=get_collate_fn(config.model_type, config.encoder_length), multiprocessing_context=mp_context ) # TODO: make it reccursively instantiated if self.config.get("scheduler", None): self.config.scheduler._target_ = self.config.scheduler.target del self.config.scheduler.target self.scheduler = hydra.utils.instantiate(self.config.scheduler, optimizer) else: self.scheduler = None cl_start_horizon = config.get("cl_start_horizon") cl_update = config.get("cl_update") self.criterion = TSPP_criterion_wrapper(criterion, cl_start_horizon, cl_update) self.log_path = self.config.get("log_path", os.getcwd()) def prep_data(self, batch, labels, weights): batch = to_device(batch, device=self.device) labels = to_device(labels, device=self.device) weights = to_device(weights, device=self.device) return batch, labels, weights def validate(self): self.model.eval() self.criterion.eval() with torch.no_grad(): running_losses = 0 for i, (batch, labels, weights) in enumerate(self.valid_dataloader): batch, labels, weights = self.prep_data(batch, labels, weights) if self.ema: preds = self.ema.module(batch) else: preds = self.model(batch) losses = self.criterion(preds, labels, weights=weights) losses = reduce_tensor(losses, self.world_size).detach() running_losses += losses running_losses = running_losses / (len(self.valid_dataloader.dataset) / self.config.batch_size) if len(running_losses.size()) < 1: running_losses = running_losses.unsqueeze(0) running_losses = [loss.item() for loss in running_losses] data = {"val_loss": sum(running_losses)} for i, elem in enumerate(running_losses): data["val_loss_component_" + str(i)] = elem self.logger.log(step=self.global_step, data=data, verbosity=dllogger.Verbosity.VERBOSE) self.model.train() self.criterion.train() return sum(running_losses) def train(self): self.callbacks.on_train_begin() while self.epoch < self.config.num_epochs: self.callbacks.on_epoch_begin(self.epoch) self.logger.log(step=self.global_step, data={"epoch": self.epoch}, 
verbosity=dllogger.Verbosity.VERBOSE) for i, (batch, labels, weights) in enumerate(self.train_dataloader): self.callbacks.on_batch_begin(i) self.optimizer.zero_grad() batch, labels, weights = self.prep_data(batch, labels, weights) preds = self.model(batch) losses = self.criterion(preds, labels, weights=weights) loss = losses.sum() if self.amp_enabled: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() if self.config.get("gradient_norm", 0.0) > 0: nn.utils.clip_grad_norm(self.model.parameters(), self.config.gradient_norm) self.optimizer.step() losses = reduce_tensor(losses, self.world_size, average=True) if len(losses.size()) < 1: losses = [losses] losses = [loss.item() for loss in losses] data = {"loss": loss.item()} for k, v in enumerate(losses): data["loss_component_" + str(k)] = v self.logger.log(step=self.global_step, data=data, verbosity=dllogger.Verbosity.VERBOSE) self.callbacks.on_batch_end(i, logs=data) if self.ema: self.ema.update(self.model) self.global_step += 1 if self.scheduler: self.scheduler.step() self.callbacks.on_valid_begin(self.epoch) validation_loss = self.validate() if validation_loss != validation_loss: #NaN check self._stop_training = True data = {"val_loss": validation_loss} self.callbacks.on_valid_end(self.epoch, logs=data) if self.train_sampler: self.train_sampler.set_epoch(self.epoch) self.valid_sampler.set_epoch(self.epoch) self.callbacks.on_epoch_end(self.epoch, logs=data) if self._stop_training: break self.epoch += 1 self.callbacks.on_train_end(logs=self.metrics) class StatTrainer(Trainer): def __init__(self, config, model, train_dataset, valid_dataset ): self.config = config self.train_dataset = train_dataset self.global_step = 0 self.epoch = 0 self.model = model self.logger = setup_logger(self.config) def train(self): for train_batch in self.train_dataset: self.model.fit(train_batch["endog"], train_batch["exog"]) self.model.save() def validate(self): raise RuntimeError("Validation is not supported for StatTrainer") class XGBTrainer(Trainer): def __init__(self, config, callbacks, model, train_dataset, valid_dataset): ''' The idea behind this trainer is that we are given data at a time step t and want to create models to predict the value of a target from t+1 to t+n. At time step t we have access to every feature including the target, and if we are trying to predict at time step t+i, we have access to the known and static values from there, using the function target_shift. To aid in prediction and give the model access to the history, lag and moving features can be specified in the configs. Lag features can either be specifed by a min value and max value or a list of values. If a min and max value are specified then the range(min, max+1) is used as the list. Moving average (or rolling features) are specified by a window size. These values are added with the feat_adder function. A new model is trained for every step we want to predict. The trainer is not recursive so each model is independent and does not rely on the previous trained models. 
''' self.config = config self.logger = setup_logger(config) self.train_dataset = train_dataset self.valid_dataset = valid_dataset self.patience = callbacks.early_stopping.patience self.log_interval = config.get('log_interval', 25) self.model = model def train(self): for i, ((train_step, labels), (valid_step, valid_labels)) in enumerate(zip(self.train_dataset, self.valid_dataset)): self.model.fit(train_step, labels, valid_step, valid_labels, patience=self.patience, log_interval=self.log_interval) self.model.save(os.getcwd())
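
# Illustrative sketch of the lag/rolling feature construction described in the
# XGBTrainer docstring above. The helper name, the lag_min/lag_max/
# rolling_windows parameters, and the "power" column are assumptions for
# demonstration only; this is not the actual TSPP feat_adder implementation.
import pandas as pd

def add_lag_and_rolling_features(df, target_col, lag_min=1, lag_max=3, rolling_windows=(7,)):
    # Lags taken from range(lag_min, lag_max + 1), mirroring the min/max
    # convention described in the docstring.
    for lag in range(lag_min, lag_max + 1):
        df[f"{target_col}_lag_{lag}"] = df[target_col].shift(lag)
    # Moving averages (rolling features) over the requested window sizes.
    for window in rolling_windows:
        df[f"{target_col}_rolling_{window}"] = df[target_col].rolling(window).mean()
    return df

# Example: a toy series with a single target column.
frame = add_lag_and_rolling_features(pd.DataFrame({"power": range(10)}), "power")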
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/training/trainer.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import os from pathlib import Path os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1" # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file DATALOADER_FN_NAME, BaseLoader, BaseSaver, Format, load_from_file, ) from .deployment_toolkit.extensions import loaders, savers # noqa: E402 module level import not at top of file LOGGER = logging.getLogger("export_model") INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.PYT] OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TS_TRACE, Format.TS_SCRIPT, Format.ONNX] def _get_args(): parser = argparse.ArgumentParser( description="Script for exporting models from supported frameworks.", allow_abbrev=False ) parser.add_argument("--input-path", help="Path to input python module", required=True) parser.add_argument( "--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True ) parser.add_argument("--output-path", help="Path to output model file", required=True) parser.add_argument( "--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True ) parser.add_argument("--dataloader", help="Path to python module containing data loader") parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) parser.add_argument( "--ignore-unknown-parameters", help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)", action="store_true", default=False, ) args, unparsed_args = parser.parse_known_args() Loader: BaseLoader = loaders.get(args.input_type) ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser) if args.input_type == Format.PYT.value and args.output_type == Format.ONNX.value: saver_type = f"{Format.PYT.value}--{Format.ONNX.value}" else: saver_type = args.output_type Saver: BaseSaver = savers.get(saver_type) ArgParserGenerator(Saver).update_argparser(parser) if args.dataloader is not None: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) if args.ignore_unknown_parameters: args, unknown_args = parser.parse_known_args() LOGGER.warning(f"Got additional args {unknown_args}") else: args = parser.parse_args() return args def main(): args = _get_args() log_level = logging.INFO if not args.verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) LOGGER.info("args:") for key, value in vars(args).items(): 
LOGGER.info(f" {key} = {value}") dataloader_fn = None if args.dataloader is not None: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) Loader: BaseLoader = loaders.get(args.input_type) loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args) model = loader.load(args.input_path, dataloader_fn=dataloader_fn, output_type=args.output_type) LOGGER.info("inputs: %s", model.inputs) LOGGER.info("outputs: %s", model.outputs) if args.input_type == Format.PYT.value and args.output_type == Format.ONNX.value: saver_type = f"{Format.PYT.value}--{Format.ONNX.value}" else: saver_type = args.output_type Saver: BaseSaver = savers.get(saver_type) saver = ArgParserGenerator(Saver).from_args(args) saver.save(model, args.output_path, dataloader_fn) if __name__ == "__main__": main()
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/export_model.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import hydra
import numpy as np
from omegaconf import OmegaConf

from triton.deployment_toolkit.core import BaseMetricsCalculator


def update_argparser(parser):
    parser.add_argument("--model-dir", type=str,
                        help="Path to the model directory you would like to use (likely in outputs)", required=True)


class MetricsCalculator(BaseMetricsCalculator):
    def __init__(self, model_dir):
        with open(os.path.join(model_dir, ".hydra/config_merged.yaml"), "rb") as f:
            self.config = OmegaConf.load(f)
        train, valid, test = hydra.utils.call(self.config.dataset)
        del train, valid
        self.evaluator = hydra.utils.call(self.config.evaluator, test_data=test)

        self.predictions = []
        self.targets = []
        self.ids = []
        self.weights = []

    @property
    def metrics(self):
        targets = np.concatenate(self.targets, axis=0)
        predictions = np.concatenate(self.predictions, axis=0)
        weights = np.concatenate(self.weights, axis=0)
        ids = np.concatenate(self.ids, axis=0)
        if np.isnan(weights).any():
            weights = np.empty([0])
        return self.evaluator.evaluate(targets, predictions, ids, weights)

    def update(
        self,
        ids,
        y_pred,
        x,
        y_real,
    ):
        # can probably just pass all of this to the evaluator main class
        self.targets.append(y_real['target__0'][:, :, 0][:, :, np.newaxis])
        self.ids.append(ids)
        self.weights.append(x["weight__9"])
        preds = y_pred["target__0"]
        self.predictions.append(preds)
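
# A self-contained toy version of the accumulate-then-concatenate pattern used
# by MetricsCalculator above: per-batch arrays are buffered in update() and
# only concatenated when the metrics property is read. The MAE evaluator here
# is a stand-in assumption for the Hydra-instantiated evaluator.
import numpy as np

class ToyMetricsCalculator:
    def __init__(self):
        self.predictions, self.targets = [], []

    def update(self, y_pred, y_real):
        # Buffer per-batch arrays; concatenation is deferred until needed.
        self.predictions.append(y_pred)
        self.targets.append(y_real)

    @property
    def metrics(self):
        preds = np.concatenate(self.predictions, axis=0)
        targets = np.concatenate(self.targets, axis=0)
        return {"MAE": float(np.abs(preds - targets).mean())}

calc = ToyMetricsCalculator()
calc.update(np.ones((4, 2)), np.zeros((4, 2)))
calc.update(np.ones((4, 2)), np.ones((4, 2)))
assert calc.metrics == {"MAE": 0.5}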
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/metrics.py
#!/usr/bin/env python3 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" Using `calculate_metrics.py` script, you can obtain model accuracy/error metrics using defined `MetricsCalculator` class. Data provided to `MetricsCalculator` are obtained from dump files stored in directory pointed by `--dump-dir` argument. Above files are prepared by `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts. Output data is stored in csv file pointed by `--csv` argument. Example call: ```shell script python ./triton/calculate_metrics.py \ --dump-dir /results/dump_triton \ --csv /results/accuracy_results.csv \ --metrics metrics.py \ --metric-class-param1 value ``` """ import argparse import csv import logging import string from pathlib import Path # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file from .deployment_toolkit.dump import JsonDumpReader LOGGER = logging.getLogger("calculate_metrics") TOTAL_COLUMN_NAME = "_total_" def main(): logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False) parser.add_argument("--metrics", help="Path to python module containing metrics calculator", required=True) parser.add_argument("--csv", help="Path to csv file", required=True) parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True) args, *_ = parser.parse_known_args() MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator") ArgParserGenerator(MetricsCalculator).update_argparser(parser) args = parser.parse_args() LOGGER.info("args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator") metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args) reader = JsonDumpReader(args.dump_dir) for ids, x, y_true, y_pred in reader.iterate_over(["ids", "inputs", "labels", "outputs"]): ids = list(ids["ids"]) if ids is not None else None metrics_calculator.update(ids=ids, x=x, y_pred=y_pred, y_real=y_true) metrics = metrics_calculator.metrics metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])] if metric_names_with_space: raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}") LOGGER.info("Results:") for key, value in metrics.items(): LOGGER.info(f" {key}: {value}") csv_path = Path(args.csv) csv_path.parent.mkdir(parents=True, exist_ok=True) with csv_path.open("w") as csv_file: writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys())) writer.writeheader() writer.writerow(metrics) if __name__ == "__main__": main()
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/calculate_metrics.py
#!/usr/bin/env python3 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import os from pathlib import Path from tqdm import tqdm import numpy as np os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1" # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file DATALOADER_FN_NAME, BaseLoader, BaseRunner, Model, load_from_file, ) from .deployment_toolkit.extensions import loaders, runners # noqa: E402 module level import not at top of file from .model import get_model LOGGER = logging.getLogger("check_accuracy") def _get_args(): parser = argparse.ArgumentParser( description="Script for checking accuracy of export and conversion.", allow_abbrev=False ) parser.add_argument("--native-model", help="Path to native model", required=True) parser.add_argument("--native-type", help="Native model type", required=True) parser.add_argument("--export-model", help="Path to exported model", required=True) parser.add_argument("--export-type", help="Exported model type", required=True) parser.add_argument("--convert-model", help="Path to converted model", required=True) parser.add_argument("--convert-type", help="Converted model type", required=True) parser.add_argument("--dataloader", help="Path to python module containing data loader", required=True) parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) parser.add_argument( "--ignore-unknown-parameters", help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)", action="store_true", default=False, ) args, unparsed_args = parser.parse_known_args() Loader: BaseLoader = loaders.get(args.native_type) ArgParserGenerator(Loader, module_path=args.native_model).update_argparser(parser) Runner: BaseRunner = runners.get(args.native_type) ArgParserGenerator(Runner).update_argparser(parser) Loader: BaseLoader = loaders.get(args.export_type) ArgParserGenerator(Loader, module_path=args.export_model).update_argparser(parser) Runner: BaseRunner = runners.get(args.export_type) ArgParserGenerator(Runner).update_argparser(parser) if args.convert_type != 'trt': Loader: BaseLoader = loaders.get(args.convert_type) ArgParserGenerator(Loader, module_path=args.convert_model).update_argparser(parser) Runner: BaseRunner = runners.get(args.convert_type) ArgParserGenerator(Runner).update_argparser(parser) if args.dataloader is not None: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) if args.ignore_unknown_parameters: args, unknown_args = parser.parse_known_args() 
LOGGER.warning(f"Got additional args {unknown_args}") else: args = parser.parse_args() return args def main(): args = _get_args() log_level = logging.INFO if not args.verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) LOGGER.info("args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") LOGGER.info(f"Loading {args.native_model}") Runner: BaseRunner = runners.get(args.native_type) runner_native = ArgParserGenerator(Runner).from_args(args) model_native, _ = get_model(model_dir= args.native_model) model_native = Model(handle=model_native, precision=None, inputs=None, outputs=['target__0']) LOGGER.info(f"Loading {args.export_model}") Loader: BaseLoader = loaders.get(args.export_type) Runner: BaseRunner = runners.get(args.export_type) loader = ArgParserGenerator(Loader, module_path=args.export_model).from_args(args) runner_export = ArgParserGenerator(Runner).from_args(args) model_export = loader.load(args.export_model) if args.convert_type != 'trt': LOGGER.info(f"Loading {args.convert_model}") Loader: BaseLoader = loaders.get(args.convert_type) Runner: BaseRunner = runners.get(args.convert_type) loader = ArgParserGenerator(Loader, module_path=args.convert_model).from_args(args) runner_convert = ArgParserGenerator(Runner).from_args(args) model_convert = loader.load(args.convert_model) get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) ids, x, y_real = next(dataloader_fn()) with runner_native.init_inference(model=model_native) as runner_session: y_pred_native = runner_session(x) del model_native del runner_native with runner_export.init_inference(model=model_export) as runner_session: y_pred_export = runner_session(x) del model_export del runner_export e1 = [np.linalg.norm(y_pred_native[k]-y_pred_export[k]) for k in y_pred_native.keys()] assert all([i < 1e-3 for i in e1]), "Error between native and export is {}, limit is 1e-3".format(e1) if args.convert_type != 'trt': with runner_convert.init_inference(model=model_convert) as runner_session: y_pred_convert = runner_session(x) e2 = [np.linalg.norm(y_pred_convert[k]-y_pred_export[k]) for k in y_pred_native.keys()] assert all([i < 1e-3 for i in e2]), "Error between export and convert is {}, limit is 1e-3".format(e2) if __name__ == "__main__": main()
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/check_accuracy.py
#!/usr/bin/env python3 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" To infer the model deployed on Triton, you can use `run_inference_on_triton.py` script. It sends a request with data obtained from pointed data loader and dumps received data into dump files. Those files are stored in directory pointed by `--output-dir` argument. Currently, the client communicates with the Triton server asynchronously using GRPC protocol. Example call: ```shell script python ./triton/run_inference_on_triton.py \ --server-url localhost:8001 \ --model-name ResNet50 \ --model-version 1 \ --dump-labels \ --output-dir /results/dump_triton ``` """ import argparse import functools import logging import queue import threading import time import traceback from pathlib import Path from typing import Optional from tqdm import tqdm # pytype: disable=import-error try: from tritonclient import utils as client_utils # noqa: F401 from tritonclient.grpc import InferenceServerClient, InferInput, InferRequestedOutput except ImportError: from tritongrpcclient import InferenceServerClient, InferInput, InferRequestedOutput # pytype: enable=import-error # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file from .deployment_toolkit.dump import JsonDumpWriter LOGGER = logging.getLogger("run_inference_on_triton") class SyncGRPCTritonRunner: DEFAULT_MAX_RESP_WAIT_S = 120 def __init__( self, server_url: str, model_name: str, model_version: str, *, dataloader, verbose=False, resp_wait_s: Optional[float] = None, ): self._server_url = server_url self._model_name = model_name self._model_version = model_version self._dataloader = dataloader self._verbose = verbose self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s def __iter__(self): client = InferenceServerClient(self._server_url, verbose=self._verbose) error = self._verify_triton_state(client) if error: raise RuntimeError(f"Could not communicate to Triton Server: {error}") LOGGER.debug( f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!" 
) model_config = client.get_model_config(self._model_name, self._model_version) model_metadata = client.get_model_metadata(self._model_name, self._model_version) LOGGER.info(f"Model config {model_config}") LOGGER.info(f"Model metadata {model_metadata}") inputs = {tm.name: tm for tm in model_metadata.inputs} outputs = {tm.name: tm for tm in model_metadata.outputs} output_names = list(outputs) outputs_req = [InferRequestedOutput(name) for name in outputs] for ids, x, y_real in self._dataloader: infer_inputs = [] for name in inputs: data = x[name] infer_input = InferInput(name, data.shape, inputs[name].datatype) target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype) data = data.astype(target_np_dtype) infer_input.set_data_from_numpy(data) infer_inputs.append(infer_input) results = client.infer( model_name=self._model_name, model_version=self._model_version, inputs=infer_inputs, outputs=outputs_req, client_timeout=self._response_wait_t, ) y_pred = {name: results.as_numpy(name) for name in output_names} yield ids, x, y_pred, y_real def _verify_triton_state(self, triton_client): if not triton_client.is_server_live(): return f"Triton server {self._server_url} is not live" elif not triton_client.is_server_ready(): return f"Triton server {self._server_url} is not ready" elif not triton_client.is_model_ready(self._model_name, self._model_version): return f"Model {self._model_name}:{self._model_version} is not ready" return None class AsyncGRPCTritonRunner: DEFAULT_MAX_RESP_WAIT_S = 120 DEFAULT_MAX_UNRESP_REQS = 128 DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min def __init__( self, server_url: str, model_name: str, model_version: str, *, dataloader, verbose=False, resp_wait_s: Optional[float] = None, max_unresponded_reqs: Optional[int] = None, ): self._server_url = server_url self._model_name = model_name self._model_version = model_version self._dataloader = dataloader self._verbose = verbose self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s self._max_unresp_reqs = self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_reqs is None else max_unresponded_reqs self._results = queue.Queue() self._processed_all = False self._errors = [] self._num_waiting_for = 0 self._sync = threading.Condition() self._req_thread = threading.Thread(target=self.req_loop, daemon=True) def __iter__(self): self._req_thread.start() timeout_s = 0.050 # check flags processed_all and error flags every 50ms while True: try: ids, x, y_pred, y_real = self._results.get(timeout=timeout_s) yield ids, x, y_pred, y_real except queue.Empty: shall_stop = self._processed_all or self._errors if shall_stop: break LOGGER.debug("Waiting for request thread to stop") self._req_thread.join() if self._errors: error_msg = "\n".join(map(str, self._errors)) raise RuntimeError(error_msg) def _on_result(self, ids, x, y_real, output_names, result, error): with self._sync: request_id = str(ids[0]) NOT_MATCHING_REQUEST_ID_MSG = ( "Error during processing result - request_id doesn't match. This shouldn't have happened." 
) if error: response_id = error.get_response().id if response_id != request_id: raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG) self._errors.append(error) else: response_id = result.get_response().id if response_id != request_id: raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG) y_pred = {name: result.as_numpy(name) for name in output_names} self._results.put((ids, x, y_pred, y_real)) self._num_waiting_for -= 1 self._sync.notify_all() def req_loop(self): client = InferenceServerClient(self._server_url, verbose=self._verbose) self._errors = self._verify_triton_state(client) if self._errors: return LOGGER.debug( f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!" ) model_config = client.get_model_config(self._model_name, self._model_version) model_metadata = client.get_model_metadata(self._model_name, self._model_version) LOGGER.info(f"Model config {model_config}") LOGGER.info(f"Model metadata {model_metadata}") inputs = {tm.name: tm for tm in model_metadata.inputs} outputs = {tm.name: tm for tm in model_metadata.outputs} output_names = list(outputs) self._num_waiting_for = 0 for ids, x, y_real in self._dataloader: infer_inputs = [] for name in inputs: data = x[name] infer_input = InferInput(name, data.shape, inputs[name].datatype) target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype) data = data.astype(target_np_dtype) infer_input.set_data_from_numpy(data) infer_inputs.append(infer_input) outputs_req = [InferRequestedOutput(name) for name in outputs] with self._sync: def _check_can_send(): return self._num_waiting_for < self._max_unresp_reqs can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t) if not can_send: error_msg = f"Runner could not send new requests for {self._response_wait_t}s" self._errors.append(error_msg) self._sync.notify_all() break request_id = str(ids[0]) callback = functools.partial(AsyncGRPCTritonRunner._on_result, self, ids, x, y_real, output_names) client.async_infer( model_name=self._model_name, model_version=self._model_version, inputs=infer_inputs, outputs=outputs_req, callback=callback, request_id=request_id, ) self._num_waiting_for += 1 self._sync.notify_all() # wait till receive all requested data with self._sync: def _all_processed(): LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs") return self._num_waiting_for == 0 self._processed_all = self._sync.wait_for(_all_processed, self.DEFAULT_MAX_FINISH_WAIT_S) if not self._processed_all: error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server" self._errors.append(error_msg) self._sync.notify_all() LOGGER.debug("Finished request thread") def _verify_triton_state(self, triton_client): errors = [] if not triton_client.is_server_live(): errors.append(f"Triton server {self._server_url} is not live") elif not triton_client.is_server_ready(): errors.append(f"Triton server {self._server_url} is not ready") elif not triton_client.is_model_ready(self._model_name, self._model_version): errors.append(f"Model {self._model_name}:{self._model_version} is not ready") return errors def _parse_args(): parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False) parser.add_argument( "--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)" ) parser.add_argument("--model-name", help="The name of the model used for inference.", required=True) parser.add_argument("--model-version", help="The version 
of the model used for inference.", required=True)
    parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
    parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
    parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
    parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
    parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved")
    parser.add_argument(
        "--response-wait-time", required=False, help="Maximal time to wait for response", default=120, type=float
    )
    parser.add_argument(
        "--max-unresponded-requests",
        required=False,
        help="Maximal number of unresponded requests",
        default=128,
        type=int,
    )
    parser.add_argument(
        "--synchronous", help="Enable synchronous calls to Triton Server", action="store_true", default=False
    )

    args, *_ = parser.parse_known_args()

    get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
    ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
    args = parser.parse_args()

    return args


def main():
    args = _parse_args()

    log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
    log_level = logging.INFO if not args.verbose else logging.DEBUG
    logging.basicConfig(level=log_level, format=log_format)

    LOGGER.info("args:")
    for key, value in vars(args).items():
        LOGGER.info(f" {key} = {value}")

    get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
    dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)

    try:
        if args.synchronous:
            runner = SyncGRPCTritonRunner(
                args.server_url,
                args.model_name,
                args.model_version,
                dataloader=dataloader_fn(),
                verbose=False,
                resp_wait_s=args.response_wait_time,
            )
        else:
            runner = AsyncGRPCTritonRunner(
                args.server_url,
                args.model_name,
                args.model_version,
                dataloader=dataloader_fn(),
                verbose=False,
                resp_wait_s=args.response_wait_time,
                max_unresponded_reqs=args.max_unresponded_requests,
            )
    except Exception as e:
        message = traceback.format_exc()
        LOGGER.error(f"Encountered exception \n{message}")
        raise e

    with JsonDumpWriter(output_dir=args.output_dir) as writer:
        start = time.time()
        for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10):
            data = _verify_and_format_dump(args, ids, x, y_pred, y_real)
            writer.write(**data)
        stop = time.time()

    LOGGER.info(f"\nThe inference took {stop - start:0.3f}s")


def _verify_and_format_dump(args, ids, x, y_pred, y_real):
    data = {"outputs": y_pred, "ids": {"ids": ids}}
    if args.dump_inputs:
        data["inputs"] = x
    if args.dump_labels:
        if not y_real:
            raise ValueError(
                "Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
            )
        data["labels"] = y_real
    return data


if __name__ == "__main__":
    main()
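
# The SyncGRPCTritonRunner above wraps the standard tritonclient request flow.
# Stripped to its essentials, one synchronous gRPC inference looks roughly
# like this sketch. It assumes a Triton server reachable at localhost:8001
# serving a model named "my_model"; both, and the input shape, are
# placeholders.
import numpy as np
from tritonclient import utils as client_utils
from tritonclient.grpc import InferenceServerClient, InferInput, InferRequestedOutput

client = InferenceServerClient("localhost:8001")
metadata = client.get_model_metadata("my_model", "1")

# Build one input from a random batch, honoring the datatype the server reports.
first_input = metadata.inputs[0]
data = np.random.rand(1, 48, 1).astype(client_utils.triton_to_np_dtype(first_input.datatype))
infer_input = InferInput(first_input.name, data.shape, first_input.datatype)
infer_input.set_data_from_numpy(data)

result = client.infer(
    model_name="my_model",
    model_version="1",
    inputs=[infer_input],
    outputs=[InferRequestedOutput(out.name) for out in metadata.outputs],
)
y_pred = {out.name: result.as_numpy(out.name) for out in metadata.outputs}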
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/run_inference_on_triton.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch import torch.nn as nn import hydra from typing import Dict, Tuple, Optional, List from omegaconf import OmegaConf def update_argparser(parser): parser.add_argument("--model-dir", type=str, help="Path to the model directory you would like to use (likely in outputs)", required=True) class ModelWrapper(nn.Module): def __init__(self, model, test_func): super().__init__() self.model = model self.test_func = test_func def unwrap(self, t): if not torch.isnan(t).any(): return t return None def forward(self, s_cat, s_cont, k_cat, k_cont, o_cat, o_cont, target, sample_weight, id, weight): wrapped_input = {} wrapped_input['s_cat'] = self.unwrap(s_cat) wrapped_input['s_cont'] = self.unwrap(s_cont) wrapped_input['k_cat'] = self.unwrap(k_cat) wrapped_input['k_cont'] = self.unwrap(k_cont) wrapped_input['o_cat'] = self.unwrap(o_cat) wrapped_input['o_cont'] = self.unwrap(o_cont) wrapped_input['sample_weight'] = self.unwrap(sample_weight) wrapped_input['target'] = target wrapped_input['id'] = id if id.numel() else None wrapped_input['weight'] = self.unwrap(weight) output = self.test_func(wrapped_input) return output def get_model(**args): #get model config with open(os.path.join(args['model_dir'], ".hydra/config_merged.yaml"), "rb") as f: config = OmegaConf.load(f) os.environ["TFT_SCRIPTING"] = "True" state_dict = torch.load(os.path.join(args['model_dir'], "best_checkpoint.zip"))['model_state_dict'] model = hydra.utils.instantiate(config.model) test_method_name = 'predict' if hasattr(model, "predict") else '__call__' test_method = getattr(model, test_method_name) #load model model.load_state_dict(state_dict) model.eval() model.cuda() model = ModelWrapper(model, test_method).cuda() tensor_names = { "inputs": ['s_cat__0', 's_cont__1', 'k_cat__2', 'k_cont__3', 'o_cat__4', 'o_cont__5', 'target__6', 'sample_weight__7', 'id__8', 'weight__9'], "outputs": ["target__0"] } return model, tensor_names
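
# ModelWrapper.unwrap above treats an all-NaN tensor as "this optional input
# is absent", which lets the exported graph keep a fixed signature. A small
# sketch of both sides of that convention; the (1, 1) sentinel shape mirrors
# the np.full([bs, 1], np.nan) placeholders in triton/dataloader.py and is an
# assumption here.
import torch

def wrap(t):
    # Encode a missing optional tensor as a NaN-filled placeholder.
    return t if t is not None else torch.full((1, 1), float("nan"))

def unwrap(t):
    # Decode: any NaN present means the input was absent.
    return t if not torch.isnan(t).any() else None

present = torch.ones(2, 3)
assert unwrap(wrap(present)) is present
assert unwrap(wrap(None)) is None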
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/model.py
#!/usr/bin/env python3 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" To infer the model on framework runtime, you can use `run_inference_on_fw.py` script. It infers data obtained from pointed data loader locally and saves received data into dump files. Those files are stored in directory pointed by `--output-dir` argument. Example call: ```shell script python ./triton/run_inference_on_fw.py \ --input-path /models/exported/model.onnx \ --input-type onnx \ --dataloader triton/dataloader.py \ --data-dir /data/imagenet \ --batch-size 32 \ --output-dir /results/dump_local \ --dump-labels ``` """ import argparse import logging import os from pathlib import Path from tqdm import tqdm # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0" from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file DATALOADER_FN_NAME, BaseLoader, BaseRunner, load_from_file, ) from .deployment_toolkit.dump import JsonDumpWriter # noqa: E402 module level import not at top of file from .deployment_toolkit.extensions import loaders, runners # noqa: E402 module level import not at top of file LOGGER = logging.getLogger("run_inference_on_fw") def _verify_and_format_dump(args, ids, x, y_pred, y_real): data = {"outputs": y_pred, "ids": {"ids": ids}} if args.dump_inputs: data["inputs"] = x if args.dump_labels: if not y_real: raise ValueError( "Found empty label values. 
Please provide labels in dataloader_fn or do not use --dump-labels argument"
        )
        data["labels"] = y_real
    return data


def _parse_and_validate_args():
    supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions)

    parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False)
    parser.add_argument("--input-path", help="Path to input model", required=True)
    parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True)
    parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
    parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True)
    parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
    parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
    parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)

    args, *_ = parser.parse_known_args()

    get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
    ArgParserGenerator(get_dataloader_fn).update_argparser(parser)

    Loader: BaseLoader = loaders.get(args.input_type)
    ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)

    Runner: BaseRunner = runners.get(args.input_type)
    ArgParserGenerator(Runner).update_argparser(parser)

    args = parser.parse_args()

    types_requiring_io_params = []

    if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]):
        parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters")

    return args


def main():
    args = _parse_and_validate_args()

    log_level = logging.INFO if not args.verbose else logging.DEBUG
    log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
    logging.basicConfig(level=log_level, format=log_format)

    LOGGER.info("args:")
    for key, value in vars(args).items():
        LOGGER.info(f" {key} = {value}")

    Loader: BaseLoader = loaders.get(args.input_type)
    Runner: BaseRunner = runners.get(args.input_type)

    loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
    runner = ArgParserGenerator(Runner).from_args(args)
    LOGGER.info(f"Loading {args.input_path}")
    model = loader.load(args.input_path)
    with runner.init_inference(model=model) as runner_session, JsonDumpWriter(args.output_dir) as writer:
        get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
        dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
        LOGGER.info("Data loader initialized; Running inference")
        for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10):
            y_pred = runner_session(x)
            data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real)
            writer.write(**data)
        LOGGER.info("Inference finished")


if __name__ == "__main__":
    main()
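
# _verify_and_format_dump above fixes the on-disk layout later consumed by
# calculate_metrics.py via JsonDumpReader.iterate_over(["ids", "inputs",
# "labels", "outputs"]). A minimal illustration of the dictionary handed to
# JsonDumpWriter.write; array contents and shapes are placeholders.
import numpy as np

ids = np.array([0, 1, 2, 3])
x = {"target__6": np.zeros((4, 48, 1), dtype=np.float32)}
y_pred = {"target__0": np.zeros((4, 24, 1), dtype=np.float32)}
y_real = {"target__0": np.zeros((4, 24, 1), dtype=np.float32)}
data = {"outputs": y_pred, "ids": {"ids": ids}, "inputs": x, "labels": y_real}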
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/run_inference_on_fw.py
import os
import subprocess

import dllogger
import hydra
import numpy as np
import tritonclient.grpc as triton_grpc
import tritonclient.http as triton_http
import xgboost as xgb

from loggers.log_helper import setup_logger


def generate_config(
        model_name,
        *,
        features=31,
        predict_proba=False,
        batching_window=100,
        max_batch_size=8192,
        storage_type="AUTO"):
    """Return a string with the full Triton config.pbtxt for this model."""
    model_format = 'xgboost'
    # This deployment path targets GPU execution.
    instance_kind = 'KIND_GPU'
    # The FIL backend expects a lowercase string flag for output_class;
    # derive it from the parameter instead of overriding it unconditionally.
    predict_proba = 'true' if predict_proba else 'false'
    return f"""name: "{model_name}"
backend: "fil"
max_batch_size: {max_batch_size}
input [
 {{
  name: "input__0"
  data_type: TYPE_FP32
  dims: [ {features} ]
 }}
]
output [
 {{
  name: "output__0"
  data_type: TYPE_FP32
  dims: [ 1 ]
 }}
]
instance_group [{{ kind: {instance_kind} }}]
parameters [
 {{
  key: "model_type"
  value: {{ string_value: "{model_format}" }}
 }},
 {{
  key: "output_class"
  value: {{ string_value: "{predict_proba}" }}
 }},
 {{
  key: "format"
  value: {{ string_value: "{model_format}" }}
 }}
]
dynamic_batching {{
 max_queue_delay_microseconds: {batching_window}
}}"""


def format_checkpoint(ckpt, total_features, max_batch_size):
    main_output_path = ckpt
    # TODO: hardcoded the num features
    # make deployment
    checkpoint_path = os.path.join(main_output_path, 'checkpoints')
    # make navigator_workspace
    os.makedirs(os.path.join(main_output_path, 'deployment'), exist_ok=True)
    os.makedirs(os.path.join(main_output_path, 'deployment', 'navigator_workspace'), exist_ok=True)
    os.makedirs(os.path.join(main_output_path, 'deployment', 'navigator_workspace', 'model-store'), exist_ok=True)
    # make model-store
    model_store_path = os.path.join(main_output_path, 'deployment', 'navigator_workspace', 'model-store')
    # iterate over the models
    for ckpt in os.listdir(checkpoint_path):
        # load model
        model = xgb.Booster()
        model.load_model(os.path.join(checkpoint_path, ckpt))
        model_name = ckpt.split(".")[0]
        os.makedirs(os.path.join(model_store_path, model_name), exist_ok=True)
        os.makedirs(os.path.join(model_store_path, model_name, '1'), exist_ok=True)
        model.save_model(os.path.join(model_store_path, model_name, '1', 'xgboost.model'))
        # grab the config
        triton_config = generate_config(model_name=model_name, features=total_features, max_batch_size=max_batch_size)
        # put in model-store
        config_path = os.path.join(os.path.join(model_store_path, model_name), 'config.pbtxt')
        with open(config_path, 'w') as config_file:
            config_file.write(triton_config)


def run_XGBoost_triton(cfg, config):
    ckpt = cfg.checkpoint
    max_batch_size = cfg.batch_size
    tspp_main_dir = os.path.sep + os.path.join(*(os.getcwd().split(os.path.sep)[:-3]))
    # need the extra inference stuff
    train, valid, test = hydra.utils.call(config.dataset)
    del train, valid
    dataloader = test
    original_features = len(test.data.columns)
    total_features = original_features + len(test.known) - len(test.target) + 1
    format_checkpoint(ckpt, total_features, max_batch_size)
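
# format_checkpoint above lays out one Triton FIL model per forecast step.
# Sketch of the resulting tree (paths are those built by the function):
#
#   <checkpoint>/deployment/navigator_workspace/model-store/
#       <model_name>/
#           config.pbtxt          <- generate_config(...) output
#           1/
#               xgboost.model     <- re-saved XGBoost booster
#
# An illustrative render of the config for a hypothetical 31-feature model:
if __name__ == "__main__":
    print(generate_config("xgb_step_1", features=31, max_batch_size=8192))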
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/xgboost_triton.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch import torch.nn as nn import hydra from torch.utils.data import DataLoader import numpy as np from omegaconf import OmegaConf import dgl from data.datasets import get_collate_fn from data.data_utils import Preprocessor def update_argparser(parser): parser.add_argument("--model-dir", type=str, help="Path to the model directory you would like to use (likely in outputs)", required=True) parser.add_argument("--batch-size", type=int, required=True) def get_dataloader_fn(model_dir, batch_size): with open(os.path.join(model_dir, ".hydra/config_merged.yaml"), "rb") as f: config = OmegaConf.load(f) if config.inference.get("dataset_path", None): preprocessor = Preprocessor(config.dataset.config) if config.inference.get("preproc_state_path", None): preprocessor_state_file = config.inference.preproc_state_path else: preprocessor_state_file = None preprocessor.load_state(preprocessor_state_file) test_df = preprocessor.preprocess_test(dataset=config.inference.dataset_path) test_df = preprocessor.apply_scalers(test_df) test_df = preprocessor.impute(test_df) train, valid, test = hydra.utils.call(config.dataset, input_df=test_df) else: train, valid, test = hydra.utils.call(config.dataset) del train del valid input_names_dict = {'s_cat': 's_cat__0', 's_cont':'s_cont__1', 'k_cat':'k_cat__2', 'k_cont':'k_cont__3', 'o_cat':'o_cat__4', 'o_cont':'o_cont__5', 'target':'target__6', 'sample_weight': 'sample_weight__7', 'id':'id__8'} if config.model.config.get("quantiles", None): tile_num = len(config.model.config.quantiles) else: tile_num = 1 data_loader = DataLoader( test, batch_size=int(batch_size), num_workers=1, pin_memory=True, collate_fn=get_collate_fn(config.trainer.config.model_type, config.trainer.config.encoder_length, test=True), ) def _get_dataloader(): for step, (batch, labels, _) in enumerate(data_loader): bs = batch['target'].shape[0] x = {input_names_dict[key]: batch[key].numpy() if batch[key].numel() else np.full([bs, 1], np.nan) for key in input_names_dict.keys()} weights = batch.ndata['weight'] if isinstance(batch, dgl.DGLGraph) else batch['weight'] x['weight__9']= weights[:, config.dataset.config.encoder_length:, :].numpy() if weights is not None and weights.numel() else np.full([bs, 1], np.nan) ids = batch.ndata['id'] if isinstance(batch, dgl.DGLGraph) else batch["id"] ids = ids[:,0].numpy() y_real = {'target__0':np.tile(labels.numpy(), (1, 1, tile_num))} #Probably need to expand the final dims here as well yield (ids, x, y_real) return _get_dataloader
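
# Illustrative local use of the get_dataloader_fn factory defined above; the
# model directory is a placeholder path and must point at a real Hydra run
# directory for this to execute.
if __name__ == "__main__":
    dataloader_fn = get_dataloader_fn(model_dir="outputs/2022-01-01/12-00-00", batch_size=64)
    for ids, x, y_real in dataloader_fn():
        # ids: numpy array of series identifiers for the batch
        # x: dict of numpy inputs keyed 's_cat__0' ... 'weight__9'
        # y_real: {'target__0': labels tiled across quantiles}
        break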
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/dataloader.py
#!/usr/bin/env python3 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import csv import logging import os import pathlib import shutil import sys from distutils.version import LooseVersion from enum import Enum from typing import Any, Dict, List import yaml # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .deployment_toolkit.core import BatchingMode, EvaluationMode, MeasurementMode, OfflineMode, PerformanceTool from .deployment_toolkit.model_analyzer import ModelAnalyzer, ModelAnalyzerConfig, ModelAnalyzerMode from .deployment_toolkit.perf_analyzer import PerfAnalyzer, PerfAnalyzerConfig from .deployment_toolkit.report import save_results, show_results, sort_results from .deployment_toolkit.utils import parse_server_url from .deployment_toolkit.warmup import performance_evaluation_warmup LOGGER = logging.getLogger("run_performance_on_triton") if LooseVersion(sys.version) >= LooseVersion("3.8.0"): from importlib.metadata import version TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient")) TRITON_MODEL_ANALYZER_VERSION = LooseVersion(version("triton-model-analyzer")) else: import pkg_resources TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version) TRITON_MODEL_ANALYZER_VERSION = LooseVersion(pkg_resources.get_distribution("triton-model-analyzer").version) def _log_dict(title: str, dict_: Dict[str, Any]): LOGGER.info(title) for key, value in dict_.items(): LOGGER.info(f"\t{key} = {value}") def _calculate_average_latency(r): avg_sum_fields = [ "Client Send", "Network+Server Send/Recv", "Server Queue", "Server Compute", "Server Compute Input", "Server Compute Infer", "Server Compute Output", "Client Recv", ] avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields]) return avg_latency def _update_performance_data(results: List, batch_size: int, performance_partial_file: str): row: Dict = {"Batch": batch_size} with open(performance_partial_file) as csvfile: reader = csv.DictReader(csvfile) for r in reader: avg_latency = _calculate_average_latency(r) row = {**row, **r, "avg latency": avg_latency} results.append(row) def _model_analyzer_evaluation( server_url: str, model_name: str, input_data: str, input_shapes: List[str], batch_sizes: List[int], number_of_triton_instances: int, number_of_model_instances: int, measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, concurrency_steps: int, batching_mode: BatchingMode, evaluation_mode: EvaluationMode, offline_mode: OfflineMode, model_repository: str, result_path: pathlib.Path, output_shared_memory_size: int = 102400, verbose: bool = False, ): _log_dict( "Selected configuration", { "server_url": server_url, "model_name": model_name, "input_data": input_data, "input_shapes": input_shapes, "batch_sizes": batch_sizes, "number_of_triton_instances": number_of_triton_instances, "number_of_model_instances": 
number_of_model_instances, "measurement_mode": measurement_mode, "measurement_interval": measurement_interval, "measurement_request_count": measurement_request_count, "concurrency_steps": concurrency_steps, "batching_mode": batching_mode, "evaluation_mode": evaluation_mode, "offline_mode": offline_mode, "output_shared_memory_size": output_shared_memory_size, "model_repository": model_repository, "result_path": result_path, "verbose": verbose, }, ) perf_analyzer_config = { "measurement-interval": measurement_interval, } if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"): perf_analyzer_config["input-data"] = [input_data] else: perf_analyzer_config["input-data"] = input_data if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"): perf_analyzer_config["measurement-mode"] = measurement_mode.value perf_analyzer_config["measurement-request-count"] = measurement_request_count if evaluation_mode == EvaluationMode.OFFLINE: perf_analyzer_config["shared-memory"] = offline_mode.value perf_analyzer_config["output-shared-memory-size"] = output_shared_memory_size if input_shapes: if TRITON_MODEL_ANALYZER_VERSION > LooseVersion("1.8.0"): perf_analyzer_config["shape"] = input_shapes else: perf_analyzer_config["shape"] = input_shapes[0] LOGGER.warning("Model Analyzer <= 1.8.0 support only single shape param for Perf Analyzer.") if batching_mode == BatchingMode.STATIC: batch_sizes = batch_sizes concurrency = [number_of_triton_instances] elif batching_mode == BatchingMode.DYNAMIC: max_batch_size = max(batch_sizes) max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances max_concurrency = min(256, max_total_requests) step = max(1, max_concurrency // concurrency_steps) min_concurrency = step concurrency = {"start": min_concurrency, "stop": max_concurrency, "step": step} batch_sizes = [max(1, max_total_requests // 256)] else: raise ValueError(f"Unsupported batching mode: {batching_mode}") protocol, host, port = parse_server_url(server_url) checkpoints = pathlib.Path("./checkpoints") if checkpoints.is_dir(): shutil.rmtree(checkpoints.as_posix()) checkpoints.mkdir(parents=True, exist_ok=True) config = { "model_repository": model_repository, "triton_launch_mode": "remote", "run_config_search_disable": True, "perf_analyzer_flags": perf_analyzer_config, "perf_analyzer_timeout": 3600, # Workaround for Perf Analyzer timeout - use 1h "profile_models": [model_name], "batch_sizes": batch_sizes, "concurrency": concurrency, "verbose": verbose, "checkpoint_directory": checkpoints.as_posix(), "override_output_model_repository": True, "client_protocol": protocol, f"triton_{protocol}_endpoint": f"{host}:{port}", } if verbose: _log_dict("Model Analyzer profiling configuration", config) with open("config.yaml", "w") as file: yaml.safe_dump(config, file) config = ModelAnalyzerConfig() model_analyzer = ModelAnalyzer(config=config) model_analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=verbose) result_path.mkdir(parents=True, exist_ok=True) for file in checkpoints.iterdir(): if not file.is_file() or file.suffix != ".ckpt": continue LOGGER.info(f"Moving checkpoint {file.name} to {result_path}") shutil.move(file, result_path / file.name) inference_output_fields = [ "batch_size", "concurrency", "perf_throughput", "perf_latency", "perf_client_send_recv", "perf_client_response_wait", "perf_server_queue", "perf_server_compute_input", "perf_server_compute_infer", "perf_server_compute_output", ] gpu_output_fields = [ "gpu_uuid", "batch_size", "concurrency", "gpu_used_memory", "gpu_free_memory", 
"gpu_utilization", "gpu_power_usage", ] filename_model_inference = "metrics-model-inference.csv" filename_model_gpu = "metrics-model-gpu.csv" config = { "analysis_models": model_name, "checkpoint_directory": result_path.as_posix(), "export_path": "/tmp", "inference_output_fields": inference_output_fields, "gpu_output_fields": gpu_output_fields, "filename_model_inference": filename_model_inference, "filename_model_gpu": filename_model_gpu, "summarize": False, } if verbose: _log_dict("Model Analyzer analysis configuration", config) with open("config.yaml", "w") as file: yaml.safe_dump(config, file) config = ModelAnalyzerConfig() model_analyzer = ModelAnalyzer(config=config) model_analyzer.run(mode=ModelAnalyzerMode.ANALYZE, verbose=verbose) inference_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_inference gpu_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_gpu for file in [inference_metrics_file, gpu_metrics_file]: LOGGER.info(f"Moving metrics {file.name} to {result_path}") shutil.move(file, result_path / file.name) def _perf_analyzer_evaluation( server_url: str, model_name: str, input_data: str, input_shapes: List[str], batch_sizes: List[int], number_of_triton_instances: int, number_of_model_instances: int, measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, concurrency_steps: int, batching_mode: BatchingMode, evaluation_mode: EvaluationMode, offline_mode: OfflineMode, result_path: pathlib.Path, output_shared_memory_size: int = 102400, verbose: bool = False, ): protocol, host, port = parse_server_url(server_url) if batching_mode == BatchingMode.STATIC: batch_sizes = batch_sizes max_concurrency = 1 min_concurrency = 1 step = 1 elif batching_mode == BatchingMode.DYNAMIC: max_batch_size = max(batch_sizes) max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances max_concurrency = min(256, max_total_requests) step = max(1, max_concurrency // concurrency_steps) min_concurrency = step batch_sizes = [max(1, max_total_requests // 256)] else: raise ValueError(f"Unsupported batching mode: {batching_mode}") _log_dict( "Selected configuration", { "server_url": server_url, "model_name": model_name, "input_data": input_data, "input_shapes": input_shapes, "batch_sizes": batch_sizes, "number_of_triton_instances": number_of_triton_instances, "number_of_model_instances": number_of_model_instances, "measurement_mode": measurement_mode, "measurement_interval": measurement_interval, "measurement_request_count": measurement_request_count, "concurrency_steps": concurrency_steps, "batching_mode": batching_mode, "evaluation_mode": evaluation_mode, "offline_mode": offline_mode, "output_shared_memory_size": output_shared_memory_size, "result_path": result_path, "verbose": verbose, }, ) results: List[Dict] = list() for batch_size in batch_sizes: for concurrency in range(min_concurrency, max_concurrency + step, step): performance_partial_file = f"triton_performance_{evaluation_mode.value.lower()}_{batching_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv" params = { "model-name": model_name, "model-version": 1, "batch-size": batch_size, "url": f"{host}:{port}", "protocol": protocol, "input-data": input_data, "measurement-interval": measurement_interval, "concurrency-range": f"{concurrency}:{concurrency}:1", "latency-report-file": performance_partial_file, } if verbose: params["extra-verbose"] = True if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"): params["measurement-mode"] = 
measurement_mode.value params["measurement-request-count"] = measurement_request_count if evaluation_mode == EvaluationMode.OFFLINE: params["shared-memory"] = offline_mode.value params["output-shared-memory-size"] = output_shared_memory_size if verbose: _log_dict(f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}", params) config = PerfAnalyzerConfig() for param, value in params.items(): config[param] = value for shape in input_shapes: config["shape"] = shape perf_analyzer = PerfAnalyzer(config=config) perf_analyzer.run() _update_performance_data(results, batch_size, performance_partial_file) os.remove(performance_partial_file) results = sort_results(results=results) save_results(filename=result_path.as_posix(), data=results) show_results(results=results) def _run_performance_analysis( server_url: str, model_name: str, input_data: str, input_shapes: List[str], batch_sizes: List[int], number_of_triton_instances: int, number_of_model_instances: int, measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, concurrency_steps: int, batching_mode: BatchingMode, evaluation_mode: EvaluationMode, offline_mode: OfflineMode, output_shared_memory_size: int, performance_tool: PerformanceTool, model_repository: str, result_path: pathlib.Path, warmup: bool, verbose: bool, ): log_level = logging.INFO if not verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) if performance_tool == PerformanceTool.MODEL_ANALYZER: if result_path.suffix: raise ValueError( "Results path for Model Analyzer is invalid. Please, provide the directory name. Example: results" ) elif performance_tool == PerformanceTool.PERF_ANALYZER: if result_path.suffix != ".csv": raise ValueError( "Results path for Perf Analyzer is invalid. Please, provide the CSV file name. 
Example: results.csv" ) else: raise ValueError(f"Unsupported performance tool {performance_tool}") if warmup: LOGGER.info("Running warmup before the main test") performance_evaluation_warmup( server_url=server_url, model_name=model_name, input_data=input_data, input_shapes=input_shapes, batch_sizes=batch_sizes, number_of_triton_instances=number_of_triton_instances, number_of_model_instances=number_of_model_instances, measurement_mode=measurement_mode, measurement_interval=measurement_interval, measurement_request_count=measurement_request_count, batching_mode=batching_mode, evaluation_mode=evaluation_mode, offline_mode=offline_mode, output_shared_memory_size=output_shared_memory_size, ) if performance_tool == PerformanceTool.MODEL_ANALYZER: LOGGER.info("Using Model Analyzer for performance evaluation") _model_analyzer_evaluation( server_url=server_url, model_name=model_name, input_data=input_data, input_shapes=input_shapes, batch_sizes=batch_sizes, number_of_triton_instances=number_of_triton_instances, number_of_model_instances=number_of_model_instances, measurement_mode=measurement_mode, measurement_interval=measurement_interval, measurement_request_count=measurement_request_count, concurrency_steps=concurrency_steps, batching_mode=batching_mode, evaluation_mode=evaluation_mode, offline_mode=offline_mode, output_shared_memory_size=output_shared_memory_size, model_repository=model_repository, result_path=result_path, verbose=verbose, ) elif performance_tool == PerformanceTool.PERF_ANALYZER: LOGGER.info("Using Perf Analyzer for performance evaluation") _perf_analyzer_evaluation( server_url=server_url, model_name=model_name, input_data=input_data, input_shapes=input_shapes, batch_sizes=batch_sizes, number_of_triton_instances=number_of_triton_instances, number_of_model_instances=number_of_model_instances, measurement_mode=measurement_mode, measurement_interval=measurement_interval, measurement_request_count=measurement_request_count, concurrency_steps=concurrency_steps, batching_mode=batching_mode, evaluation_mode=evaluation_mode, offline_mode=offline_mode, output_shared_memory_size=output_shared_memory_size, result_path=result_path, verbose=verbose, ) else: raise ValueError(f"Unsupported performance tool {performance_tool}") class MeasurementMode(Enum): """ Available measurement stabilization modes """ COUNT_WINDOWS = "count_windows" TIME_WINDOWS = "time_windows" def main(): parser = argparse.ArgumentParser() parser.add_argument( "--server-url", type=str, required=False, default="http://127.0.0.1:8000", help="Url to Triton server", ) parser.add_argument( "--model-name", type=str, required=True, help="Name of the model to test", ) parser.add_argument( "--input-data", type=str, required=False, default="random", help="Input data to perform profiling.", ) parser.add_argument( "--input-shapes", action="append", required=False, help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.", ) parser.add_argument( "--batch-sizes", type=str, required=True, help="List of batch sizes to tests. 
Comma separated.", ) parser.add_argument( "--number-of-triton-instances", type=int, default=1, help="Number of Triton Server instances", ) parser.add_argument( "--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server", ) parser.add_argument( "--measurement-mode", choices=[item.value for item in MeasurementMode], default=MeasurementMode.COUNT_WINDOWS.value, type=str, help="Select measurement mode " "'time_windows' stabilize performance on measurement window. " "'count_windows' stabilize performance on number of samples.", ) parser.add_argument( "--measurement-interval", required=False, help="Time window perf_analyzer will wait to stabilize the measurement", default=5000, type=int, ) parser.add_argument( "--measurement-request-count", required=False, help="Number of samples on which perf_analyzer will stabilize the measurement", default=50, type=int, ) parser.add_argument( "--concurrency-steps", help="Define number of concurrency steps used for dynamic batching tests", default=32, type=int, ) parser.add_argument( "--batching-mode", choices=[item.value for item in BatchingMode], default=BatchingMode.STATIC.value, type=str, help="Select batching mode " "'static' run static batching scenario. " "'dynamic' run dynamic batching scenario.", ) parser.add_argument( "--evaluation-mode", choices=[item.value for item in EvaluationMode], default=EvaluationMode.OFFLINE.value, type=str, help="Select evaluation mode " "'offline' run offline analysis and use GPU memory to pass tensors. " "'online' run online analysis and use HTTP protocol.", ) parser.add_argument( "--offline-mode", choices=[item.value for item in OfflineMode], default=OfflineMode.SYSTEM.value, type=str, help="Select offline mode " "'system' pass tensors through CPU RAM memory. " "'cuda' pass tensors through GPU RAM memory.", ) parser.add_argument( "--output-shared-memory-size", default=100240, type=int, help="Size of memory buffer allocated for output with dynamic shapes in bytes. " "Has to be equal to maximal size of output tensor.", ) parser.add_argument( "--performance-tool", choices=[item.value for item in PerformanceTool], default=PerformanceTool.MODEL_ANALYZER.value, type=str, help="Select performance tool for measurement mode " "'model_analyzer' use Model Analyzer " "'perf_analyzer' use Perf Analyzer", ) parser.add_argument( "--model-repository", default=None, type=str, help="Path to model repository. 
Valid when using Model Analyzer", ) parser.add_argument("--result-path", type=pathlib.Path, required=True, help="Path where the results file is stored.") parser.add_argument( "--warmup", help="Enable model warmup before performance test", action="store_true", default=False ) parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) args = parser.parse_args() batch_sizes = list(map(lambda x: int(x), args.batch_sizes.split(","))) _run_performance_analysis( server_url=args.server_url, model_name=args.model_name, input_data=args.input_data, input_shapes=args.input_shapes or [], batch_sizes=batch_sizes, number_of_triton_instances=args.number_of_triton_instances, number_of_model_instances=args.number_of_model_instances, measurement_mode=MeasurementMode(args.measurement_mode), measurement_interval=args.measurement_interval, measurement_request_count=args.measurement_request_count, concurrency_steps=args.concurrency_steps, batching_mode=BatchingMode(args.batching_mode), evaluation_mode=EvaluationMode(args.evaluation_mode), offline_mode=OfflineMode(args.offline_mode), output_shared_memory_size=args.output_shared_memory_size, performance_tool=PerformanceTool(args.performance_tool), model_repository=args.model_repository, result_path=args.result_path, warmup=args.warmup, verbose=args.verbose, ) if __name__ == "__main__": main()
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/run_performance_on_triton.py
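
For context, a hedged invocation sketch for the script above; the model name, server URL, and result path are placeholders, while the flag names match the argparse definitions in main():

import subprocess

# Placeholder values; flags mirror the argparse definitions in main().
subprocess.run(
    [
        "python", "run_performance_on_triton.py",
        "--server-url", "http://127.0.0.1:8000",
        "--model-name", "my_model",
        "--batch-sizes", "1,2,4",            # parsed as comma-separated ints
        "--performance-tool", "perf_analyzer",
        "--result-path", "results.csv",      # Perf Analyzer requires a .csv result path
    ],
    check=True,
)
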
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/__init__.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import importlib import logging import os from enum import Enum from pathlib import Path from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np LOGGER = logging.getLogger(__name__) DATALOADER_FN_NAME = "get_dataloader_fn" GET_MODEL_FN_NAME = "get_model" GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn" GET_ARGPARSER_FN_NAME = "update_argparser" class TensorSpec(NamedTuple): name: str dtype: str shape: Tuple class Parameter(Enum): def __lt__(self, other: "Parameter") -> bool: return self.value < other.value def __str__(self): return self.value class Accelerator(Parameter): NONE = "none" AMP = "amp" TRT = "trt" CUDA = NONE # backward compatibility class Precision(Parameter): INT8 = "int8" FP16 = "fp16" FP32 = "fp32" TF32 = "tf32" # Deprecated class Format(Parameter): TF_GRAPHDEF = "tf-graphdef" TF_SAVEDMODEL = "tf-savedmodel" TF_TRT = "tf-trt" TF_ESTIMATOR = "tf-estimator" TF_KERAS = "tf-keras" ONNX = "onnx" TRT = "trt" TS_SCRIPT = "ts-script" TS_TRACE = "ts-trace" PYT = "pyt" FASTERTRANSFORMER = "fastertransformer" class Model(NamedTuple): handle: object # TODO: precision should be removed precision: Optional[Precision] inputs: Dict[str, TensorSpec] outputs: Dict[str, TensorSpec] def load_from_file(file_path, label, target): spec = importlib.util.spec_from_file_location(name=label, location=file_path) my_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(my_module) # pytype: disable=attribute-error return getattr(my_module, target, None) class BaseLoader(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def load(self, model_path: Union[str, Path], **kwargs) -> Model: """ Loads and process model from file based on given set of args """ pass class BaseSaver(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None: """ Save model to file """ pass class BaseRunner(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def init_inference(self, model: Model): raise NotImplementedError class BaseRunnerSession(abc.ABC): def __init__(self, model: Model): self._model = model @abc.abstractmethod def __enter__(self): raise NotImplementedError() @abc.abstractmethod def __exit__(self, exc_type, exc_value, traceback): raise NotImplementedError() @abc.abstractmethod def __call__(self, x: Dict[str, object]): raise NotImplementedError() def _set_env_variables(self) -> Dict[str, object]: """this method not remove values; fix it if needed""" to_set = {} old_values = {k: os.environ.pop(k, None) for k in to_set} os.environ.update(to_set) return old_values def _recover_env_variables(self, old_envs: Dict[str, object]): for name, value in old_envs.items(): if value is None: del os.environ[name] else: os.environ[name] = str(value) class 
BaseConverter(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def convert(self, model: Model, dataloader_fn) -> Model: raise NotImplementedError() @staticmethod def required_source_model_precision(requested_model_precision: Precision) -> Precision: return requested_model_precision class BaseMetricsCalculator(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None def calc( self, *, ids: List[Any], y_pred: Dict[str, np.ndarray], x: Optional[Dict[str, np.ndarray]], y_real: Optional[Dict[str, np.ndarray]], ) -> Dict[str, float]: """ Calculates error/accuracy metrics Args: ids: List of ids identifying each sample in the batch y_pred: model output as dict where key is output name and value is output value x: model input as dict where key is input name and value is input value y_real: input ground truth as dict where key is output name and value is output value Returns: dictionary where key is metric name and value is its value """ pass @abc.abstractmethod def update( self, ids: List[Any], y_pred: Dict[str, np.ndarray], x: Optional[Dict[str, np.ndarray]], y_real: Optional[Dict[str, np.ndarray]], ): pass @property @abc.abstractmethod def metrics(self) -> Dict[str, Any]: pass class ShapeSpec(NamedTuple): min: Tuple opt: Tuple max: Tuple class MeasurementMode(Enum): COUNT_WINDOWS = "count_windows" TIME_WINDOWS = "time_windows" class PerformanceTool(Enum): """ Available performance evaluation tools """ MODEL_ANALYZER = "model_analyzer" PERF_ANALYZER = "perf_analyzer" class BatchingMode(Enum): """ Available batching modes """ STATIC = "static" DYNAMIC = "dynamic" class EvaluationMode(Enum): """ Available evaluation modes """ OFFLINE = "offline" ONLINE = "online" class OfflineMode(Enum): SYSTEM = "system" CUDA = "cuda"
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/core.py
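
For orientation, a tiny sketch of how the core abstractions compose; the import path, tensor names, and shapes are illustrative assumptions only:

# Assumes this file is importable as deployment_toolkit.core.
from deployment_toolkit.core import Model, Precision, TensorSpec

# Illustrative tensor names and shapes; -1 marks a dynamic (batch) dimension.
inputs = {"input__0": TensorSpec(name="input__0", dtype="float32", shape=(-1, 128))}
outputs = {"output__0": TensorSpec(name="output__0", dtype="float32", shape=(-1, 1))}
model = Model(handle=None, precision=Precision.FP32, inputs=inputs, outputs=outputs)
print(model.inputs["input__0"].shape)  # (-1, 128)
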
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import json import pickle import threading from pathlib import Path from typing import Dict, Iterator, List, Union import numpy as np MB2B = 2 ** 20 B2MB = 1 / MB2B FLUSH_THRESHOLD_B = 256 * MB2B def _validate_batch(name: str, value: Union[list, np.ndarray]): if not isinstance(value, (list, np.ndarray)): raise ValueError(f"Values shall be lists or np.ndarrays; current type {type(value)}") def _validate_prefix_data(prefix_data: Dict[str, List[np.ndarray]]): batch_sizes_per_io_name = {name: [len(batch) for batch in batches] for name, batches in prefix_data.items()} names = list(batch_sizes_per_io_name) for io_name in names: for batch_idx, batch_size in enumerate(batch_sizes_per_io_name[io_name]): if not all([batch_sizes_per_io_name[other_name][batch_idx] == batch_size for other_name in names]): non_equal_batch_sizes = { other_name: batch_sizes_per_io_name[other_name][batch_idx] for other_name in names } non_equal_batch_sizes_str = ", ".join( [f"{name}={batch_size}" for name, batch_size in non_equal_batch_sizes.items()] ) raise ValueError( "All inputs/outputs should have same number of batches with equal batch_size. " f"At batch_idx={batch_idx} there are batch_sizes: {non_equal_batch_sizes_str}" ) # ensure if each io has same number of batches with equal size def _get_nitems_and_batches(prefix_data: Dict[str, List[np.ndarray]]): nitems = 0 nbatches = 0 if prefix_data: nitems_per_io_name = {name: sum(len(batch) for batch in batches) for name, batches in prefix_data.items()} nbatches_per_io_name = {name: len(batches) for name, batches in prefix_data.items()} nitems = list(nitems_per_io_name.values())[0] nbatches = list(nbatches_per_io_name.values())[0] return nitems, nbatches class BaseDumpWriter(abc.ABC): FILE_SUFFIX = ".abstract" def __init__(self, output_dir: Union[str, Path]): self._output_dir = Path(output_dir) # outer dict key is prefix (i.e. 
input/output/labels/...), inner dict key is input/output name # list is list of batches self._items_cache: Dict[str, Dict[str, List[np.ndarray]]] = {} # key is prefix self._items_counters: Dict[str, int] = {} self._cache_lock = threading.RLock() self._flush_threshold_b = FLUSH_THRESHOLD_B @property def cache_size(self): def _get_bytes_size(name, batch): _validate_batch(name, batch) if not isinstance(batch, np.ndarray): batch = np.array(batch) return batch.nbytes with self._cache_lock: return { prefix: sum(_get_bytes_size(name, batch) for name, batches in data.items() for batch in batches) for prefix, data in self._items_cache.items() } def _append_to_cache(self, prefix, prefix_data): if prefix_data is None: return if not isinstance(prefix_data, dict): raise ValueError(f"{prefix} data to store shall be dict") with self._cache_lock: cached_prefix_data = self._items_cache.setdefault(prefix, {}) for name, batch in prefix_data.items(): _validate_batch(name, batch) if not isinstance(batch, np.ndarray): batch = np.array(batch) cached_batches = cached_prefix_data.setdefault(name, []) cached_batches += [batch] def write(self, **kwargs): with self._cache_lock: for prefix, prefix_data in kwargs.items(): self._append_to_cache(prefix, prefix_data) biggest_prefix_data_size = max(self.cache_size.values()) if biggest_prefix_data_size > self._flush_threshold_b: self.flush() def flush(self): with self._cache_lock: for prefix, prefix_data in self._items_cache.items(): _validate_prefix_data(prefix_data) output_path = self._output_dir / self._get_filename(prefix) self._dump(prefix_data, output_path) nitems, nbatches = _get_nitems_and_batches(prefix_data) self._items_counters[prefix] += nitems self._items_cache = {} def _get_filename(self, prefix): idx = self._items_counters.setdefault(prefix, 0) return f"{prefix}-{idx:012d}{self.FILE_SUFFIX}" @abc.abstractmethod def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path): pass def __enter__(self): if self._output_dir.exists() and len(list(self._output_dir.iterdir())): raise ValueError(f"{self._output_dir.as_posix()} is not empty") self._output_dir.mkdir(parents=True, exist_ok=True) return self def __exit__(self, exc_type, exc_val, exc_tb): self.flush() class PickleDumpWriter(BaseDumpWriter): FILE_SUFFIX = ".pkl" def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path): output_path.parent.mkdir(parents=True, exist_ok=True) with output_path.open("wb") as pickle_file: pickle.dump(prefix_data, pickle_file) class JsonDumpWriter(BaseDumpWriter): FILE_SUFFIX = ".json" def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path): repacked_prefix_data = self._format_data(prefix_data) output_path.parent.mkdir(parents=True, exist_ok=True) with output_path.open("w") as json_file: json.dump(repacked_prefix_data, json_file) def _format_data(self, prefix_data: Dict[str, List[np.ndarray]]) -> Dict: def _format_batch_for_perf_analyzer_json_format(batch: np.ndarray): return { "content": batch.flatten().tolist(), "shape": list(batch.shape), "dtype": str(batch.dtype), } _, nbatches = _get_nitems_and_batches(prefix_data) batches = [{} for _ in range(nbatches)] for io_name, batches_per_io in prefix_data.items(): for batch_idx, batch in enumerate(batches_per_io): batches[batch_idx][io_name] = _format_batch_for_perf_analyzer_json_format(batch) return {"data": batches} class BaseDumpReader(abc.ABC): FILE_SUFFIX = ".abstract" def __init__(self, dump_dir: Union[Path, str]): self._dump_dir = Path(dump_dir) def get(self, prefix: str) 
-> Iterator[Dict[str, np.ndarray]]: dump_files_paths = sorted(self._dump_dir.glob(f"{prefix}*{self.FILE_SUFFIX}")) for dump_file_path in dump_files_paths: prefix_data = self._load_file(dump_file_path) nitems, nbatches = _get_nitems_and_batches(prefix_data) for batch_idx in range(nbatches): yield {io_name: prefix_data[io_name][batch_idx] for io_name in prefix_data} @abc.abstractmethod def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]: pass def iterate_over(self, prefix_list: List[str]) -> Iterator: iterators = [self.get(prefix) for prefix in prefix_list] empty_iterators = [False] * len(iterators) while not all(empty_iterators): values = [None] * len(iterators) for idx, iterator in enumerate(iterators): if empty_iterators[idx]: continue try: values[idx] = next(iterator) except StopIteration: empty_iterators[idx] = True if all(empty_iterators): break if not all(empty_iterators): yield values def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass class PickleDumpReader(BaseDumpReader): FILE_SUFFIX = ".pkl" def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]: with dump_file_path.open("rb") as pickle_file: return pickle.load(pickle_file) class JsonDumpReader(BaseDumpReader): FILE_SUFFIX = ".json" def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]: with dump_file_path.open("rb") as json_file: data = json.load(json_file) return self._repack_data(data) def _repack_data(self, data: Dict) -> Dict[str, List[np.ndarray]]: result: Dict[str, List[np.ndarray]] = {} batches = data["data"] for batch in batches: for io_name, batch_as_dict in batch.items(): io_batches = result.setdefault(io_name, []) flat_array = batch_as_dict["content"] shape = batch_as_dict["shape"] dtype = batch_as_dict["dtype"] batch_as_array = np.array(flat_array).reshape(shape).astype(dtype) io_batches.append(batch_as_array) return result
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/dump.py
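
A small round-trip sketch for the dump writers and readers above; the import path and "./dump_dir" are placeholders, and the directory must not already contain files:

import numpy as np
from deployment_toolkit.dump import JsonDumpReader, JsonDumpWriter  # assumed import path

with JsonDumpWriter("./dump_dir") as writer:
    # Each keyword is a prefix; each value maps an io name to one batch.
    writer.write(inputs={"x": np.zeros((4, 3), dtype=np.float32)},
                 outputs={"y": np.ones((4, 1), dtype=np.float32)})
# flush() on exit writes inputs-000000000000.json and outputs-000000000000.json

for batch in JsonDumpReader("./dump_dir").get("inputs"):
    print(batch["x"].shape)  # (4, 3)
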
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import logging import os import re from pathlib import Path from typing import List LOGGER = logging.getLogger(__name__) class ExtensionManager: def __init__(self, name: str): self._name = name self._registry = {} def register_extension(self, extension: str, clazz): already_registered_class = self._registry.get(extension, None) if already_registered_class and already_registered_class.__module__ != clazz.__module__: raise RuntimeError( f"Conflicting extension {self._name}/{extension}; " f"{already_registered_class.__module__}.{already_registered_class.__name__} " f"and " f"{clazz.__module__}.{clazz.__name__}" ) elif already_registered_class is None: clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None" LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}") self._registry[extension] = clazz def get(self, extension): if extension not in self._registry: raise RuntimeError(f"Missing extension {self._name}/{extension}") return self._registry[extension] @property def supported_extensions(self): return list(self._registry) @staticmethod def scan_for_extensions(extension_dirs: List[Path]): register_pattern = r".*\.register_extension\(.*" for extension_dir in extension_dirs: for python_path in extension_dir.rglob("*.py"): if not python_path.is_file(): continue payload = python_path.read_text() if re.findall(register_pattern, payload): import_path = python_path.relative_to(toolkit_root_dir.parent) package = import_path.parent.as_posix().replace(os.sep, ".") package_with_module = f"{package}.{import_path.stem}" spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path) my_module = importlib.util.module_from_spec(spec) my_module.__package__ = package try: spec.loader.exec_module(my_module) # pytype: disable=attribute-error except ModuleNotFoundError as e: LOGGER.error( f"Could not load extensions from {import_path} due to missing python packages; {e}" ) runners = ExtensionManager("runners") loaders = ExtensionManager("loaders") savers = ExtensionManager("savers") converters = ExtensionManager("converters") toolkit_root_dir = (Path(__file__).parent / "..").resolve() ExtensionManager.scan_for_extensions([toolkit_root_dir])
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/extensions.py
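
A short sketch of the registry contract above; MyLoader is a hypothetical class, the import path is assumed, and note that importing the module also triggers the extension scan at the bottom of the file:

from deployment_toolkit.extensions import loaders  # assumed import path

class MyLoader:
    pass

loaders.register_extension("my-format", MyLoader)
assert loaders.get("my-format") is MyLoader
print(loaders.supported_extensions)  # includes "my-format" plus anything found by the scan
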
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import pathlib from distutils.version import LooseVersion from importlib.metadata import version from typing import List TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient")) # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .core import BatchingMode, EvaluationMode, MeasurementMode, OfflineMode from .perf_analyzer import PerfAnalyzer, PerfAnalyzerConfig from .utils import parse_server_url LOGGER = logging.getLogger("warmup") def performance_evaluation_warmup( server_url: str, model_name: str, batch_sizes: List[int], number_of_triton_instances: int, number_of_model_instances: int, input_data: str, input_shapes: List[str], measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, batching_mode: BatchingMode, offline_mode: OfflineMode, evaluation_mode: EvaluationMode, output_shared_memory_size: int, ): protocol, host, port = parse_server_url(server_url) measurement_interval = 2 * measurement_interval measurement_request_count = 2 * measurement_request_count if batching_mode == BatchingMode.STATIC: if len(batch_sizes) == 1: batch_sizes = {batch_sizes[0]} else: batch_sizes = sorted({1, batch_sizes[-1]}) max_concurrency = 1 min_concurrency = 1 step = 1 elif batching_mode == BatchingMode.DYNAMIC: max_batch_size = max(batch_sizes) max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances max_concurrency = min(256, max_total_requests) step = max(1, max_concurrency // 2) min_concurrency = step batch_sizes = [max(1, max_total_requests // 256)] else: raise ValueError(f"Unsupported batching mode: {batching_mode}") for batch_size in batch_sizes: for concurrency in range(min_concurrency, max_concurrency + step, step): params = { "model-name": model_name, "model-version": 1, "batch-size": batch_size, "url": f"{host}:{port}", "protocol": protocol, "input-data": input_data, "measurement-interval": measurement_interval, "concurrency-range": f"{concurrency}:{concurrency}:1", } if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"): params["measurement-mode"] = measurement_mode.value params["measurement-request-count"] = measurement_request_count if evaluation_mode == EvaluationMode.OFFLINE: params["shared-memory"] = offline_mode.value params["output-shared-memory-size"] = output_shared_memory_size config = PerfAnalyzerConfig() for param, value in params.items(): config[param] = value for shape in input_shapes: config["shape"] = shape perf_analyzer = PerfAnalyzer(config=config) perf_analyzer.run()
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/warmup.py
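
To make the dynamic-batching warmup sweep concrete, here is the same arithmetic with illustrative numbers (all values are made up):

# Illustrative values only; this mirrors the BatchingMode.DYNAMIC branch above.
batch_sizes = [1, 8, 32]
number_of_triton_instances = 1
number_of_model_instances = 2

max_batch_size = max(batch_sizes)                                   # 32
max_total_requests = (2 * max_batch_size
                      * number_of_triton_instances
                      * number_of_model_instances)                  # 128
max_concurrency = min(256, max_total_requests)                      # 128
step = max(1, max_concurrency // 2)                                 # 64
min_concurrency = step
warmup_batch_sizes = [max(1, max_total_requests // 256)]            # [1]
print(list(range(min_concurrency, max_concurrency + step, step)))   # [64, 128]
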
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import Tuple LOGGER = logging.getLogger(__name__) def parse_server_url(server_url: str) -> Tuple[str, str, int]: DEFAULT_PORTS = {"http": 8000, "grpc": 8001} # extract protocol server_url_items = server_url.split("://") if len(server_url_items) != 2: raise ValueError("Prefix server_url with protocol ex.: grpc://127.0.0.1:8001") requested_protocol, server_url = server_url_items requested_protocol = requested_protocol.lower() if requested_protocol not in DEFAULT_PORTS: raise ValueError(f"Unsupported protocol: {requested_protocol}") # extract host and port default_port = DEFAULT_PORTS[requested_protocol] server_url_items = server_url.split(":") if len(server_url_items) == 1: host, port = server_url, default_port elif len(server_url_items) == 2: host, port = server_url_items port = int(port) if port != default_port: LOGGER.warning( f"Current server URL is {server_url} while default {requested_protocol} port is {default_port}" ) else: raise ValueError(f"Could not parse {server_url}. Example of correct server URL: grpc://127.0.0.1:8001") return requested_protocol, host, port
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/utils.py
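
A couple of concrete calls showing the contract of parse_server_url; the import path is an assumption based on this toolkit's layout:

from deployment_toolkit.utils import parse_server_url  # assumed import path

assert parse_server_url("grpc://127.0.0.1:8001") == ("grpc", "127.0.0.1", 8001)
assert parse_server_url("http://localhost") == ("http", "localhost", 8000)  # default http port
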
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import inspect import logging from typing import Callable, Dict, Optional, Union from model_navigator.utils.cli import is_dict_generic, is_list_generic, is_optional_generic from .core import GET_ARGPARSER_FN_NAME, load_from_file LOGGER = logging.getLogger(__name__) def str2bool(v): if isinstance(v, bool): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("Boolean value expected.") def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict: signature = inspect.signature(fn) parameters_names = list(signature.parameters) if isinstance(args, argparse.Namespace): args = vars(args) args = {k: v for k, v in args.items() if k in parameters_names} return args def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser: parser.conflict_handler = "resolve" signature = inspect.signature(fn) for parameter in signature.parameters.values(): if parameter.name in ["self", "args", "kwargs"]: continue argument_kwargs = {} if parameter.annotation != inspect.Parameter.empty: is_optional = is_optional_generic(parameter.annotation) if is_optional: annotation = parameter.annotation.__args__[0] # Optional[cls] will be changed into Union[cls, None] else: annotation = parameter.annotation is_list = is_list_generic(annotation) is_dict = is_dict_generic(annotation) if parameter.annotation == bool: argument_kwargs["type"] = str2bool argument_kwargs["choices"] = [0, 1] elif is_list: argument_kwargs["type"] = annotation.__args__[0] # List[cls] -> cls elif is_dict: raise RuntimeError( f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}" ) else: argument_kwargs["type"] = annotation if parameter.default != inspect.Parameter.empty: if parameter.annotation == bool: argument_kwargs["default"] = str2bool(parameter.default) else: argument_kwargs["default"] = parameter.default else: argument_kwargs["required"] = True name = parameter.name.replace("_", "-") LOGGER.debug(f"Adding argument {name} with {argument_kwargs}") parser.add_argument(f"--{name}", **argument_kwargs) return parser class ArgParserGenerator: def __init__(self, cls_or_fn, module_path: Optional[str] = None): self._cls_or_fn = cls_or_fn init_method_name = "__init__" self._handle = cls_or_fn if inspect.isfunction(cls_or_fn) else getattr(cls_or_fn, init_method_name, None) input_is_python_file = module_path and module_path.endswith(".py") self._input_path = module_path if input_is_python_file else None self._required_fn_name_for_signature_parsing = getattr( cls_or_fn, "required_fn_name_for_signature_parsing", None ) def update_argparser(self, parser): name = self._handle.__name__ group_parser = parser.add_argument_group(name) add_args_for_fn_signature(group_parser, fn=self._handle) self._update_argparser(group_parser) def get_args(self, args: argparse.Namespace): 
filtered_args = filter_fn_args(args, fn=self._handle) tmp_parser = argparse.ArgumentParser(allow_abbrev=False) self._update_argparser(tmp_parser) custom_names = [ p.dest.replace("-", "_") for p in tmp_parser._actions if not isinstance(p, argparse._HelpAction) ] custom_params = {n: getattr(args, n) for n in custom_names} filtered_args = {**filtered_args, **custom_params} return filtered_args def from_args(self, args: Union[argparse.Namespace, Dict]): args = self.get_args(args) LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})") return self._cls_or_fn(**args) def _update_argparser(self, parser): label = "argparser_update" if self._input_path: update_argparser_handle = load_from_file(self._input_path, label=label, target=GET_ARGPARSER_FN_NAME) if update_argparser_handle: update_argparser_handle(parser) elif self._required_fn_name_for_signature_parsing: fn_handle = load_from_file( self._input_path, label=label, target=self._required_fn_name_for_signature_parsing ) if fn_handle: add_args_for_fn_signature(parser, fn_handle)
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/args.py
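
A minimal sketch of ArgParserGenerator deriving CLI flags from a function signature; get_dataloader_fn here is a hypothetical factory, and the import assumes model_navigator is installed (the module imports it):

import argparse
from deployment_toolkit.args import ArgParserGenerator  # assumed import path

def get_dataloader_fn(data_dir: str, batch_size: int = 32):
    # Hypothetical factory; the flags below are derived from this signature.
    return data_dir, batch_size

parser = argparse.ArgumentParser()
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
args = parser.parse_args(["--data-dir", "/tmp/data", "--batch-size", "16"])
print(ArgParserGenerator(get_dataloader_fn).from_args(args))  # ('/tmp/data', 16)
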
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import re from typing import Dict, List from natsort import natsorted from tabulate import tabulate def sort_results(results: List): results = natsorted(results, key=lambda item: [item[key] for key in item.keys()]) return results def save_results(filename: str, data: List, formatted: bool = False): data = format_data(data=data) if formatted else data with open(filename, "a") as csvfile: fieldnames = data[0].keys() writer = csv.DictWriter(csvfile, fieldnames=fieldnames) writer.writeheader() for row in data: writer.writerow(row) def format_data(data: List[Dict]) -> List[Dict]: formatted_data = list() for item in data: formatted_item = format_keys(data=item) formatted_data.append(formatted_item) return formatted_data def format_keys(data: Dict) -> Dict: keys = {format_key(key=key): value for key, value in data.items()} return keys def format_key(key: str) -> str: key = " ".join([k.capitalize() for k in re.split("_| ", key)]) return key def show_results(results: List[Dict]): headers = list(results[0].keys()) summary = map(lambda x: list(map(lambda item: item[1], x.items())), results) print(tabulate(summary, headers=headers))
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/report.py
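
A small usage sketch for the reporting helpers above, with made-up metric rows; the import path is assumed:

from deployment_toolkit.report import format_data, show_results  # assumed import path

rows = [
    {"batch_size": 1, "throughput_infer_per_sec": 120.5},
    {"batch_size": 2, "throughput_infer_per_sec": 210.0},
]
# format_key turns "batch_size" into "Batch Size" before tabulation.
show_results(results=format_data(rows))
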
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from pathlib import Path from typing import Dict, Optional, Union import numpy as np # pytype: disable=import-error import onnx import onnx.shape_inference import onnxruntime from google.protobuf import text_format from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec from ..extensions import loaders, runners, savers from .utils import infer_precision # pytype: enable=import-error LOGGER = logging.getLogger(__name__) def _value_info2tensor_spec(value_info: onnx.ValueInfoProto): onnx_data_type_map = {"float": "float32", "double": "float64"} elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower() dtype = onnx_data_type_map.get(elem_type_name, elem_type_name) def _get_dim(dim): which = dim.WhichOneof("value") if which is not None: # which is None when dim is None dim = getattr(dim, which) return None if isinstance(dim, (str, bytes)) else dim shape = value_info.type.tensor_type.shape shape = tuple(_get_dim(d) for d in shape.dim) return TensorSpec(value_info.name, dtype=dtype, shape=shape) def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]: import networkx as nx # build directed graph nx_graph = nx.DiGraph() def _get_dtype(vi): t = vi.type if hasattr(t, "tensor_type"): type_id = t.tensor_type.elem_type else: raise NotImplementedError("Not implemented yet") return TENSOR_TYPE_TO_NP_TYPE[type_id] node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info} node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output} node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input} for node in onnx_graph.node: node_dtype = node_output2type.get("+".join(node.output), None) nx_graph.add_node( node.name, op=node.op_type, attr={a.name: a for a in node.attribute}, dtype=node_dtype, ) for input_name in node.input: prev_node = node_outputs2node.get(input_name, None) if prev_node: nx_graph.add_edge(prev_node.name, node.name) for input_node in onnx_graph.input: input_name = input_node.name nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node)) next_node = node_inputs2node.get(input_name, None) if next_node: nx_graph.add_edge(input_name, next_node.name) for output in onnx_graph.output: output_name = output.name nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output)) prev_node = node_outputs2node.get(output_name, None) if prev_node: nx_graph.add_edge(prev_node.name, output_name) else: LOGGER.warning(f"Could not find previous node for {output_name}") input_names = [n.name for n in onnx_graph.input] output_names = [n.name for n in onnx_graph.output] most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None)) if most_common_dtype is not None: precision = 
{np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype] else: precision = None return precision class OnnxLoader(BaseLoader): def load(self, model_path: Union[str, Path], **_) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() model = onnx.load(model_path) onnx.checker.check_model(model) onnx.helper.strip_doc_string(model) model = onnx.shape_inference.infer_shapes(model) # TODO: probably modification of onnx model ios causes error on optimize # from onnx.utils import polish_model # model = polish_model(model) # run checker, docs strip, optimizer and shape inference inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input} outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output} precision = _infer_graph_precision(model.graph) return Model(model, precision, inputs, outputs) class OnnxSaver(BaseSaver): def __init__(self, as_text: bool = False): self._as_text = as_text def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None: model_path = Path(model_path) LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}") model_path.parent.mkdir(parents=True, exist_ok=True) onnx_model: onnx.ModelProto = model.handle if self._as_text: with model_path.open("w") as f: f.write(text_format.MessageToString(onnx_model)) else: with model_path.open("wb") as f: f.write(onnx_model.SerializeToString()) """ ExecutionProviders on onnxruntime 1.4.0 ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'MIGraphXExecutionProvider', 'NGRAPHExecutionProvider', 'OpenVINOExecutionProvider', 'DnnlExecutionProvider', 'NupharExecutionProvider', 'VitisAIExecutionProvider', 'ArmNNExecutionProvider', 'ACLExecutionProvider', 'CPUExecutionProvider'] """ def _check_providers(providers): providers = providers or [] if not isinstance(providers, (list, tuple)): providers = [providers] available_providers = onnxruntime.get_available_providers() unavailable = set(providers) - set(available_providers) if unavailable: raise RuntimeError(f"Unavailable providers {unavailable}") return providers class OnnxRunner(BaseRunner): def __init__(self, verbose_runtime_logs: bool = False): self._providers = None self._verbose_runtime_logs = verbose_runtime_logs def init_inference(self, model: Model): assert isinstance(model.handle, onnx.ModelProto) return OnnxRunnerSession( model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs ) class OnnxRunnerSession(BaseRunnerSession): def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False): super().__init__(model) self._input_names = None self._output_names = None self._session = None self._providers = providers self._verbose_runtime_logs = verbose_runtime_logs self._old_env_values = {} def __enter__(self): self._old_env_values = self._set_env_variables() sess_options = onnxruntime.SessionOptions() # default session options if self._verbose_runtime_logs: sess_options.log_severity_level = 0 sess_options.log_verbosity_level = 1 LOGGER.info( f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}" ) self._input_names = list(self._model.inputs) self._output_names = list(self._model.outputs) model_payload = self._model.handle.SerializeToString() self._session = onnxruntime.InferenceSession( model_payload, providers=self._providers, sess_options=sess_options ) return self def __exit__(self, exc_type, exc_value, traceback): self._input_names = None self._output_names = None self._session = 
None self._recover_env_variables(self._old_env_values) def __call__(self, x: Dict[str, object]): feed_dict = {k: x[k] for k in self._input_names} y_pred = self._session.run(self._output_names, feed_dict) y_pred = dict(zip(self._output_names, y_pred)) return y_pred loaders.register_extension(Format.ONNX.value, OnnxLoader) runners.register_extension(Format.ONNX.value, OnnxRunner) savers.register_extension(Format.ONNX.value, OnnxSaver)
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/bermuda/onnx.py
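
A hedged end-to-end sketch of the loader and runner above; "model.onnx" is a placeholder, and dynamic dimensions are filled with 1 purely for illustration, which assumes the model accepts batch size 1:

import numpy as np
from deployment_toolkit.bermuda.onnx import OnnxLoader, OnnxRunner  # assumed import path

model = OnnxLoader().load("model.onnx")  # placeholder path
with OnnxRunner().init_inference(model) as session:
    # Zero-filled feed; None entries in TensorSpec.shape are dynamic dims.
    feed = {
        name: np.zeros([d if isinstance(d, int) else 1 for d in spec.shape], dtype=spec.dtype)
        for name, spec in model.inputs.items()
    }
    outputs = session(feed)  # dict: output name -> np.ndarray
    print({name: value.shape for name, value in outputs.items()})
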
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/bermuda/__init__.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import Counter from typing import Callable, Dict, List, Optional import networkx as nx from ..core import ShapeSpec def infer_precision( nx_graph: nx.Graph, input_names: List[str], output_names: List[str], get_node_dtype_fn: Callable, ): node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes] node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]] dtypes_counter = Counter(node_dtypes) return dtypes_counter.most_common()[0][0] def get_shapes_with_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None): def _set_dynamic_shapes(t, shapes): for k, v in t.items(): shape = list(v.shape) for dim, s in enumerate(shape): if shapes[k][dim] != -1 and shapes[k][dim] != s: shapes[k][dim] = -1 def _mark_batch_axis(shape, batch_axis: int): shape = list(shape) shape[batch_axis] = -1 return tuple(shape) ## get all shapes from input and output tensors input_shapes = {} output_shapes = {} for batch in dataloader: _, x, y = batch for k, v in x.items(): input_shapes[k] = list(v.shape) for k, v in y.items(): output_shapes[k] = list(v.shape) break # based on max <max_num_iters> iterations, check which # dimensions differ to determine dynamic_axes max_num_iters = 100 for idx, batch in enumerate(dataloader): if idx >= max_num_iters: break _, x, y = batch _set_dynamic_shapes(x, input_shapes) _set_dynamic_shapes(y, output_shapes) if batch_size_dim is not None: input_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in input_shapes.items()} output_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in output_shapes.items()} return input_shapes, output_shapes def get_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None): input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim) all_shapes = {**input_shapes, **output_shapes} dynamic_axes = {} for k, shape in all_shapes.items(): for idx, s in enumerate(shape): if s == -1: dynamic_axes[k] = {idx: k + "_" + str(idx)} for k in all_shapes: if k in dynamic_axes: dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)}) else: dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)} return dynamic_axes def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]: def init_counters_and_shapes(x, counters, min_shapes, max_shapes): for k, v in x.items(): counters[k] = Counter() min_shapes[k] = [float("inf")] * v.ndim max_shapes[k] = [float("-inf")] * v.ndim counters = {} min_shapes: Dict[str, tuple] = {} max_shapes: Dict[str, tuple] = {} for idx, batch in enumerate(dataloader): ids, x, y = batch if idx == 0: init_counters_and_shapes(x, counters, min_shapes, max_shapes) for k, v in x.items(): shape = v.shape counters[k][shape] += 1 min_shapes[k] = tuple(min(a, b) for a, b in zip(min_shapes[k], shape)) max_shapes[k] = tuple(max(a, b) for a, b in 
zip(max_shapes[k], shape)) opt_shapes: Dict[str, tuple] = {} for k, v in counters.items(): opt_shapes[k] = v.most_common(1)[0][0] shapes = {} for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes shapes[k] = ShapeSpec( min=(1,) + min_shapes[k][1:], max=(max_batch_size,) + max_shapes[k][1:], opt=(max_batch_size,) + opt_shapes[k][1:], ) return shapes
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/bermuda/utils.py
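
A toy dataloader showing how get_input_shapes derives min/opt/max shapes; the (ids, x, y) batch convention comes from this toolkit, while the names and numbers are illustrative:

import numpy as np
from deployment_toolkit.bermuda.utils import get_input_shapes  # assumed import path

def toy_dataloader():
    for seq_len in (10, 12, 10):  # dim 1 varies, so it drives min/opt/max
        ids = list(range(4))
        x = {"input__0": np.zeros((4, seq_len), dtype=np.float32)}
        y = {"output__0": np.zeros((4, 1), dtype=np.float32)}
        yield ids, x, y

shapes = get_input_shapes(toy_dataloader(), max_batch_size=8)
print(shapes["input__0"])  # ShapeSpec(min=(1, 10), opt=(8, 10), max=(8, 12))
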
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import typing from collections import Counter from pathlib import Path from typing import Dict, Optional, Union import numpy as np import torch # pytype: disable=import-error import yaml from model_navigator.model import ModelSignatureConfig from model_navigator.tensor import TensorSpec from model_navigator.utils.config import YamlConfigFile from ..core import ( GET_MODEL_FN_NAME, BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, load_from_file, ) from ..extensions import loaders, runners, savers from .utils import get_dynamic_axes, get_shapes_with_dynamic_axes LOGGER = logging.getLogger(__name__) def get_sample_input(dataloader, device): for batch in dataloader: _, x, _ = batch break if isinstance(x, dict): sample_input = list(x.values()) elif isinstance(x, list): sample_input = x else: raise TypeError("The first element (x) of batch returned by dataloader must be a list or a dict") for idx, s in enumerate(sample_input): sample_input[idx] = torch.from_numpy(s).to(device) return tuple(sample_input) def get_model_device(torch_model): if next(torch_model.parameters()).is_cuda: return "cuda" else: return "cpu" def infer_model_precision(model): counter = Counter() for param in model.parameters(): counter[param.dtype] += 1 if counter[torch.float16] > 0: return Precision.FP16 else: return Precision.FP32 def _get_tensor_dtypes(dataloader, precision): def _get_dtypes(t): def _get_dtype(v): dtype = str(v.dtype) if dtype == "float64": dtype = "float32" if precision == Precision.FP16 and dtype == "float32": dtype = "float16" return np.dtype(dtype) return {k: _get_dtype(v) for k, v in t.items()} batch = next(dataloader) _, x, y = batch input_dtypes = _get_dtypes(x) output_dtypes = _get_dtypes(y) return input_dtypes, output_dtypes ### TODO assumption: floating point input ### type has same precision as the model def _get_model_signature( inputs_names: typing.List[str], outputs_names: typing.List[str], precision, dataloader_fn, batch_size_dim: typing.Optional[int] = None, ): dataloader = dataloader_fn() input_dtypes, output_dtypes = _get_tensor_dtypes(dataloader, precision) input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim) inputs = { name: TensorSpec(name=name, dtype=input_dtypes[name], shape=tuple(input_shapes[name])) for name in inputs_names } outputs = { name: TensorSpec(name=name, dtype=output_dtypes[name], shape=tuple(output_shapes[name])) for name in outputs_names } return ModelSignatureConfig(inputs, outputs) class PyTorchModelLoader(BaseLoader): required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME def __init__(self, **kwargs): self._model_args = kwargs def load(self, model_path: Union[str, Path], **kwargs) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME) model, io_names_dict = 
get_model(**self._model_args) dataloader_fn = kwargs.get("dataloader_fn", None) output_type = kwargs.get("output_type", None) precision = infer_model_precision(model) batch_axis = getattr(model, "bermuda_batch_axis", 0) # by default models supports batching; batch_axis=0 model_signature = _get_model_signature( inputs_names=io_names_dict["inputs"], outputs_names=io_names_dict["outputs"], precision=precision, dataloader_fn=dataloader_fn, batch_size_dim=batch_axis, ) model = Model(handle=model, precision=precision, inputs=model_signature.inputs, outputs=model_signature.outputs) if output_type == Format.TS_TRACE.value: return self._trace(model, dataloader_fn) elif output_type == Format.TS_SCRIPT.value: return self._script(model) elif output_type == Format.ONNX.value: return model else: raise ValueError(f"Not supported PyTorch format: {output_type}") def _trace(self, model: Model, dataloader_fn) -> Model: device = get_model_device(model.handle) dummy_input = get_sample_input(dataloader_fn(), device) traced_model = torch.jit.trace_module(model.handle, {"forward": dummy_input}) return Model(traced_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs) def _script(self, model: Model) -> Model: scripted_model = torch.jit.script(model.handle) return Model(scripted_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs) class TorchScriptLoader(BaseLoader): def __init__(self, tensor_names_path: str = None, **kwargs): self._model_args = kwargs self._io_spec = None if tensor_names_path is not None: with Path(tensor_names_path).open("r") as fh: tensor_infos = yaml.load(fh, Loader=yaml.SafeLoader) self._io_spec = ModelSignatureConfig(tensor_infos["inputs"], tensor_infos["outputs"]) def load(self, model_path: Union[str, Path], **_) -> Model: if not isinstance(model_path, Path): model_path = Path(model_path) model = torch.jit.load(model_path.as_posix()) precision = infer_model_precision(model) io_spec = self._io_spec if not io_spec: yaml_path = model_path.parent / f"{model_path.name}.yaml" if not yaml_path.is_file(): raise ValueError( f"If `--tensor-names-path is not provided, " f"TorchScript model loader expects file {yaml_path} with tensor information." ) with yaml_path.open("r") as fh: tensor_info = yaml.load(fh, Loader=yaml.SafeLoader) io_spec = ModelSignatureConfig(tensor_info["inputs"], tensor_info["outputs"]) return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs) class PYT2ONNXSaver(BaseSaver): def __init__(self, onnx_opset: int = None): self._onnx_opset = onnx_opset def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance( model.handle, torch.nn.Module ), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Converter aborted." 
dynamic_axes = get_dynamic_axes(dataloader_fn(), batch_size_dim=0) device = get_model_device(model.handle) dummy_input = get_sample_input(dataloader_fn(), device) with torch.no_grad(): torch.onnx.export( model.handle, dummy_input, model_path, do_constant_folding=True, input_names=list(model.inputs), output_names=list(model.outputs), dynamic_axes=dynamic_axes, opset_version=self._onnx_opset, ) class TorchScriptSaver(BaseSaver): def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None: if not isinstance(model_path, Path): model_path = Path(model_path) if isinstance(model.handle, torch.jit.ScriptModule): torch.jit.save(model.handle, model_path.as_posix()) else: raise RuntimeError("The model must be of type 'torch.jit.ScriptModule'. Saving aborted.") signature_config = ModelSignatureConfig(inputs=model.inputs, outputs=model.outputs) annotation_path = model_path.parent / f"{model_path.name}.yaml" with YamlConfigFile(annotation_path) as config_file: config_file.save_config(signature_config) class PyTorchRunner(BaseRunner): def __init__(self): pass def init_inference(self, model: Model): return PyTorchRunnerSession(model=model) class PyTorchRunnerSession(BaseRunnerSession): def __init__(self, model: Model): super().__init__(model) assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance( model.handle, torch.nn.Module ), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Runner aborted." self._model = model self._output_names = None def __enter__(self): self._output_names = list(self._model.outputs) return self def __exit__(self, exc_type, exc_value, traceback): self._output_names = None self._model = None def __call__(self, x: Dict[str, object]): with torch.no_grad(): feed_list = [torch.from_numpy(v).cuda() for k, v in x.items()] y_pred = self._model.handle(*feed_list) if isinstance(y_pred, torch.Tensor): y_pred = (y_pred,) y_pred = [t.cpu().numpy() for t in y_pred] y_pred = dict(zip(self._output_names, y_pred)) return y_pred loaders.register_extension(Format.PYT.value, PyTorchModelLoader) loaders.register_extension(Format.TS_TRACE.value, TorchScriptLoader) loaders.register_extension(Format.TS_SCRIPT.value, TorchScriptLoader) savers.register_extension(Format.TS_SCRIPT.value, TorchScriptSaver) savers.register_extension(Format.TS_TRACE.value, TorchScriptSaver) savers.register_extension(f"{Format.PYT.value}--{Format.ONNX.value}", PYT2ONNXSaver) runners.register_extension(Format.PYT.value, PyTorchRunner) runners.register_extension(Format.TS_SCRIPT.value, PyTorchRunner) runners.register_extension(Format.TS_TRACE.value, PyTorchRunner)
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/bermuda/pyt.py
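
A quick sketch of the precision heuristic above, which counts parameter dtypes; the import path is assumed:

import torch
from deployment_toolkit.bermuda.pyt import infer_model_precision  # assumed import path

model = torch.nn.Linear(4, 2)
print(infer_model_precision(model))         # Precision.FP32
print(infer_model_precision(model.half()))  # Precision.FP16 once any parameter is float16
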
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .model_analyzer import ModelAnalyzer, ModelAnalyzerMode, ModelAnalyzerReportMode # noqa: F401 from .model_analyzer_config import ModelAnalyzerConfig # noqa: F401
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/model_analyzer/__init__.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .exceptions import ModelAnalyzerException class ModelAnalyzerConfig: """ A config class to set arguments to the Model Analyzer. An argument set to None will use the default. """ model_analyzer_args = [ "config-file", ] input_to_options = [ "config-file", ] def __init__(self): # Args will be a dict with the string representation as key self._args = {k: None for k in self.model_analyzer_args} self._options = { "-f": "config.yaml", } self._input_to_options = { "config-file": "-f", } def to_cli_string(self): """ Utility function to convert a config into a string of arguments to the server with CLI. Returns ------- str the command consisting of all set arguments to the model analyzer. e.g. '--model-repository=/models --verbose=True' """ # single dashed options, then verbose flags, then main args args = [f"{k} {v}" for k, v in self._options.items() if v] args += [f"--{k}={v}" for k, v in self._args.items() if v] return " ".join(args) @classmethod def allowed_keys(cls): """ Returns ------- list of str The keys that are allowed to be passed into model_analyzer """ return list(cls.model_analyzer_args) + list(cls.input_to_options) def __getitem__(self, key): """ Gets an arguments value in config Parameters ---------- key : str The name of the argument to the model analyzer Returns ------- The value that the argument is set to in this config """ if key in self._args: return self._args[key] elif key in self._input_to_options: return self._options[self._input_to_options[key]] else: raise ModelAnalyzerException(f"'{key}' Key not found in config") def __setitem__(self, key, value): """ Sets an arguments value in config after checking if defined/supported. Parameters ---------- key : str The name of the argument to the model analyzer value : (any) The value to which the argument is being set Raises ------ TritonModelAnalyzerException If key is unsupported or undefined in the config class """ if key in self._args: self._args[key] = value elif key in self._input_to_options: self._options[self._input_to_options[key]] = value else: raise ModelAnalyzerException(f"The argument '{key}' to the Model Analyzer is not supported.")
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/model_analyzer/model_analyzer_config.py
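
A small usage sketch of the config container above; note that config-file is the only allowed key and that the -f option carries a default:

from deployment_toolkit.model_analyzer import ModelAnalyzerConfig  # assumed import path

config = ModelAnalyzerConfig()
print(config.to_cli_string())   # "-f config.yaml", the default for the -f option
config["config-file"] = "my_profile.yaml"
print(config["config-file"])    # "my_profile.yaml"
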
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class ModelAnalyzerException(Exception): def __init__(self, message: str): self._message = message def __str__(self): """ Get the exception string representation. Returns ------- str The message associated with this exception, or None if no message. """ return self._message @property def message(self): """ Get the exception message. Returns ------- str The message associated with this exception, or None if no message. """ return self._message
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/model_analyzer/exceptions.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import subprocess from subprocess import CalledProcessError from .exceptions import ModelAnalyzerException SERVER_OUTPUT_TIMEOUT_SECS = 5 LOGGER = logging.getLogger(__name__) class ModelAnalyzerMode: PROFILE = "profile" ANALYZE = "analyze" REPORT = "report" class ModelAnalyzerReportMode: OFFLINE = "offline" ONLINE = "online" class ModelAnalyzer: """ Concrete Implementation of Model Analyzer interface that runs analyzer locally as a subprocess. """ _analyzer_path = "model-analyzer" def __init__(self, config): """ Parameters ---------- config : AnalyzerConfig the config object containing arguments for this server instance """ self._analyzer_process = None self._analyzer_config = config self._log = None def run(self, mode: str, verbose: bool = False, quiet: bool = False, report_mode: str = None): """ Starts the model analyzer locally """ if self._analyzer_path: cmd = [self._analyzer_path] if verbose: cmd += ["--verbose"] if quiet: cmd += ["--quiet"] if report_mode: cmd += ["-m"] cmd += [report_mode] cmd += [mode] cmd += self._analyzer_config.to_cli_string().split() LOGGER.debug(f"Model Analyzer command: {cmd}") try: subprocess.run(cmd, check=True, start_new_session=True) except CalledProcessError as e: raise ModelAnalyzerException( f"Running {self._analyzer_path} with {e.cmd} failed with" f" exit status {e.returncode} : {e.output}" )
DeepLearningExamples-master
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/model_analyzer/model_analyzer.py
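
Finally, a hedged sketch of driving the analyzer above; it assumes the model-analyzer CLI is installed on PATH, and "config.yaml" is a placeholder profile configuration:

from deployment_toolkit.model_analyzer import (  # assumed import path
    ModelAnalyzer, ModelAnalyzerConfig, ModelAnalyzerMode,
)

config = ModelAnalyzerConfig()
config["config-file"] = "config.yaml"  # placeholder
ModelAnalyzer(config=config).run(mode=ModelAnalyzerMode.PROFILE, verbose=True)
# Executes: model-analyzer --verbose profile -f config.yaml --config-file=config.yaml
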