code
stringlengths
31
1.05M
apis
list
extract_api
stringlengths
97
1.91M
# Copyright (c) 2021 Graphcore Ltd. All rights reserved. import popart import numpy as np import test_util as tu import re @tu.requires_ipu_model def test_groupHostSync(): builder = popart.Builder() a = builder.addInputTensor(popart.TensorInfo("FLOAT16", [1])) w = builder.addInitializedInputTensor(np.ones([1], np.float16)) o = builder.aiOnnx.add([w, a]) l1 = builder.aiGraphcore.l1loss([o], 0.1) anchor_config = { o: popart.AnchorReturnType("All"), l1: popart.AnchorReturnType("All") } dataFlow = popart.DataFlow(1, anchor_config) options = popart.SessionOptions() options.engineOptions = { "debug.instrumentCompute": "true", "debug.instrumentExternalExchange": "true" } options.groupHostSync = True #The option we are testing options.reportOptions = { "showVarStorage": "true", "showPerIpuMemoryUsage": "true", "showExecutionSteps": "true" } patterns = popart.Patterns(popart.PatternsLevel.Minimal) session = popart.InferenceSession(fnModel=builder.getModelProto(), dataFlow=dataFlow, deviceInfo=tu.create_test_device(), userOptions=options, patterns=patterns) session.prepareDevice() session.weightsFromHost() anchors = session.initAnchorArrays() input_a = np.array([1.4], dtype=np.float16) stepio = popart.PyStepIO({a: input_a}, anchors) session.run(stepio) summaryReport = session.getSummaryReport() lines = summaryReport.split('\n') order = [] pastSwitch = False countSeq = 0 # Analyse a sequence: # default order : # Switch # Repeat # StreamCopy (FromHost) x1 # StreamCopy(ToHost) x1 # Add # StreamCopy(ToHost) x2 # Absolute # Reduce # StreamCopy(ToHost) x1 # with the option: # Switch # Repeat # StreamCopy (FromHost) x1 # Add # Absolute # Reduce # StreamCopy(ToHost) x1 for l in lines: if re.search(r"Switch", l): pastSwitch = True if not pastSwitch: continue if re.search(r"Sequence", l): countSeq += 1 if countSeq > 6: break if re.search(r"OnTileExecute: ", l): order.append("execution") if re.search(r"\bStreamCopy(Mid)?\b", l): order.append("streamcopy") # The streamcopy to host should only happen at the end (after # ReduceExpression) # There should be 2 stream copies and some executions assert len(order) > 3 # There should be 1 stream copy at the start and 1 at the end assert order[0] == "streamcopy" assert order[-1] == "streamcopy" # 2 stream copies in total assert order.count("streamcopy") == 2 # Everything else should be execution for i in order[2:-1]: assert i == 'execution'
[ "popart.Patterns", "popart.Builder", "numpy.ones", "popart.AnchorReturnType", "test_util.create_test_device", "numpy.array", "popart.PyStepIO", "popart.TensorInfo", "popart.SessionOptions", "popart.DataFlow", "re.search" ]
[((188, 204), 'popart.Builder', 'popart.Builder', ([], {}), '()\n', (202, 204), False, 'import popart\n'), ((551, 584), 'popart.DataFlow', 'popart.DataFlow', (['(1)', 'anchor_config'], {}), '(1, anchor_config)\n', (566, 584), False, 'import popart\n'), ((600, 623), 'popart.SessionOptions', 'popart.SessionOptions', ([], {}), '()\n', (621, 623), False, 'import popart\n'), ((978, 1023), 'popart.Patterns', 'popart.Patterns', (['popart.PatternsLevel.Minimal'], {}), '(popart.PatternsLevel.Minimal)\n', (993, 1023), False, 'import popart\n'), ((1458, 1491), 'numpy.array', 'np.array', (['[1.4]'], {'dtype': 'np.float16'}), '([1.4], dtype=np.float16)\n', (1466, 1491), True, 'import numpy as np\n'), ((1505, 1543), 'popart.PyStepIO', 'popart.PyStepIO', (['{a: input_a}', 'anchors'], {}), '({a: input_a}, anchors)\n', (1520, 1543), False, 'import popart\n'), ((237, 270), 'popart.TensorInfo', 'popart.TensorInfo', (['"""FLOAT16"""', '[1]'], {}), "('FLOAT16', [1])\n", (254, 270), False, 'import popart\n'), ((314, 338), 'numpy.ones', 'np.ones', (['[1]', 'np.float16'], {}), '([1], np.float16)\n', (321, 338), True, 'import numpy as np\n'), ((455, 485), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (478, 485), False, 'import popart\n'), ((499, 529), 'popart.AnchorReturnType', 'popart.AnchorReturnType', (['"""All"""'], {}), "('All')\n", (522, 529), False, 'import popart\n'), ((2171, 2193), 're.search', 're.search', (['"""Switch"""', 'l'], {}), "('Switch', l)\n", (2180, 2193), False, 'import re\n'), ((2285, 2309), 're.search', 're.search', (['"""Sequence"""', 'l'], {}), "('Sequence', l)\n", (2294, 2309), False, 'import re\n'), ((2400, 2431), 're.search', 're.search', (['"""OnTileExecute: """', 'l'], {}), "('OnTileExecute: ', l)\n", (2409, 2431), False, 'import re\n'), ((2483, 2521), 're.search', 're.search', (['"""\\\\bStreamCopy(Mid)?\\\\b"""', 'l'], {}), "('\\\\bStreamCopy(Mid)?\\\\b', l)\n", (2492, 2521), False, 'import re\n'), ((1202, 1225), 'test_util.create_test_device', 'tu.create_test_device', ([], {}), '()\n', (1223, 1225), True, 'import test_util as tu\n')]
import cPickle import functools import os import fuel import numpy from fuel.schemes import ( ConstantScheme, ShuffledExampleScheme, SequentialExampleScheme, IndexScheme) from fuel.streams import DataStream, AbstractDataStream from fuel.transformers import ( SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge) from fuel import config from lvsr.datasets.h5py import H5PYAudioDataset from blocks.utils import dict_subset import numpy as np import logging from picklable_itertools.iter_dispatch import iter_ from numpy.distutils.misc_util import dict_append from lvsr.utils import resizeArray logger = logging.getLogger(__name__) def switch_first_two_axes(batch): result = [] for array in batch: if array.ndim == 2: result.append(array.transpose(1, 0)) else: result.append(array.transpose(1, 0, 2)) return tuple(result) class _Length(object): def __init__(self, index): self.index = index def __call__(self, example): return len(example[self.index]) class _AddLabel(object): def __init__(self, label, index, append=True, times=1): self.label = label self.append = append self.times = times self.index = index def __call__(self, example): example = list(example) i = self.index if self.append: # Not using `list.append` to avoid having weird mutable # example objects. example[i] = numpy.hstack([example[i], self.times * [self.label]]) else: example[i] = numpy.hstack([self.times * [self.label], example[1]]) return example class _LengthFilter(object): def __init__(self, index, max_length): self.index = index self.max_length = max_length def __call__(self, example): if self.max_length: return 0 < len(example[self.index]) <= self.max_length return True class SoftenResult(object): def __init__(self, data_labels, soften_distributions): self.soften_distributions = [] for label,soften_distribution in soften_distributions.iteritems(): self.soften_distributions += \ [ (data_labels.index(label), soften_distribution[0], soften_distribution[1]) ] def __call__(self, example): example = list(example) for label_id, soften_factor, distribution in self.soften_distributions: data = example[label_id] assert data.ndim == 1, "data to soften must be 1-dimensional" if distribution is not None: mask = np.eye(distribution.shape[0])[data] tile = np.tile(distribution, (data.shape[0], 1)) distribution_matrix = \ (tile + distribution.T[data][:, np.newaxis]/(distribution.shape[0]-1))*soften_factor elif soften_factor >= 0: mask = np.eye(data.shape[0])[data] distribution_matrix = np.tile(1.0/(data.shape[0]-1), (data.shape[0], data.shape[0]))*soften_factor else: mask = np.zeros((data.shape[0], data.shape[0])) distribution_matrix = np.tile(1.0/(data.shape[0]), (data.shape[0], data.shape[0])) result = distribution_matrix * -(mask - 1) + (1-soften_factor)*mask result = result.astype(np.float32) example[label_id] = result example = tuple(example) return example class ConvertToMask(object): def __init__(self, data_labels, source, mask_size): self.source_id = data_labels.index(source) self.mask_size = mask_size def __call__(self, example): sample = example[self.source_id] def convert_single(x): out = np.zeros((self.mask_size,), dtype=np.int32) out[ x[x>-1] ] = 1 return out sample = np.apply_along_axis(convert_single, 1, sample) return example[:self.source_id] + (sample,) + example[(self.source_id+1):] class ForceCContiguous(Transformer): """Force all floating point numpy arrays to be floatX.""" def __init__(self, data_stream): super(ForceCContiguous, self).__init__( data_stream, 
axis_labels=data_stream.axis_labels) def get_data(self, request=None): if request is not None: raise ValueError data = next(self.child_epoch_iterator) result = [] for piece in data: if isinstance(piece, numpy.ndarray): result.append(numpy.ascontiguousarray(piece)) else: result.append(piece) return tuple(result) class GlobalPadding(Transformer): def __init__(self, data_stream, mask_sources=None, mask_dtype=None, **kwargs): if data_stream.produces_examples: raise ValueError('the wrapped data stream must produce batches of ' 'examples, not examples') super(GlobalPadding, self).__init__( data_stream, produces_examples=False, **kwargs) if mask_sources is None: mask_sources = self.data_stream.sources self.mask_sources = mask_sources if mask_dtype is None: self.mask_dtype = config.floatX else: self.mask_dtype = mask_dtype @property def sources(self): sources = [] for source in self.data_stream.sources: sources.append(source) if source in self.mask_sources: sources.append(source + '_mask') return tuple(sources) def transform_batch(self, batch): batch_with_masks = [] max_length = 0 for data in batch: max_sequence_length = max([numpy.asarray(sample).shape[0] for sample in data]) max_length = max(max_length, max_sequence_length) for i, (source, source_batch) in enumerate( zip(self.data_stream.sources, batch)): if source not in self.mask_sources: batch_with_masks.append(source_batch) continue shapes = [numpy.asarray(sample).shape for sample in source_batch] lengths = [shape[0] for shape in shapes] rest_shape = shapes[0][1:] if not all([shape[1:] == rest_shape for shape in shapes]): assert all([shape[0] == shape[1] for shape in shapes]),\ "Arrays must be quadratic" dtype = numpy.asarray(source_batch[0]).dtype padded_batch = numpy.zeros( (len(source_batch), max_length, max_length), dtype=dtype) for i, sample in enumerate(source_batch): padded_batch[i, :len(sample), :len(sample)] = sample else: dtype = numpy.asarray(source_batch[0]).dtype padded_batch = numpy.zeros( (len(source_batch), max_length) + rest_shape, dtype=dtype) for i, sample in enumerate(source_batch): padded_batch[i, :len(sample)] = sample batch_with_masks.append(padded_batch) mask = numpy.zeros((len(source_batch), max_length), self.mask_dtype) for i, sequence_length in enumerate(lengths): mask[i, :sequence_length] = 1 batch_with_masks.append(mask) return tuple(batch_with_masks) class Data(object): """Dataset manager. This class is in charge of accessing different datasets and building preprocessing pipelines. Parameters ---------- dataset_filename : str Dataset file name. name_mapping : dict A map from conceptual split names (train, test) into concrete split names (e.g. 93eval). sources_map: dict A map from conceptual source names, such as "labels" or "recordings" into names of dataset entries. batch_size : int Batch size. validation_batch_size : int Batch size used for validation. sort_k_batches : int max_length : int Maximum length of input, longer sequences will be filtered. normalization : str Normalization file name to use. add_eos : bool Add end of sequence symbol. add_bos : int Add this many beginning-of-sequence tokens. eos_label : int Label to use for eos symbol. 
default_sources : list Default sources to include in created datasets dataset_class : object Class for this particulat dataset kind (WSJ, TIMIT) """ def __init__(self, dataset_filename, name_mapping, sources_map, batch_size, validation_batch_size=None, sort_k_batches=None, max_length=None, normalization=None, add_eos=True, eos_label=None, add_bos=0, prepend_eos=False, default_sources=None, dataset_class=H5PYAudioDataset): assert not prepend_eos if normalization: with open(normalization, "rb") as src: normalization = cPickle.load(src) self.dataset_filename = dataset_filename self.dataset_class = dataset_class self.name_mapping = name_mapping self.sources_map = sources_map if default_sources is None: logger.warn( "The Data class was provided with no default_sources.\n" "All instantiated Datasets or Datastreams will use all " "available sources.\n") self.default_sources = sources_map.keys() self.normalization = normalization self.batch_size = batch_size if validation_batch_size is None: validation_batch_size = batch_size self.validation_batch_size = validation_batch_size self.sort_k_batches = sort_k_batches self.max_length = max_length self.add_eos = add_eos self.prepend_eos = prepend_eos self._eos_label = eos_label self.add_bos = add_bos self.dataset_cache = {} # # Hardcode the number of source for length at 0 # this typixcally works, as main.get_net_config # will properly set default_sources, such that the label is last # Unfortunately, we cannot query for a source name, as the # list of sources will differ.... # self.length_filter = _LengthFilter( index=0, max_length=self.max_length) @property def info_dataset(self): return self.get_dataset("train") @property def num_labels(self): return self.info_dataset.num_characters @property def character_map(self): return self.info_dataset.char2num def num_features(self, feature_name): return self.info_dataset.num_features(feature_name) @property def eos_label(self): if self._eos_label: return self._eos_label return self.info_dataset.eos_label @property def bos_label(self): return self.info_dataset.bos_label def decode(self, labels): return self.info_dataset.decode(labels) def pretty_print(self, labels, example): return self.info_dataset.pretty_print(labels, example) def get_dataset(self, part, add_sources=()): """Returns dataset from the cache or creates a new one""" sources = [] for src in self.default_sources + list(add_sources): sources.append(self.sources_map[src]) sources = tuple(sources) key = (part, sources) if key not in self.dataset_cache: self.dataset_cache[key] = self.dataset_class( file_or_path=os.path.join(fuel.config.data_path[0], self.dataset_filename), which_sets=(self.name_mapping.get(part, part), ), sources_map=self.sources_map, sources=sources) return self.dataset_cache[key] def get_stream(self, part, batches=True, shuffle=True, add_sources=(), num_examples=None, rng=None, seed=None): dataset = self.get_dataset(part, add_sources=add_sources) if num_examples is None: num_examples = dataset.num_examples if shuffle: iteration_scheme = ShuffledExampleScheme(num_examples, rng=rng) else: iteration_scheme = SequentialExampleScheme(num_examples) stream = DataStream( dataset, iteration_scheme=iteration_scheme) if self.add_eos: stream = Mapping(stream, _AddLabel( self.eos_label, index=stream.sources.index(self.sources_map['labels']))) if self.add_bos: if self.bos_label is None: raise Exception('No bos label given') stream = Mapping(stream, _AddLabel( self.bos_label, append=False, times=self.add_bos, 
index=stream.sources.index(self.sources_map['labels']))) if self.max_length: stream = Filter(stream, self.length_filter) if self.sort_k_batches and batches: stream = Batch(stream, iteration_scheme=ConstantScheme( self.batch_size * self.sort_k_batches)) # # Hardcode 0 for source on which to sort. This will be good, as # most source lengths are correlated and, furthermore, the # labels will typically be the last source, thus in a single-input # case this sorts on input lengths # stream = Mapping(stream, SortMapping(_Length( index=0))) stream = Unpack(stream) if self.normalization: stream = self.normalization.wrap_stream(stream) stream = ForceFloatX(stream) stream = Rename(stream, names=dict_subset({v: k for (k, v) in self.sources_map.items()}, stream.sources, must_have=False)) if not batches: return stream stream = Batch( stream, iteration_scheme=ConstantScheme(self.batch_size if part == 'train' else self.validation_batch_size)) stream = Padding(stream) stream = Mapping(stream, switch_first_two_axes) stream = ForceCContiguous(stream) stream._produces_examples = False return stream class PostfixManager: def __init__(self, languages, name_mapping): self.langs = languages self.name_mapping = name_mapping def combine_part_lang(self, part, lang): part = self.name_mapping.get(part, part) if lang != self.langs[0]: return part+'_'+lang else: return part def get_lang_postfix(self, lang): assert lang in self.langs if lang == self.langs[0]: return '' else: return '_'+lang def embed_lang_in_source(self, source, lang): postfix = self.get_lang_postfix(lang) if source.endswith('_mask'): return source[:-5]+postfix+'_mask' else: return source+postfix class MultilangData(Data): _binary_convertable_data = ['features_per_word'] def __init__(self, languages, *args, **kwargs): assert len(languages) > 0 self.langs = languages super(MultilangData, self).__init__(*args, **kwargs) self.postfix_manager = PostfixManager(languages, self.name_mapping) def combine_part_lang(self, part, lang): return self.postfix_manager.combine_part_lang(part, lang) def get_lang_postfix(self, lang): return self.postfix_manager.get_lang_postfix(lang) def embed_lang_in_source(self, source, lang): return self.postfix_manager.embed_lang_in_source(source, lang) @property def info_dataset(self): return self.get_dataset("train", self.langs[0]) def get_dataset(self, part, lang, add_sources=()): """Returns dataset from the cache or creates a new one""" part = self.combine_part_lang(part, lang) sources = [] for src in self.default_sources + list(add_sources): sources.append(self.sources_map[src]) sources = tuple(sources) key = (part, sources) if key not in self.dataset_cache: self.dataset_cache[key] = self.dataset_class( file_or_path=os.path.join(fuel.config.data_path[0], self.dataset_filename), which_sets=(self.name_mapping.get(part, part), ), sources_map=self.sources_map, sources=sources) return self.dataset_cache[key] def get_one_stream(self, part, lang=None, batches=True, shuffle=True, add_sources=(), num_examples=None, rng=None, seed=None, num_result=None, soften_distributions=None, only_stream=False): assert lang in self.langs dataset = self.get_dataset(part, lang, add_sources=add_sources) if num_examples is None: num_examples = dataset.num_examples if shuffle: iteration_scheme = ShuffledExampleScheme(num_examples, rng=rng) else: iteration_scheme = SequentialExampleScheme(num_examples) if num_result is None: num_result = num_examples if lang != self.langs[0] and not only_stream: iteration_scheme = RandomExampleScheme(num_examples, 
num_result=num_result, rng=rng) stream = DataStream( dataset, iteration_scheme=iteration_scheme) if soften_distributions: stream = Mapping(stream, SoftenResult(self.default_sources, soften_distributions)) for bconv in self._binary_convertable_data: if bconv in self.default_sources: stream = Mapping(stream, ConvertToMask(self.default_sources, bconv, self.num_features(bconv))) if self.add_eos: stream = Mapping(stream, _AddLabel( self.eos_label, index=stream.sources.index(self.sources_map['labels']))) if self.add_bos: if self.bos_label is None: raise Exception('No bos label given') stream = Mapping(stream, _AddLabel( self.bos_label, append=False, times=self.add_bos, index=stream.sources.index(self.sources_map['labels']))) if self.max_length: stream = Filter(stream, self.length_filter) if self.sort_k_batches and batches: stream = Batch(stream, iteration_scheme=ConstantScheme( self.batch_size * self.sort_k_batches)) # # Hardcode 0 for source on which to sort. This will be good, as # most source lengths are correlated and, furthermore, the # labels will typically be the last source, thus in a single-input # case this sorts on input lengths # stream = Mapping(stream, SortMapping(_Length( index=0))) stream = Unpack(stream) if self.normalization: stream = self.normalization.wrap_stream(stream) stream = ForceFloatX(stream) stream = Rename(stream, names=dict_subset({v: k for (k, v) in self.sources_map.items()}, stream.sources, must_have=False)) if not batches: return stream, num_examples stream = Batch( stream, iteration_scheme=ConstantScheme(self.batch_size if part == 'train' else self.validation_batch_size)) stream._produces_examples = False return stream, num_examples def get_stream(self, *args, **kwargs): lang_streams = [] sources = [] num_result=None for lang in self.langs: kwargs['lang'] = lang stream,num_examples = self.get_one_stream(*args, num_result=num_result, **kwargs) lang_streams += [stream] if lang == self.langs[0]: num_result = num_examples sources += [self.embed_lang_in_source(source, lang) for source in lang_streams[-1].sources] if kwargs.get('batches', True): stream = Merge(lang_streams, sources) stream._produces_examples = False stream = GlobalPadding(stream) stream = Mapping(stream, switch_first_two_axes) stream = ForceCContiguous(stream) else: stream = Merge(lang_streams, sources) return stream class RandomExampleScheme(IndexScheme): """Shuffled examples iterator. Returns examples in random order. """ def __init__(self, *args, **kwargs): self.rng = kwargs.pop('rng', None) self.num_result = kwargs.pop('num_result') if self.rng is None: self.rng = numpy.random.RandomState() super(RandomExampleScheme, self).__init__(*args, **kwargs) def get_request_iterator(self): indices = list(self.indices) #self.rng.shuffle(indices) if indices != []: indices = self.rng.choice(indices, self.num_result) return iter_(indices)
[ "fuel.transformers.Filter", "cPickle.load", "fuel.transformers.Padding", "numpy.tile", "os.path.join", "picklable_itertools.iter_dispatch.iter_", "numpy.random.RandomState", "numpy.apply_along_axis", "fuel.transformers.ForceFloatX", "fuel.transformers.Merge", "fuel.schemes.ShuffledExampleScheme", "fuel.schemes.SequentialExampleScheme", "numpy.asarray", "fuel.transformers.Mapping", "fuel.streams.DataStream", "numpy.hstack", "fuel.schemes.ConstantScheme", "fuel.transformers.Unpack", "numpy.zeros", "numpy.eye", "numpy.ascontiguousarray", "logging.getLogger" ]
[((676, 703), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (693, 703), False, 'import logging\n'), ((3956, 4002), 'numpy.apply_along_axis', 'np.apply_along_axis', (['convert_single', '(1)', 'sample'], {}), '(convert_single, 1, sample)\n', (3975, 4002), True, 'import numpy as np\n'), ((12635, 12689), 'fuel.streams.DataStream', 'DataStream', (['dataset'], {'iteration_scheme': 'iteration_scheme'}), '(dataset, iteration_scheme=iteration_scheme)\n', (12645, 12689), False, 'from fuel.streams import DataStream, AbstractDataStream\n'), ((14014, 14033), 'fuel.transformers.ForceFloatX', 'ForceFloatX', (['stream'], {}), '(stream)\n', (14025, 14033), False, 'from fuel.transformers import SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge\n'), ((14585, 14600), 'fuel.transformers.Padding', 'Padding', (['stream'], {}), '(stream)\n', (14592, 14600), False, 'from fuel.transformers import SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge\n'), ((14618, 14656), 'fuel.transformers.Mapping', 'Mapping', (['stream', 'switch_first_two_axes'], {}), '(stream, switch_first_two_axes)\n', (14625, 14656), False, 'from fuel.transformers import SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge\n'), ((17930, 17984), 'fuel.streams.DataStream', 'DataStream', (['dataset'], {'iteration_scheme': 'iteration_scheme'}), '(dataset, iteration_scheme=iteration_scheme)\n', (17940, 17984), False, 'from fuel.streams import DataStream, AbstractDataStream\n'), ((19758, 19777), 'fuel.transformers.ForceFloatX', 'ForceFloatX', (['stream'], {}), '(stream)\n', (19769, 19777), False, 'from fuel.transformers import SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge\n'), ((21930, 21944), 'picklable_itertools.iter_dispatch.iter_', 'iter_', (['indices'], {}), '(indices)\n', (21935, 21944), False, 'from picklable_itertools.iter_dispatch import iter_\n'), ((1540, 1593), 'numpy.hstack', 'numpy.hstack', (['[example[i], self.times * [self.label]]'], {}), '([example[i], self.times * [self.label]])\n', (1552, 1593), False, 'import numpy\n'), ((1633, 1686), 'numpy.hstack', 'numpy.hstack', (['[self.times * [self.label], example[1]]'], {}), '([self.times * [self.label], example[1]])\n', (1645, 1686), False, 'import numpy\n'), ((3841, 3884), 'numpy.zeros', 'np.zeros', (['(self.mask_size,)'], {'dtype': 'np.int32'}), '((self.mask_size,), dtype=np.int32)\n', (3849, 3884), True, 'import numpy as np\n'), ((12489, 12533), 'fuel.schemes.ShuffledExampleScheme', 'ShuffledExampleScheme', (['num_examples'], {'rng': 'rng'}), '(num_examples, rng=rng)\n', (12510, 12533), False, 'from fuel.schemes import ConstantScheme, ShuffledExampleScheme, SequentialExampleScheme, IndexScheme\n'), ((12579, 12616), 'fuel.schemes.SequentialExampleScheme', 'SequentialExampleScheme', (['num_examples'], {}), '(num_examples)\n', (12602, 12616), False, 'from fuel.schemes import ConstantScheme, ShuffledExampleScheme, SequentialExampleScheme, IndexScheme\n'), ((13237, 13271), 'fuel.transformers.Filter', 'Filter', (['stream', 'self.length_filter'], {}), '(stream, self.length_filter)\n', (13243, 13271), False, 'from fuel.transformers import SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge\n'), ((13890, 13904), 'fuel.transformers.Unpack', 'Unpack', (['stream'], {}), 
'(stream)\n', (13896, 13904), False, 'from fuel.transformers import SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge\n'), ((17562, 17606), 'fuel.schemes.ShuffledExampleScheme', 'ShuffledExampleScheme', (['num_examples'], {'rng': 'rng'}), '(num_examples, rng=rng)\n', (17583, 17606), False, 'from fuel.schemes import ConstantScheme, ShuffledExampleScheme, SequentialExampleScheme, IndexScheme\n'), ((17652, 17689), 'fuel.schemes.SequentialExampleScheme', 'SequentialExampleScheme', (['num_examples'], {}), '(num_examples)\n', (17675, 17689), False, 'from fuel.schemes import ConstantScheme, ShuffledExampleScheme, SequentialExampleScheme, IndexScheme\n'), ((18981, 19015), 'fuel.transformers.Filter', 'Filter', (['stream', 'self.length_filter'], {}), '(stream, self.length_filter)\n', (18987, 19015), False, 'from fuel.transformers import SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge\n'), ((19634, 19648), 'fuel.transformers.Unpack', 'Unpack', (['stream'], {}), '(stream)\n', (19640, 19648), False, 'from fuel.transformers import SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge\n'), ((20996, 21024), 'fuel.transformers.Merge', 'Merge', (['lang_streams', 'sources'], {}), '(lang_streams, sources)\n', (21001, 21024), False, 'from fuel.transformers import SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge\n'), ((21135, 21173), 'fuel.transformers.Mapping', 'Mapping', (['stream', 'switch_first_two_axes'], {}), '(stream, switch_first_two_axes)\n', (21142, 21173), False, 'from fuel.transformers import SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge\n'), ((21255, 21283), 'fuel.transformers.Merge', 'Merge', (['lang_streams', 'sources'], {}), '(lang_streams, sources)\n', (21260, 21283), False, 'from fuel.transformers import SortMapping, Padding, ForceFloatX, Batch, Mapping, Unpack, Filter, FilterSources, Transformer, Rename, Merge\n'), ((21622, 21648), 'numpy.random.RandomState', 'numpy.random.RandomState', ([], {}), '()\n', (21646, 21648), False, 'import numpy\n'), ((2756, 2797), 'numpy.tile', 'np.tile', (['distribution', '(data.shape[0], 1)'], {}), '(distribution, (data.shape[0], 1))\n', (2763, 2797), True, 'import numpy as np\n'), ((9255, 9272), 'cPickle.load', 'cPickle.load', (['src'], {}), '(src)\n', (9267, 9272), False, 'import cPickle\n'), ((14440, 14527), 'fuel.schemes.ConstantScheme', 'ConstantScheme', (["(self.batch_size if part == 'train' else self.validation_batch_size)"], {}), "(self.batch_size if part == 'train' else self.\n validation_batch_size)\n", (14454, 14527), False, 'from fuel.schemes import ConstantScheme, ShuffledExampleScheme, SequentialExampleScheme, IndexScheme\n'), ((20198, 20285), 'fuel.schemes.ConstantScheme', 'ConstantScheme', (["(self.batch_size if part == 'train' else self.validation_batch_size)"], {}), "(self.batch_size if part == 'train' else self.\n validation_batch_size)\n", (20212, 20285), False, 'from fuel.schemes import ConstantScheme, ShuffledExampleScheme, SequentialExampleScheme, IndexScheme\n'), ((2697, 2726), 'numpy.eye', 'np.eye', (['distribution.shape[0]'], {}), '(distribution.shape[0])\n', (2703, 2726), True, 'import numpy as np\n'), ((3183, 3223), 'numpy.zeros', 'np.zeros', (['(data.shape[0], data.shape[0])'], {}), '((data.shape[0], data.shape[0]))\n', (3191, 3223), True, 
'import numpy as np\n'), ((3262, 3322), 'numpy.tile', 'np.tile', (['(1.0 / data.shape[0])', '(data.shape[0], data.shape[0])'], {}), '(1.0 / data.shape[0], (data.shape[0], data.shape[0]))\n', (3269, 3322), True, 'import numpy as np\n'), ((4606, 4636), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['piece'], {}), '(piece)\n', (4629, 4636), False, 'import numpy\n'), ((6202, 6223), 'numpy.asarray', 'numpy.asarray', (['sample'], {}), '(sample)\n', (6215, 6223), False, 'import numpy\n'), ((6565, 6595), 'numpy.asarray', 'numpy.asarray', (['source_batch[0]'], {}), '(source_batch[0])\n', (6578, 6595), False, 'import numpy\n'), ((6918, 6948), 'numpy.asarray', 'numpy.asarray', (['source_batch[0]'], {}), '(source_batch[0])\n', (6931, 6948), False, 'import numpy\n'), ((11865, 11926), 'os.path.join', 'os.path.join', (['fuel.config.data_path[0]', 'self.dataset_filename'], {}), '(fuel.config.data_path[0], self.dataset_filename)\n', (11877, 11926), False, 'import os\n'), ((13396, 13449), 'fuel.schemes.ConstantScheme', 'ConstantScheme', (['(self.batch_size * self.sort_k_batches)'], {}), '(self.batch_size * self.sort_k_batches)\n', (13410, 13449), False, 'from fuel.schemes import ConstantScheme, ShuffledExampleScheme, SequentialExampleScheme, IndexScheme\n'), ((16801, 16862), 'os.path.join', 'os.path.join', (['fuel.config.data_path[0]', 'self.dataset_filename'], {}), '(fuel.config.data_path[0], self.dataset_filename)\n', (16813, 16862), False, 'import os\n'), ((19140, 19193), 'fuel.schemes.ConstantScheme', 'ConstantScheme', (['(self.batch_size * self.sort_k_batches)'], {}), '(self.batch_size * self.sort_k_batches)\n', (19154, 19193), False, 'from fuel.schemes import ConstantScheme, ShuffledExampleScheme, SequentialExampleScheme, IndexScheme\n'), ((2999, 3020), 'numpy.eye', 'np.eye', (['data.shape[0]'], {}), '(data.shape[0])\n', (3005, 3020), True, 'import numpy as np\n'), ((3065, 3131), 'numpy.tile', 'np.tile', (['(1.0 / (data.shape[0] - 1))', '(data.shape[0], data.shape[0])'], {}), '(1.0 / (data.shape[0] - 1), (data.shape[0], data.shape[0]))\n', (3072, 3131), True, 'import numpy as np\n'), ((5818, 5839), 'numpy.asarray', 'numpy.asarray', (['sample'], {}), '(sample)\n', (5831, 5839), False, 'import numpy\n')]
import sys sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages') # in order to import cv2 under python3 import cv2 import glob import matplotlib.pyplot as plt import pickle import numpy as np import matplotlib.image as mpimg # """ # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) objp = np.zeros((6*9,3), np.float32) objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2) # Arrays to store object points and image points from all the images. objpoints = [] # 3d points in real world space imgpoints = [] # 2d points in image plane. # Make a list of calibration images images = glob.glob('camera_cal/calibration*.jpg') # Step through the list and search for chessboard corners for fname in images: img = cv2.imread(fname) gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) # Find the chessboard corners ret, corners = cv2.findChessboardCorners(gray, (9,6),None) # print (ret) # If found, add object points, image points if ret == True: objpoints.append(objp) imgpoints.append(corners) # print (objpoints) # Draw and display the corners img = cv2.drawChessboardCorners(img, (9,6), corners, ret) # cv2.imshow('img',img) # cv2.waitKey(500) # plt.show() img = mpimg.imread('camera_cal/calibration1.jpg') ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None) dst = cv2.undistort(img, mtx, dist, None, mtx) f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9)) f.tight_layout() ax1.set_title('Original Image', fontsize=50) ax1.imshow(img) ax2.set_title('Undistorted Image', fontsize=50) ax2.imshow(dst) plt.savefig('output_images/undistorted_chess.jpg') # cv2.waitKey(500) plt.show() # Save the camera calibration result for later use (we won't worry about rvecs / tvecs) # dist_pickle = {} # dist_pickle["mtx"] = mtx # dist_pickle["dist"] = dist # pickle.dump( dist_pickle, open( "wide_dist_pickle.p", "wb" ) ) # cv2.destroyAllWindows() # """
[ "matplotlib.image.imread", "sys.path.remove", "matplotlib.pyplot.show", "cv2.findChessboardCorners", "cv2.cvtColor", "numpy.zeros", "cv2.imread", "cv2.calibrateCamera", "glob.glob", "cv2.drawChessboardCorners", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "cv2.undistort" ]
[((11, 74), 'sys.path.remove', 'sys.path.remove', (['"""/opt/ros/kinetic/lib/python2.7/dist-packages"""'], {}), "('/opt/ros/kinetic/lib/python2.7/dist-packages')\n", (26, 74), False, 'import sys\n'), ((319, 351), 'numpy.zeros', 'np.zeros', (['(6 * 9, 3)', 'np.float32'], {}), '((6 * 9, 3), np.float32)\n', (327, 351), True, 'import numpy as np\n'), ((603, 643), 'glob.glob', 'glob.glob', (['"""camera_cal/calibration*.jpg"""'], {}), "('camera_cal/calibration*.jpg')\n", (612, 643), False, 'import glob\n'), ((1280, 1323), 'matplotlib.image.imread', 'mpimg.imread', (['"""camera_cal/calibration1.jpg"""'], {}), "('camera_cal/calibration1.jpg')\n", (1292, 1323), True, 'import matplotlib.image as mpimg\n'), ((1356, 1427), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'gray.shape[::-1]', 'None', 'None'], {}), '(objpoints, imgpoints, gray.shape[::-1], None, None)\n', (1375, 1427), False, 'import cv2\n'), ((1434, 1474), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (1447, 1474), False, 'import cv2\n'), ((1493, 1528), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(24, 9)'}), '(1, 2, figsize=(24, 9))\n', (1505, 1528), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1722), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/undistorted_chess.jpg"""'], {}), "('output_images/undistorted_chess.jpg')\n", (1683, 1722), True, 'import matplotlib.pyplot as plt\n'), ((1742, 1752), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1750, 1752), True, 'import matplotlib.pyplot as plt\n'), ((734, 751), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (744, 751), False, 'import cv2\n'), ((763, 800), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (775, 800), False, 'import cv2\n'), ((854, 899), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(9, 6)', 'None'], {}), '(gray, (9, 6), None)\n', (879, 899), False, 'import cv2\n'), ((1132, 1184), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['img', '(9, 6)', 'corners', 'ret'], {}), '(img, (9, 6), corners, ret)\n', (1157, 1184), False, 'import cv2\n')]
# I this file will take in the time_data.yml files and plot import yaml import matplotlib.pyplot as plt import sys import numpy as np def read_yaml_file(filename): with open(filename, 'r') as f: data = yaml.load(f) return(data) def array_to_dist(array): return(np.mean(array), np.std(array)) def main(): fn = sys.argv[1] data = read_yaml_file(fn) names = ['CG11', 'CG21', 'CG31'] keys = dct.keys() for name in names: spec_keys = list(filter(lambda ns: name in ns, names)) for sk in spec_keys: dist = data[sk] mean, std = array_to_dist(dist) if __name__ == "__main__": main()
[ "numpy.std", "yaml.load", "numpy.mean" ]
[((216, 228), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (225, 228), False, 'import yaml\n'), ((284, 298), 'numpy.mean', 'np.mean', (['array'], {}), '(array)\n', (291, 298), True, 'import numpy as np\n'), ((300, 313), 'numpy.std', 'np.std', (['array'], {}), '(array)\n', (306, 313), True, 'import numpy as np\n')]
from __future__ import division import torch import torch.nn as nn from .base import BaseDetector from .test_mixins import RPNTestMixin from .. import builder from ..registry import DETECTORS from mmdet.core import (assign_and_sample, bbox2roi, bbox2result, multi_apply, merge_aug_masks) import numpy as np import pickle from ..utils import ConvModule import torch.nn.functional as F @DETECTORS.register_module class ReasoningRCNN(BaseDetector, RPNTestMixin): def __init__(self, num_stages, backbone, neck=None, upper_neck=None, rpn_head=None, bbox_roi_extractor=None, bbox_head=None, mask_roi_extractor=None, mask_head=None, train_cfg=None, test_cfg=None, pretrained=None, adj_gt=None, graph_out_channels=256, normalize=None, roi_feat_size=7, shared_num_fc=2): assert bbox_roi_extractor is not None assert bbox_head is not None super(ReasoningRCNN, self).__init__() self.num_stages = num_stages self.backbone = builder.build_backbone(backbone) if neck is not None: self.neck = builder.build_neck(neck) else: assert upper_neck is not None if rpn_head is not None: self.rpn_head = builder.build_rpn_head(rpn_head) if upper_neck is not None: if isinstance(upper_neck, list): self.upper_neck = nn.ModuleList() assert len(upper_neck) == self.num_stages for neck in upper_neck: self.upper_neck.append(builder.build_upper_neck(neck)) else: self.upper_neck = builder.build_upper_neck(upper_neck) if bbox_head is not None: self.bbox_roi_extractor = nn.ModuleList() self.bbox_head = nn.ModuleList() if not isinstance(bbox_roi_extractor, list): bbox_roi_extractor = [ bbox_roi_extractor for _ in range(num_stages) ] if not isinstance(bbox_head, list): bbox_head = [bbox_head for _ in range(num_stages)] assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages for roi_extractor, head in zip(bbox_roi_extractor, bbox_head): self.bbox_roi_extractor.append( builder.build_roi_extractor(roi_extractor)) self.bbox_head.append(builder.build_bbox_head(head)) if mask_head is not None: self.mask_head = nn.ModuleList() if not isinstance(mask_head, list): mask_head = [mask_head for _ in range(num_stages)] assert len(mask_head) == self.num_stages for head in mask_head: self.mask_head.append(builder.build_mask_head(head)) if mask_roi_extractor is not None: self.mask_roi_extractor = nn.ModuleList() if not isinstance(mask_roi_extractor, list): mask_roi_extractor = [ mask_roi_extractor for _ in range(num_stages) ] assert len(mask_roi_extractor) == self.num_stages for roi_extractor in mask_roi_extractor: self.mask_roi_extractor.append( builder.build_roi_extractor(roi_extractor)) self.normalize = normalize self.with_bias = normalize is None if adj_gt is not None: self.adj_gt = pickle.load(open(adj_gt, 'rb')) self.adj_gt = np.float32(self.adj_gt) self.adj_gt = nn.Parameter(torch.from_numpy(self.adj_gt), requires_grad=False) # init cmp attention self.cmp_attention = nn.ModuleList() self.cmp_attention.append( ConvModule(1024, 1024 // 16, 3, stride=2, padding=1, normalize=self.normalize, bias=self.with_bias)) self.cmp_attention.append( nn.Linear(1024 // 16, bbox_head[0]['in_channels'] + 1)) # init graph w self.graph_out_channels = graph_out_channels self.graph_weight_fc = nn.Linear(bbox_head[0]['in_channels'] + 1, self.graph_out_channels) self.relu = nn.ReLU(inplace=True) # shared upper neck in_channels = rpn_head['in_channels'] if shared_num_fc > 0: in_channels *= (roi_feat_size * roi_feat_size) self.branch_fcs = nn.ModuleList() for i in range(shared_num_fc): fc_in_channels = (in_channels if i == 0 else bbox_head[0]['in_channels']) 
self.branch_fcs.append( nn.Linear(fc_in_channels, bbox_head[0]['in_channels'])) self.train_cfg = train_cfg self.test_cfg = test_cfg self.init_weights(pretrained=pretrained) @property def with_rpn(self): return hasattr(self, 'rpn_head') and self.rpn_head is not None def init_weights(self, pretrained=None): super(ReasoningRCNN, self).init_weights(pretrained) self.backbone.init_weights(pretrained=pretrained) if self.with_neck: if isinstance(self.neck, nn.Sequential): for m in self.neck: m.init_weights() else: self.neck.init_weights() if self.with_rpn: self.rpn_head.init_weights() for i in range(self.num_stages): if self.with_bbox: self.bbox_roi_extractor[i].init_weights() self.bbox_head[i].init_weights() if self.with_mask_roi_extractor: self.mask_roi_extractor[i].init_weights() if self.with_mask: self.mask_head[i].init_weights() def extract_feat(self, img): x = self.backbone(img) if self.with_neck: x = self.neck(x) return x def forward_upper_neck(self, x, stage): if self.with_share_upper_neck: x = self.upper_neck(x) elif self.with_unshare_upper_neck: x = self.upper_neck[stage](x) return x def forward_train(self, img, img_meta, gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks=None, proposals=None): x = self.extract_feat(img) # precmp attention if len(x) > 1: base_feat = [] for b_f in x[1:]: base_feat.append( F.interpolate(b_f, scale_factor=(x[2].size(2) / b_f.size(2), x[2].size(3) / b_f.size(3)))) base_feat = torch.cat(base_feat, 1) else: base_feat = torch.cat(x, 1) for ops in self.cmp_attention: base_feat = ops(base_feat) if len(base_feat.size()) > 2: base_feat = base_feat.mean(3).mean(2) else: base_feat = self.relu(base_feat) losses = dict() if self.with_rpn: rpn_outs = self.rpn_head(x) rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn) rpn_losses = self.rpn_head.loss(*rpn_loss_inputs) losses.update(rpn_losses) proposal_inputs = rpn_outs + (img_meta, self.test_cfg.rpn) proposal_list = self.rpn_head.get_proposals(*proposal_inputs) else: proposal_list = proposals for i in range(self.num_stages): rcnn_train_cfg = self.train_cfg.rcnn[i] lw = self.train_cfg.stage_loss_weights[i] # add reasoning process if i > 0: # 1.build global semantic pool global_semantic_pool = torch.cat((bbox_head.fc_cls.weight, bbox_head.fc_cls.bias.unsqueeze(1)), 1).detach() # 2.compute graph attention attention_map = nn.Softmax(1)(torch.mm(base_feat, torch.transpose(global_semantic_pool, 0, 1))) # 3.adaptive global reasoning alpha_em = attention_map.unsqueeze(-1) * torch.mm(self.adj_gt, global_semantic_pool).unsqueeze(0) alpha_em = alpha_em.view(-1, global_semantic_pool.size(-1)) alpha_em = self.graph_weight_fc(alpha_em) alpha_em = self.relu(alpha_em) # enhanced_feat = torch.mm(nn.Softmax(1)(cls_score), alpha_em) n_classes = bbox_head.fc_cls.weight.size(0) cls_prob = nn.Softmax(1)(cls_score).view(len(img_meta), -1, n_classes) enhanced_feat = torch.bmm(cls_prob, alpha_em.view(len(img_meta), -1, self.graph_out_channels)) enhanced_feat = enhanced_feat.view(-1, self.graph_out_channels) # assign gts and sample proposals assign_results, sampling_results = multi_apply( assign_and_sample, proposal_list, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg=rcnn_train_cfg) # bbox head forward and loss bbox_roi_extractor = self.bbox_roi_extractor[i] bbox_head = self.bbox_head[i] rois, rois_index = bbox2roi( [(res.pos_bboxes, res.neg_bboxes) for res in sampling_results], return_index=True) bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois) # without upperneck bbox_feats = 
bbox_feats.view(bbox_feats.size(0), -1) for fc in self.branch_fcs: bbox_feats = self.relu(fc(bbox_feats)) # cat with enhanced feature if i > 0: bbox_feats = torch.cat([bbox_feats, enhanced_feat], 1) cls_score, bbox_pred = bbox_head(bbox_feats) bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets) for name, value in loss_bbox.items(): losses['s{}.{}'.format( i, name)] = (value * lw if 'loss' in name else value) # mask head forward and loss if self.with_mask: if self.with_mask_roi_extractor: mask_roi_extractor = self.mask_roi_extractor[i] pos_rois = bbox2roi( [res.pos_bboxes for res in sampling_results]) mask_feats = mask_roi_extractor( x[:mask_roi_extractor.num_inputs], pos_rois) mask_feats = self.forward_upper_neck(mask_feats, i) else: pos_inds = (rois_index == 0) mask_feats = bbox_feats[pos_inds] mask_head = self.mask_head[i] mask_pred = mask_head(mask_feats) mask_targets = mask_head.get_target(sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat( [res.pos_gt_labels for res in sampling_results]) loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels) for name, value in loss_mask.items(): losses['s{}.{}'.format( i, name)] = (value * lw if 'loss' in name else value) # refine bboxes if i < self.num_stages - 1: pos_is_gts = [res.pos_is_gt for res in sampling_results] roi_labels = bbox_targets[0] # bbox_targets is a tuple with torch.no_grad(): proposal_list = bbox_head.refine_bboxes( rois, roi_labels, bbox_pred, pos_is_gts, img_meta) return losses def simple_test(self, img, img_meta, proposals=None, rescale=False): x = self.extract_feat(img) # precmp attention if len(x) > 1: base_feat = [] for b_f in x[1:]: base_feat.append( F.interpolate(b_f, scale_factor=(x[2].size(2) / b_f.size(2), x[2].size(3) / b_f.size(3)))) base_feat = torch.cat(base_feat, 1) else: base_feat = torch.cat(x, 1) for ops in self.cmp_attention: base_feat = ops(base_feat) if len(base_feat.size()) > 2: base_feat = base_feat.mean(3).mean(2) else: base_feat = self.relu(base_feat) proposal_list = self.simple_test_rpn( x, img_meta, self.test_cfg.rpn) if proposals is None else proposals img_shape = img_meta[0]['img_shape'] ori_shape = img_meta[0]['ori_shape'] scale_factor = img_meta[0]['scale_factor'] # "ms" in variable names means multi-stage ms_bbox_result = {} ms_segm_result = {} ms_scores = [] rcnn_test_cfg = self.test_cfg.rcnn rois = bbox2roi(proposal_list) for i in range(self.num_stages): # add reasoning process if i > 0: # transform CxC classes graph to region # 1.build global semantic pool global_semantic_pool = torch.cat((bbox_head.fc_cls.weight, bbox_head.fc_cls.bias.unsqueeze(1)), 1).detach() # 2.compute graph attention attention_map = nn.Softmax(1)(torch.mm(base_feat, torch.transpose(global_semantic_pool, 0, 1))) # 3.adaptive global reasoning alpha_em = attention_map.unsqueeze(-1) * torch.mm(self.adj_gt, global_semantic_pool).unsqueeze(0) alpha_em = alpha_em.view(-1, global_semantic_pool.size(-1)) alpha_em = self.graph_weight_fc(alpha_em) alpha_em = self.relu(alpha_em) n_classes = bbox_head.fc_cls.weight.size(0) cls_prob = nn.Softmax(1)(cls_score).view(len(img_meta), -1, n_classes) enhanced_feat = torch.bmm(cls_prob, alpha_em.view(len(img_meta), -1, self.graph_out_channels)) enhanced_feat = enhanced_feat.view(-1, self.graph_out_channels) bbox_roi_extractor = self.bbox_roi_extractor[i] bbox_head = self.bbox_head[i] bbox_feats = bbox_roi_extractor( x[:len(bbox_roi_extractor.featmap_strides)], rois) # 
bbox_feats = self.forward_upper_neck(bbox_feats, i) # without upperneck bbox_feats = bbox_feats.view(bbox_feats.size(0), -1) for fc in self.branch_fcs: bbox_feats = self.relu(fc(bbox_feats)) # cat with enhanced feature if i > 0: bbox_feats = torch.cat([bbox_feats, enhanced_feat], 1) cls_score, bbox_pred = bbox_head(bbox_feats) ms_scores.append(cls_score) if self.test_cfg.keep_all_stages: det_bboxes, det_labels = bbox_head.get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) bbox_result = bbox2result(det_bboxes, det_labels, bbox_head.num_classes) ms_bbox_result['stage{}'.format(i)] = bbox_result if self.with_mask: if self.with_mask_roi_extractor: mask_roi_extractor = self.mask_roi_extractor[i] else: mask_roi_extractor = self.bbox_roi_extractor[i] mask_head = self.mask_head[i] if det_bboxes.shape[0] == 0: segm_result = [ [] for _ in range(mask_head.num_classes - 1) ] else: _bboxes = (det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) mask_rois = bbox2roi([_bboxes]) mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) mask_feats = self.forward_upper_neck(mask_feats, i) mask_pred = mask_head(mask_feats) segm_result = mask_head.get_seg_masks( mask_pred, _bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale) ms_segm_result['stage{}'.format(i)] = segm_result if i < self.num_stages - 1: bbox_label = cls_score.argmax(dim=1) rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred, img_meta[0]) cls_score = sum(ms_scores) / self.num_stages det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) bbox_result = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) ms_bbox_result['ensemble'] = bbox_result if self.with_mask: if det_bboxes.shape[0] == 0: segm_result = [ [] for _ in range(self.mask_head[-1].num_classes - 1) ] else: _bboxes = (det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) mask_rois = bbox2roi([_bboxes]) aug_masks = [] for i in range(self.num_stages): if self.with_mask_roi_extractor: mask_roi_extractor = self.mask_roi_extractor[i] else: mask_roi_extractor = self.bbox_roi_extractor[i] mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) mask_feats = self.forward_upper_neck(mask_feats, i) mask_pred = self.mask_head[i](mask_feats) aug_masks.append(mask_pred.sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, [img_meta] * self.num_stages, self.test_cfg.rcnn) segm_result = self.mask_head[-1].get_seg_masks( merged_masks, _bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale) ms_segm_result['ensemble'] = segm_result if not self.test_cfg.keep_all_stages: if self.with_mask: results = (ms_bbox_result['ensemble'], ms_segm_result['ensemble']) else: results = ms_bbox_result['ensemble'] else: if self.with_mask: results = { stage: (ms_bbox_result[stage], ms_segm_result[stage]) for stage in ms_bbox_result } else: results = ms_bbox_result return results def aug_test(self, img, img_meta, proposals=None, rescale=False): raise NotImplementedError def show_result(self, data, result, img_norm_cfg, **kwargs): if self.with_mask: ms_bbox_result, ms_segm_result = result if isinstance(ms_bbox_result, dict): result = (ms_bbox_result['ensemble'], ms_segm_result['ensemble']) else: if isinstance(result, dict): result = result['ensemble'] super(ReasoningRCNN, self).show_result(data, result, img_norm_cfg, **kwargs)
[ "torch.nn.ReLU", "torch.nn.ModuleList", "mmdet.core.bbox2roi", "numpy.float32", "torch.cat", "torch.mm", "torch.transpose", "torch.nn.Softmax", "torch.nn.Linear", "mmdet.core.bbox2result", "mmdet.core.merge_aug_masks", "torch.no_grad", "mmdet.core.multi_apply", "torch.from_numpy" ]
[((3959, 3974), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3972, 3974), True, 'import torch.nn as nn\n'), ((4356, 4423), 'torch.nn.Linear', 'nn.Linear', (["(bbox_head[0]['in_channels'] + 1)", 'self.graph_out_channels'], {}), "(bbox_head[0]['in_channels'] + 1, self.graph_out_channels)\n", (4365, 4423), True, 'import torch.nn as nn\n'), ((4444, 4465), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4451, 4465), True, 'import torch.nn as nn\n'), ((4656, 4671), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (4669, 4671), True, 'import torch.nn as nn\n'), ((13399, 13422), 'mmdet.core.bbox2roi', 'bbox2roi', (['proposal_list'], {}), '(proposal_list)\n', (13407, 13422), False, 'from mmdet.core import assign_and_sample, bbox2roi, bbox2result, multi_apply, merge_aug_masks\n'), ((17714, 17781), 'mmdet.core.bbox2result', 'bbox2result', (['det_bboxes', 'det_labels', 'self.bbox_head[-1].num_classes'], {}), '(det_bboxes, det_labels, self.bbox_head[-1].num_classes)\n', (17725, 17781), False, 'from mmdet.core import assign_and_sample, bbox2roi, bbox2result, multi_apply, merge_aug_masks\n'), ((2004, 2019), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2017, 2019), True, 'import torch.nn as nn\n'), ((2049, 2064), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2062, 2064), True, 'import torch.nn as nn\n'), ((2760, 2775), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2773, 2775), True, 'import torch.nn as nn\n'), ((3786, 3809), 'numpy.float32', 'np.float32', (['self.adj_gt'], {}), '(self.adj_gt)\n', (3796, 3809), True, 'import numpy as np\n'), ((4193, 4247), 'torch.nn.Linear', 'nn.Linear', (['(1024 // 16)', "(bbox_head[0]['in_channels'] + 1)"], {}), "(1024 // 16, bbox_head[0]['in_channels'] + 1)\n", (4202, 4247), True, 'import torch.nn as nn\n'), ((6910, 6933), 'torch.cat', 'torch.cat', (['base_feat', '(1)'], {}), '(base_feat, 1)\n', (6919, 6933), False, 'import torch\n'), ((6972, 6987), 'torch.cat', 'torch.cat', (['x', '(1)'], {}), '(x, 1)\n', (6981, 6987), False, 'import torch\n'), ((9185, 9294), 'mmdet.core.multi_apply', 'multi_apply', (['assign_and_sample', 'proposal_list', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels'], {'cfg': 'rcnn_train_cfg'}), '(assign_and_sample, proposal_list, gt_bboxes, gt_bboxes_ignore,\n gt_labels, cfg=rcnn_train_cfg)\n', (9196, 9294), False, 'from mmdet.core import assign_and_sample, bbox2roi, bbox2result, multi_apply, merge_aug_masks\n'), ((9564, 9659), 'mmdet.core.bbox2roi', 'bbox2roi', (['[(res.pos_bboxes, res.neg_bboxes) for res in sampling_results]'], {'return_index': '(True)'}), '([(res.pos_bboxes, res.neg_bboxes) for res in sampling_results],\n return_index=True)\n', (9572, 9659), False, 'from mmdet.core import assign_and_sample, bbox2roi, bbox2result, multi_apply, merge_aug_masks\n'), ((12620, 12643), 'torch.cat', 'torch.cat', (['base_feat', '(1)'], {}), '(base_feat, 1)\n', (12629, 12643), False, 'import torch\n'), ((12682, 12697), 'torch.cat', 'torch.cat', (['x', '(1)'], {}), '(x, 1)\n', (12691, 12697), False, 'import torch\n'), ((1653, 1668), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1666, 1668), True, 'import torch.nn as nn\n'), ((3137, 3152), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3150, 3152), True, 'import torch.nn as nn\n'), ((3849, 3878), 'torch.from_numpy', 'torch.from_numpy', (['self.adj_gt'], {}), '(self.adj_gt)\n', (3865, 3878), False, 'import torch\n'), ((4879, 4933), 'torch.nn.Linear', 'nn.Linear', (['fc_in_channels', 
"bbox_head[0]['in_channels']"], {}), "(fc_in_channels, bbox_head[0]['in_channels'])\n", (4888, 4933), True, 'import torch.nn as nn\n'), ((10101, 10142), 'torch.cat', 'torch.cat', (['[bbox_feats, enhanced_feat]', '(1)'], {}), '([bbox_feats, enhanced_feat], 1)\n', (10110, 10142), False, 'import torch\n'), ((11487, 11545), 'torch.cat', 'torch.cat', (['[res.pos_gt_labels for res in sampling_results]'], {}), '([res.pos_gt_labels for res in sampling_results])\n', (11496, 11545), False, 'import torch\n'), ((15198, 15239), 'torch.cat', 'torch.cat', (['[bbox_feats, enhanced_feat]', '(1)'], {}), '([bbox_feats, enhanced_feat], 1)\n', (15207, 15239), False, 'import torch\n'), ((15711, 15769), 'mmdet.core.bbox2result', 'bbox2result', (['det_bboxes', 'det_labels', 'bbox_head.num_classes'], {}), '(det_bboxes, det_labels, bbox_head.num_classes)\n', (15722, 15769), False, 'from mmdet.core import assign_and_sample, bbox2roi, bbox2result, multi_apply, merge_aug_masks\n'), ((18219, 18238), 'mmdet.core.bbox2roi', 'bbox2roi', (['[_bboxes]'], {}), '([_bboxes])\n', (18227, 18238), False, 'from mmdet.core import assign_and_sample, bbox2roi, bbox2result, multi_apply, merge_aug_masks\n'), ((18912, 18988), 'mmdet.core.merge_aug_masks', 'merge_aug_masks', (['aug_masks', '([img_meta] * self.num_stages)', 'self.test_cfg.rcnn'], {}), '(aug_masks, [img_meta] * self.num_stages, self.test_cfg.rcnn)\n', (18927, 18988), False, 'from mmdet.core import assign_and_sample, bbox2roi, bbox2result, multi_apply, merge_aug_masks\n'), ((8249, 8262), 'torch.nn.Softmax', 'nn.Softmax', (['(1)'], {}), '(1)\n', (8259, 8262), True, 'import torch.nn as nn\n'), ((10815, 10869), 'mmdet.core.bbox2roi', 'bbox2roi', (['[res.pos_bboxes for res in sampling_results]'], {}), '([res.pos_bboxes for res in sampling_results])\n', (10823, 10869), False, 'from mmdet.core import assign_and_sample, bbox2roi, bbox2result, multi_apply, merge_aug_masks\n'), ((12058, 12073), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12071, 12073), False, 'import torch\n'), ((13875, 13888), 'torch.nn.Softmax', 'nn.Softmax', (['(1)'], {}), '(1)\n', (13885, 13888), True, 'import torch.nn as nn\n'), ((8283, 8326), 'torch.transpose', 'torch.transpose', (['global_semantic_pool', '(0)', '(1)'], {}), '(global_semantic_pool, 0, 1)\n', (8298, 8326), False, 'import torch\n'), ((13909, 13952), 'torch.transpose', 'torch.transpose', (['global_semantic_pool', '(0)', '(1)'], {}), '(global_semantic_pool, 0, 1)\n', (13924, 13952), False, 'import torch\n'), ((16568, 16587), 'mmdet.core.bbox2roi', 'bbox2roi', (['[_bboxes]'], {}), '([_bboxes])\n', (16576, 16587), False, 'from mmdet.core import assign_and_sample, bbox2roi, bbox2result, multi_apply, merge_aug_masks\n'), ((8432, 8475), 'torch.mm', 'torch.mm', (['self.adj_gt', 'global_semantic_pool'], {}), '(self.adj_gt, global_semantic_pool)\n', (8440, 8475), False, 'import torch\n'), ((8836, 8849), 'torch.nn.Softmax', 'nn.Softmax', (['(1)'], {}), '(1)\n', (8846, 8849), True, 'import torch.nn as nn\n'), ((14058, 14101), 'torch.mm', 'torch.mm', (['self.adj_gt', 'global_semantic_pool'], {}), '(self.adj_gt, global_semantic_pool)\n', (14066, 14101), False, 'import torch\n'), ((14383, 14396), 'torch.nn.Softmax', 'nn.Softmax', (['(1)'], {}), '(1)\n', (14393, 14396), True, 'import torch.nn as nn\n')]
"""Testing utilities for the MNE BIDS converter.""" # Authors: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # License: BSD-3-Clause import os.path as op # This is here to handle mne-python <0.20 import warnings from datetime import datetime from pathlib import Path import pytest from numpy.random import random, RandomState with warnings.catch_warnings(): warnings.filterwarnings(action='ignore', message="can't resolve package", category=ImportWarning) import mne from mne_bids import BIDSPath from mne_bids.utils import (_check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype) from mne_bids.path import _path_to_str base_path = op.join(op.dirname(mne.__file__), 'io') subject_id = '01' session_id = '01' run = '01' acq = None task = 'testing' bids_path = BIDSPath( subject=subject_id, session=session_id, run=run, acquisition=acq, task=task) def test_get_ch_type_mapping(): """Test getting a correct channel mapping.""" with pytest.raises(ValueError, match='specified from "bogus" to "mne"'): _get_ch_type_mapping(fro='bogus', to='mne') def test_handle_datatype(): """Test the automatic extraction of datatype from the data.""" # Create a dummy raw n_channels = 2 sampling_rate = 100 data = random((n_channels, sampling_rate)) # datatype is given, check once for each datatype channel_types = ['grad', 'eeg', 'ecog', 'seeg', 'dbs'] datatypes = ['meg', 'eeg', 'ieeg', 'ieeg', 'ieeg'] for ch_type, datatype in zip(channel_types, datatypes): info = mne.create_info(n_channels, sampling_rate, ch_types=[ch_type] * 2) raw = mne.io.RawArray(data, info) assert _handle_datatype(raw, datatype) == datatype # datatype is not given, will be inferred if possible datatype = None # check if datatype is correctly inferred (combined EEG and iEEG/MEG data) channel_types = [['grad', 'eeg'], ['eeg', 'mag'], ['eeg', 'seeg'], ['ecog', 'eeg']] expected_modalities = ['meg', 'meg', 'ieeg', 'ieeg'] for ch_type, expected_mod in zip(channel_types, expected_modalities): info = mne.create_info(n_channels, sampling_rate, ch_types=ch_type) raw = mne.io.RawArray(random((2, sampling_rate)), info) assert _handle_datatype(raw, datatype) == expected_mod # set type to MEG if type is EEG/iEEG but there are MEG channels as well channel_types = [['grad', 'eeg'], ['grad', 'seeg']] datatypes = ['eeg', 'ieeg'] for ch_type, datatype in zip(channel_types, datatypes): info = mne.create_info(n_channels, sampling_rate, ch_types=ch_type) raw = mne.io.RawArray(random((2, sampling_rate)), info) assert _handle_datatype(raw, datatype) == 'meg' # if the situation is ambiguous (iEEG and MEG), raise ValueError datatype = None channel_types = [['grad', 'ecog'], ['grad', 'seeg']] for ch_type in channel_types: with pytest.raises(ValueError, match='Multiple data types'): info = mne.create_info(n_channels, sampling_rate, ch_types=ch_type) raw = mne.io.RawArray(random((2, sampling_rate)), info) _handle_datatype(raw, datatype) # if proper channel type (iEEG, EEG or MEG) is not found, raise ValueError ch_type = ['misc'] with pytest.raises(ValueError, match='No MEG, EEG or iEEG channels found'): info = mne.create_info(n_channels, sampling_rate, ch_types=ch_type * 2) raw = mne.io.RawArray(data, info) _handle_datatype(raw, datatype) def test_check_types(): """Test the check whether vars are str or None.""" assert _check_types(['foo', 'bar', None]) is None with pytest.raises(ValueError): _check_types([None, 1, 3.14, 'meg', [1, 2]]) def test_path_to_str(): """Test that _path_to_str returns a string.""" path_str 
= 'foo' assert _path_to_str(path_str) == path_str assert _path_to_str(Path(path_str)) == path_str with pytest.raises(ValueError): _path_to_str(1) def test_age_on_date(): """Test whether the age is determined correctly.""" bday = datetime(1994, 1, 26) exp1 = datetime(2018, 1, 25) exp2 = datetime(2018, 1, 26) exp3 = datetime(2018, 1, 27) exp4 = datetime(1990, 1, 1) assert _age_on_date(bday, exp1) == 23 assert _age_on_date(bday, exp2) == 24 assert _age_on_date(bday, exp3) == 24 with pytest.raises(ValueError): _age_on_date(bday, exp4) def test_infer_eeg_placement_scheme(): """Test inferring a correct EEG placement scheme.""" # no eeg channels case (e.g., MEG data) data_path = op.join(base_path, 'bti', 'tests', 'data') raw_fname = op.join(data_path, 'test_pdf_linux') config_fname = op.join(data_path, 'test_config_linux') headshape_fname = op.join(data_path, 'test_hs_linux') raw = mne.io.read_raw_bti(raw_fname, config_fname, headshape_fname) placement_scheme = _infer_eeg_placement_scheme(raw) assert placement_scheme == 'n/a' # 1020 case data_path = op.join(base_path, 'brainvision', 'tests', 'data') raw_fname = op.join(data_path, 'test.vhdr') raw = mne.io.read_raw_brainvision(raw_fname) placement_scheme = _infer_eeg_placement_scheme(raw) assert placement_scheme == 'based on the extended 10/20 system' # Unknown case, use raw from 1020 case but rename a channel raw.rename_channels({'P3': 'foo'}) placement_scheme = _infer_eeg_placement_scheme(raw) assert placement_scheme == 'n/a' def test_check_datatype(): """Test checking if datatype exists in raw data.""" sfreq, n_points = 1024., int(1e6) rng = RandomState(99) info_eeg = mne.create_info(['ch1', 'ch2', 'ch3'], sfreq, ['eeg'] * 3) raw_eeg = mne.io.RawArray(rng.random((3, n_points)) * 1e-6, info_eeg) info_meg = mne.create_info(['ch1', 'ch2', 'ch3'], sfreq, ['mag'] * 3) raw_meg = mne.io.RawArray(rng.random((3, n_points)) * 1e-6, info_meg) info_ieeg = mne.create_info(['ch1', 'ch2', 'ch3'], sfreq, ['seeg'] * 3) raw_ieeg = mne.io.RawArray(rng.random((3, n_points)) * 1e-6, info_ieeg) # check behavior for unsupported data types for datatype in (None, 'anat'): with pytest.raises(ValueError, match=f'The specified datatype ' f'{datatype} is currently not'): _check_datatype(raw_eeg, datatype) # check behavior for matching data type for raw, datatype in [(raw_eeg, 'eeg'), (raw_meg, 'meg'), (raw_ieeg, 'ieeg')]: _check_datatype(raw, datatype) # check for missing data type for raw, datatype in [(raw_ieeg, 'eeg'), (raw_meg, 'eeg'), (raw_ieeg, 'meg'), (raw_eeg, 'meg'), (raw_meg, 'ieeg'), (raw_eeg, 'ieeg')]: with pytest.raises(ValueError, match=f'The specified datatype ' f'{datatype} was not found'): _check_datatype(raw, datatype)
[ "mne_bids.BIDSPath", "mne_bids.utils._age_on_date", "mne.io.read_raw_brainvision", "mne_bids.utils._infer_eeg_placement_scheme", "mne_bids.path._path_to_str", "pathlib.Path", "mne_bids.utils._handle_datatype", "os.path.join", "os.path.dirname", "mne.io.read_raw_bti", "numpy.random.RandomState", "mne_bids.utils._check_types", "pytest.raises", "warnings.catch_warnings", "datetime.datetime", "mne.create_info", "mne.io.RawArray", "mne_bids.utils._get_ch_type_mapping", "warnings.filterwarnings", "mne_bids.utils._check_datatype", "numpy.random.random" ]
[((971, 1060), 'mne_bids.BIDSPath', 'BIDSPath', ([], {'subject': 'subject_id', 'session': 'session_id', 'run': 'run', 'acquisition': 'acq', 'task': 'task'}), '(subject=subject_id, session=session_id, run=run, acquisition=acq,\n task=task)\n', (979, 1060), False, 'from mne_bids import BIDSPath\n'), ((362, 387), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (385, 387), False, 'import warnings\n'), ((393, 494), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'message': '"""can\'t resolve package"""', 'category': 'ImportWarning'}), '(action=\'ignore\', message="can\'t resolve package",\n category=ImportWarning)\n', (416, 494), False, 'import warnings\n'), ((851, 875), 'os.path.dirname', 'op.dirname', (['mne.__file__'], {}), '(mne.__file__)\n', (861, 875), True, 'import os.path as op\n'), ((1455, 1490), 'numpy.random.random', 'random', (['(n_channels, sampling_rate)'], {}), '((n_channels, sampling_rate))\n', (1461, 1490), False, 'from numpy.random import random, RandomState\n'), ((4344, 4365), 'datetime.datetime', 'datetime', (['(1994)', '(1)', '(26)'], {}), '(1994, 1, 26)\n', (4352, 4365), False, 'from datetime import datetime\n'), ((4377, 4398), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(25)'], {}), '(2018, 1, 25)\n', (4385, 4398), False, 'from datetime import datetime\n'), ((4410, 4431), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(26)'], {}), '(2018, 1, 26)\n', (4418, 4431), False, 'from datetime import datetime\n'), ((4443, 4464), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(27)'], {}), '(2018, 1, 27)\n', (4451, 4464), False, 'from datetime import datetime\n'), ((4476, 4496), 'datetime.datetime', 'datetime', (['(1990)', '(1)', '(1)'], {}), '(1990, 1, 1)\n', (4484, 4496), False, 'from datetime import datetime\n'), ((4850, 4892), 'os.path.join', 'op.join', (['base_path', '"""bti"""', '"""tests"""', '"""data"""'], {}), "(base_path, 'bti', 'tests', 'data')\n", (4857, 4892), True, 'import os.path as op\n'), ((4909, 4945), 'os.path.join', 'op.join', (['data_path', '"""test_pdf_linux"""'], {}), "(data_path, 'test_pdf_linux')\n", (4916, 4945), True, 'import os.path as op\n'), ((4965, 5004), 'os.path.join', 'op.join', (['data_path', '"""test_config_linux"""'], {}), "(data_path, 'test_config_linux')\n", (4972, 5004), True, 'import os.path as op\n'), ((5027, 5062), 'os.path.join', 'op.join', (['data_path', '"""test_hs_linux"""'], {}), "(data_path, 'test_hs_linux')\n", (5034, 5062), True, 'import os.path as op\n'), ((5073, 5134), 'mne.io.read_raw_bti', 'mne.io.read_raw_bti', (['raw_fname', 'config_fname', 'headshape_fname'], {}), '(raw_fname, config_fname, headshape_fname)\n', (5092, 5134), False, 'import mne\n'), ((5158, 5190), 'mne_bids.utils._infer_eeg_placement_scheme', '_infer_eeg_placement_scheme', (['raw'], {}), '(raw)\n', (5185, 5190), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((5261, 5311), 'os.path.join', 'op.join', (['base_path', '"""brainvision"""', '"""tests"""', '"""data"""'], {}), "(base_path, 'brainvision', 'tests', 'data')\n", (5268, 5311), True, 'import os.path as op\n'), ((5328, 5359), 'os.path.join', 'op.join', (['data_path', '"""test.vhdr"""'], {}), "(data_path, 'test.vhdr')\n", (5335, 5359), True, 'import os.path as op\n'), ((5370, 5408), 'mne.io.read_raw_brainvision', 'mne.io.read_raw_brainvision', (['raw_fname'], {}), '(raw_fname)\n', (5397, 5408), False, 'import mne\n'), 
((5432, 5464), 'mne_bids.utils._infer_eeg_placement_scheme', '_infer_eeg_placement_scheme', (['raw'], {}), '(raw)\n', (5459, 5464), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((5660, 5692), 'mne_bids.utils._infer_eeg_placement_scheme', '_infer_eeg_placement_scheme', (['raw'], {}), '(raw)\n', (5687, 5692), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((5863, 5878), 'numpy.random.RandomState', 'RandomState', (['(99)'], {}), '(99)\n', (5874, 5878), False, 'from numpy.random import random, RandomState\n'), ((5894, 5952), 'mne.create_info', 'mne.create_info', (["['ch1', 'ch2', 'ch3']", 'sfreq', "(['eeg'] * 3)"], {}), "(['ch1', 'ch2', 'ch3'], sfreq, ['eeg'] * 3)\n", (5909, 5952), False, 'import mne\n'), ((6042, 6100), 'mne.create_info', 'mne.create_info', (["['ch1', 'ch2', 'ch3']", 'sfreq', "(['mag'] * 3)"], {}), "(['ch1', 'ch2', 'ch3'], sfreq, ['mag'] * 3)\n", (6057, 6100), False, 'import mne\n'), ((6191, 6250), 'mne.create_info', 'mne.create_info', (["['ch1', 'ch2', 'ch3']", 'sfreq', "(['seeg'] * 3)"], {}), "(['ch1', 'ch2', 'ch3'], sfreq, ['seeg'] * 3)\n", (6206, 6250), False, 'import mne\n'), ((1159, 1225), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""specified from "bogus" to "mne\\""""'}), '(ValueError, match=\'specified from "bogus" to "mne"\')\n', (1172, 1225), False, 'import pytest\n'), ((1235, 1278), 'mne_bids.utils._get_ch_type_mapping', '_get_ch_type_mapping', ([], {'fro': '"""bogus"""', 'to': '"""mne"""'}), "(fro='bogus', to='mne')\n", (1255, 1278), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((1734, 1800), 'mne.create_info', 'mne.create_info', (['n_channels', 'sampling_rate'], {'ch_types': '([ch_type] * 2)'}), '(n_channels, sampling_rate, ch_types=[ch_type] * 2)\n', (1749, 1800), False, 'import mne\n'), ((1846, 1873), 'mne.io.RawArray', 'mne.io.RawArray', (['data', 'info'], {}), '(data, info)\n', (1861, 1873), False, 'import mne\n'), ((2345, 2405), 'mne.create_info', 'mne.create_info', (['n_channels', 'sampling_rate'], {'ch_types': 'ch_type'}), '(n_channels, sampling_rate, ch_types=ch_type)\n', (2360, 2405), False, 'import mne\n'), ((2773, 2833), 'mne.create_info', 'mne.create_info', (['n_channels', 'sampling_rate'], {'ch_types': 'ch_type'}), '(n_channels, sampling_rate, ch_types=ch_type)\n', (2788, 2833), False, 'import mne\n'), ((3506, 3575), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""No MEG, EEG or iEEG channels found"""'}), "(ValueError, match='No MEG, EEG or iEEG channels found')\n", (3519, 3575), False, 'import pytest\n'), ((3592, 3656), 'mne.create_info', 'mne.create_info', (['n_channels', 'sampling_rate'], {'ch_types': '(ch_type * 2)'}), '(n_channels, sampling_rate, ch_types=ch_type * 2)\n', (3607, 3656), False, 'import mne\n'), ((3702, 3729), 'mne.io.RawArray', 'mne.io.RawArray', (['data', 'info'], {}), '(data, info)\n', (3717, 3729), False, 'import mne\n'), ((3738, 3769), 'mne_bids.utils._handle_datatype', '_handle_datatype', (['raw', 'datatype'], {}), '(raw, datatype)\n', (3754, 3769), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((3862, 3896), 'mne_bids.utils._check_types', '_check_types', (["['foo', 
'bar', None]"], {}), "(['foo', 'bar', None])\n", (3874, 3896), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((3914, 3939), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3927, 3939), False, 'import pytest\n'), ((3949, 3993), 'mne_bids.utils._check_types', '_check_types', (["[None, 1, 3.14, 'meg', [1, 2]]"], {}), "([None, 1, 3.14, 'meg', [1, 2]])\n", (3961, 3993), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((4103, 4125), 'mne_bids.path._path_to_str', '_path_to_str', (['path_str'], {}), '(path_str)\n', (4115, 4125), False, 'from mne_bids.path import _path_to_str\n'), ((4200, 4225), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4213, 4225), False, 'import pytest\n'), ((4235, 4250), 'mne_bids.path._path_to_str', '_path_to_str', (['(1)'], {}), '(1)\n', (4247, 4250), False, 'from mne_bids.path import _path_to_str\n'), ((4508, 4532), 'mne_bids.utils._age_on_date', '_age_on_date', (['bday', 'exp1'], {}), '(bday, exp1)\n', (4520, 4532), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((4550, 4574), 'mne_bids.utils._age_on_date', '_age_on_date', (['bday', 'exp2'], {}), '(bday, exp2)\n', (4562, 4574), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((4592, 4616), 'mne_bids.utils._age_on_date', '_age_on_date', (['bday', 'exp3'], {}), '(bday, exp3)\n', (4604, 4616), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((4632, 4657), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4645, 4657), False, 'import pytest\n'), ((4667, 4691), 'mne_bids.utils._age_on_date', '_age_on_date', (['bday', 'exp4'], {}), '(bday, exp4)\n', (4679, 4691), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((6769, 6799), 'mne_bids.utils._check_datatype', '_check_datatype', (['raw', 'datatype'], {}), '(raw, datatype)\n', (6784, 6799), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((1889, 1920), 'mne_bids.utils._handle_datatype', '_handle_datatype', (['raw', 'datatype'], {}), '(raw, datatype)\n', (1905, 1920), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((2436, 2462), 'numpy.random.random', 'random', (['(2, sampling_rate)'], {}), '((2, sampling_rate))\n', (2442, 2462), False, 'from numpy.random import random, RandomState\n'), ((2485, 2516), 'mne_bids.utils._handle_datatype', '_handle_datatype', (['raw', 'datatype'], {}), '(raw, datatype)\n', (2501, 2516), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((2864, 2890), 'numpy.random.random', 'random', (['(2, sampling_rate)'], {}), '((2, sampling_rate))\n', (2870, 2890), False, 'from numpy.random import random, RandomState\n'), ((2913, 2944), 
'mne_bids.utils._handle_datatype', '_handle_datatype', (['raw', 'datatype'], {}), '(raw, datatype)\n', (2929, 2944), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((3147, 3201), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Multiple data types"""'}), "(ValueError, match='Multiple data types')\n", (3160, 3201), False, 'import pytest\n'), ((3222, 3282), 'mne.create_info', 'mne.create_info', (['n_channels', 'sampling_rate'], {'ch_types': 'ch_type'}), '(n_channels, sampling_rate, ch_types=ch_type)\n', (3237, 3282), False, 'import mne\n'), ((3363, 3394), 'mne_bids.utils._handle_datatype', '_handle_datatype', (['raw', 'datatype'], {}), '(raw, datatype)\n', (3379, 3394), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((4162, 4176), 'pathlib.Path', 'Path', (['path_str'], {}), '(path_str)\n', (4166, 4176), False, 'from pathlib import Path\n'), ((6424, 6515), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""The specified datatype {datatype} is currently not"""'}), "(ValueError, match=\n f'The specified datatype {datatype} is currently not')\n", (6437, 6515), False, 'import pytest\n'), ((6573, 6607), 'mne_bids.utils._check_datatype', '_check_datatype', (['raw_eeg', 'datatype'], {}), '(raw_eeg, datatype)\n', (6588, 6607), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((7038, 7126), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""The specified datatype {datatype} was not found"""'}), "(ValueError, match=\n f'The specified datatype {datatype} was not found')\n", (7051, 7126), False, 'import pytest\n'), ((7184, 7214), 'mne_bids.utils._check_datatype', '_check_datatype', (['raw', 'datatype'], {}), '(raw, datatype)\n', (7199, 7214), False, 'from mne_bids.utils import _check_types, _age_on_date, _handle_datatype, _infer_eeg_placement_scheme, _get_ch_type_mapping, _check_datatype\n'), ((3317, 3343), 'numpy.random.random', 'random', (['(2, sampling_rate)'], {}), '((2, sampling_rate))\n', (3323, 3343), False, 'from numpy.random import random, RandomState\n')]
# This file stores check whether the altitude and control scheduled are being # stored correctly for every eval_QoI import os import numpy as np import cmath import chaospy as cp from pystatreduce.new_stochastic_collocation import StochasticCollocation2 from pystatreduce.stochastic_collocation import StochasticCollocation from pystatreduce.quantity_of_interest import QuantityOfInterest from pystatreduce.dimension_reduction import DimensionReduction import pystatreduce.examples as examples from pystatreduce.examples.supersonic_interceptor.interceptor_rdo2 import DymosInterceptorGlue import pystatreduce.utils as utils import pystatreduce.optimize.dymos_interceptor.eigen_information as eigen_info output_directory = os.environ['HOME'] + '/UserApps/pyStatReduce/pystatreduce/optimize/dymos_interceptor/trajectory_schedules' input_dict = {'num_segments': 15, 'transcription_order' : 3, 'transcription_type': 'LGR', 'solve_segments': False, 'use_for_collocation' : False, 'n_collocation_samples': 20, 'use_polynomial_control': False, 'write_files' : True, 'target_output_directory' : output_directory, 'aggregate_solutions' : True, } systemsize = input_dict['num_segments'] * input_dict['transcription_order'] mu = np.zeros(systemsize) std_dev = np.array([0.1659134, 0.1659134, 0.16313925, 0.16080975, 0.14363596, 0.09014088, 0.06906912, 0.03601839, 0.0153984 , 0.01194864, 0.00705978, 0.0073889 , 0.00891946, 0.01195811, 0.01263033, 0.01180144, 0.00912247, 0.00641914, 0.00624566, 0.00636504, 0.0064624 , 0.00639544, 0.0062501 , 0.00636687, 0.00650337, 0.00699955, 0.00804997, 0.00844582, 0.00942114, 0.01080109, 0.01121497, 0.01204432, 0.0128207 , 0.01295824, 0.01307331, 0.01359864, 0.01408001, 0.01646131, 0.02063841, 0.02250183, 0.02650464, 0.02733539, 0.02550976, 0.01783919, 0.0125073 , 0.01226541]) # 0.04*np.eye(systemsize) # 0.04* np.random.rand(systemsize) jdist = cp.MvNormal(mu, np.diag(std_dev[:-1])) QoI = DymosInterceptorGlue(systemsize, input_dict) t_final = QoI.eval_QoI(np.zeros(systemsize), np.zeros(systemsize)) # Read the file # read_file_name = output_directory + '/sample_0.npz' # npzfile = np.load(read_file_name) # print('altitude0 = ', npzfile['altitude']) # print('alpha0 = ', npzfile['alpha']) # npzfile.close() # Run it again t_final = QoI.eval_QoI(np.zeros(systemsize), np.zeros(systemsize)) # Read the file # read_file_name = output_directory + '/sample_1.npz' # npzfile = np.load(read_file_name) # print('altitude0 = ', npzfile['altitude']) # print('alpha0 = ', npzfile['alpha']) # npzfile.close()
[ "numpy.array", "numpy.diag", "numpy.zeros", "pystatreduce.examples.supersonic_interceptor.interceptor_rdo2.DymosInterceptorGlue" ]
[((1362, 1382), 'numpy.zeros', 'np.zeros', (['systemsize'], {}), '(systemsize)\n', (1370, 1382), True, 'import numpy as np\n'), ((1394, 1981), 'numpy.array', 'np.array', (['[0.1659134, 0.1659134, 0.16313925, 0.16080975, 0.14363596, 0.09014088, \n 0.06906912, 0.03601839, 0.0153984, 0.01194864, 0.00705978, 0.0073889, \n 0.00891946, 0.01195811, 0.01263033, 0.01180144, 0.00912247, 0.00641914,\n 0.00624566, 0.00636504, 0.0064624, 0.00639544, 0.0062501, 0.00636687, \n 0.00650337, 0.00699955, 0.00804997, 0.00844582, 0.00942114, 0.01080109,\n 0.01121497, 0.01204432, 0.0128207, 0.01295824, 0.01307331, 0.01359864, \n 0.01408001, 0.01646131, 0.02063841, 0.02250183, 0.02650464, 0.02733539,\n 0.02550976, 0.01783919, 0.0125073, 0.01226541]'], {}), '([0.1659134, 0.1659134, 0.16313925, 0.16080975, 0.14363596, \n 0.09014088, 0.06906912, 0.03601839, 0.0153984, 0.01194864, 0.00705978, \n 0.0073889, 0.00891946, 0.01195811, 0.01263033, 0.01180144, 0.00912247, \n 0.00641914, 0.00624566, 0.00636504, 0.0064624, 0.00639544, 0.0062501, \n 0.00636687, 0.00650337, 0.00699955, 0.00804997, 0.00844582, 0.00942114,\n 0.01080109, 0.01121497, 0.01204432, 0.0128207, 0.01295824, 0.01307331, \n 0.01359864, 0.01408001, 0.01646131, 0.02063841, 0.02250183, 0.02650464,\n 0.02733539, 0.02550976, 0.01783919, 0.0125073, 0.01226541])\n', (1402, 1981), True, 'import numpy as np\n'), ((2076, 2120), 'pystatreduce.examples.supersonic_interceptor.interceptor_rdo2.DymosInterceptorGlue', 'DymosInterceptorGlue', (['systemsize', 'input_dict'], {}), '(systemsize, input_dict)\n', (2096, 2120), False, 'from pystatreduce.examples.supersonic_interceptor.interceptor_rdo2 import DymosInterceptorGlue\n'), ((2046, 2067), 'numpy.diag', 'np.diag', (['std_dev[:-1]'], {}), '(std_dev[:-1])\n', (2053, 2067), True, 'import numpy as np\n'), ((2144, 2164), 'numpy.zeros', 'np.zeros', (['systemsize'], {}), '(systemsize)\n', (2152, 2164), True, 'import numpy as np\n'), ((2166, 2186), 'numpy.zeros', 'np.zeros', (['systemsize'], {}), '(systemsize)\n', (2174, 2186), True, 'import numpy as np\n'), ((2436, 2456), 'numpy.zeros', 'np.zeros', (['systemsize'], {}), '(systemsize)\n', (2444, 2456), True, 'import numpy as np\n'), ((2458, 2478), 'numpy.zeros', 'np.zeros', (['systemsize'], {}), '(systemsize)\n', (2466, 2478), True, 'import numpy as np\n')]
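For illustration only (this paragraph and snippet are editorial additions, not part of the dataset row above): the same chaospy call pattern used in that row, on a toy problem size, showing how the joint input distribution is built from per-dimension standard deviations and then sampled. The sizes and values here are made up for the sketch, and the sample-shape comment is a hedged expectation rather than a guarantee.

import numpy as np
import chaospy as cp

systemsize = 4                                # toy size instead of 45
mu = np.zeros(systemsize)
std_dev = np.full(systemsize, 0.05)          # per-dimension spread (illustrative values)
jdist = cp.MvNormal(mu, np.diag(std_dev))   # same call pattern as in the row above

samples = jdist.sample(3)                     # typically shaped (systemsize, n_samples)
print(samples.shape)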
"""Functions to analyze model performance.""" import pytest import unittest.mock as mock import deepchem import numpy as np import cytoxnet.models.analyze import cytoxnet.models.models def test_pair_predict(): """Plot of predictions vs true values.""" # set up a mock model to use model = mock.MagicMock(spec=cytoxnet.models.models.ToxModel) model.tasks = ['target'] model.predict.return_value = np.array([1, 2, 3, 4, 5]).reshape(-1, 1) dataset = deepchem.data.NumpyDataset( X=np.array(np.random.random((5, 2))), y=np.array([1, 3, 3, 4, 5]).reshape(-1, 1) ) transformer = mock.MagicMock() transformer.untransform.return_value = np.array( [1, 2, 3, 4, 5] ).reshape(-1, 1) with mock.patch('cytoxnet.models.analyze.alt'): # no transform should work cytoxnet.models.analyze.pair_predict( model, dataset, untransform=False) # assign transformer and use model.transformers = [transformer] cytoxnet.models.analyze.pair_predict( model, dataset, untransform=True) assert transformer.untransform.called # and task handling model.tasks = ['t1', 't2'] with pytest.raises(AssertionError): cytoxnet.models.analyze.pair_predict( model, dataset) # specify it this time model.predict.return_value = np.array( [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]] ).T dataset = deepchem.data.NumpyDataset( X=np.array(np.random.random((5, 2))), y=np.array( [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]] ).T ) chart = cytoxnet.models.analyze.pair_predict( model, dataset, task='t1', untransform=False) assert chart is not None,\ "Nothing was returned from the visualization." return
[ "unittest.mock.MagicMock", "unittest.mock.patch", "pytest.raises", "numpy.random.random", "numpy.array" ]
[((306, 358), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'spec': 'cytoxnet.models.models.ToxModel'}), '(spec=cytoxnet.models.models.ToxModel)\n', (320, 358), True, 'import unittest.mock as mock\n'), ((625, 641), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (639, 641), True, 'import unittest.mock as mock\n'), ((749, 790), 'unittest.mock.patch', 'mock.patch', (['"""cytoxnet.models.analyze.alt"""'], {}), "('cytoxnet.models.analyze.alt')\n", (759, 790), True, 'import unittest.mock as mock\n'), ((421, 446), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (429, 446), True, 'import numpy as np\n'), ((685, 710), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (693, 710), True, 'import numpy as np\n'), ((1214, 1243), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1227, 1243), False, 'import pytest\n'), ((1395, 1439), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]'], {}), '([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])\n', (1403, 1439), True, 'import numpy as np\n'), ((523, 547), 'numpy.random.random', 'np.random.random', (['(5, 2)'], {}), '((5, 2))\n', (539, 547), True, 'import numpy as np\n'), ((560, 585), 'numpy.array', 'np.array', (['[1, 3, 3, 4, 5]'], {}), '([1, 3, 3, 4, 5])\n', (568, 585), True, 'import numpy as np\n'), ((1533, 1557), 'numpy.random.random', 'np.random.random', (['(5, 2)'], {}), '((5, 2))\n', (1549, 1557), True, 'import numpy as np\n'), ((1574, 1618), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]'], {}), '([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])\n', (1582, 1618), True, 'import numpy as np\n')]
'''
This code is part of QuTIpy.

(c) Copyright <NAME>, 2021

This code is licensed under the Apache License, Version 2.0. You may
obtain a copy of this license in the LICENSE.txt file in the root directory
of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.

Any modifications or derivative works of this code must retain this
copyright notice, and modified files need to carry a notice indicating
that they have been altered from the originals.
'''

import numpy as np

from qutipy.general_functions import dag,ket,eye


def su_generators(d):

    '''
    Generates the basis (aka generators) of the Lie algebra su(d)
    corresponding to the Lie group SU(d). The basis has d^2-1 elements.
    All of the generators are traceless and Hermitian. After adding the
    identity matrix, they form an orthogonal basis for all dxd matrices.

    The orthogonality condition is Tr[l_i*l_j]=d*delta_{i,j}
    (This is a particular convention we use here; there are other conventions.)

    For d=2, we get the Pauli matrices.
    '''

    L=[]

    L.append(eye(d))

    for l in range(d):
        for k in range(l):
            L.append(np.sqrt(d/2)*(ket(d,k)@dag(ket(d,l))+ket(d,l)@dag(ket(d,k))))
            L.append(np.sqrt(d/2)*(-1j*ket(d,k)@dag(ket(d,l))+1j*ket(d,l)@dag(ket(d,k))))

    for k in range(1,d):
        X=0
        for j in range(k):
            X+=ket(d,j)@dag(ket(d,j))
        L.append(np.sqrt(d/(k*(k+1)))*(X-k*ket(d,k)@dag(ket(d,k))))

    return L
[ "qutipy.general_functions.eye", "numpy.sqrt", "qutipy.general_functions.ket" ]
[((1082, 1088), 'qutipy.general_functions.eye', 'eye', (['d'], {}), '(d)\n', (1085, 1088), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1394, 1403), 'qutipy.general_functions.ket', 'ket', (['d', 'j'], {}), '(d, j)\n', (1397, 1403), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1443, 1469), 'numpy.sqrt', 'np.sqrt', (['(d / (k * (k + 1)))'], {}), '(d / (k * (k + 1)))\n', (1450, 1469), True, 'import numpy as np\n'), ((1162, 1176), 'numpy.sqrt', 'np.sqrt', (['(d / 2)'], {}), '(d / 2)\n', (1169, 1176), True, 'import numpy as np\n'), ((1245, 1259), 'numpy.sqrt', 'np.sqrt', (['(d / 2)'], {}), '(d / 2)\n', (1252, 1259), True, 'import numpy as np\n'), ((1407, 1416), 'qutipy.general_functions.ket', 'ket', (['d', 'j'], {}), '(d, j)\n', (1410, 1416), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1176, 1185), 'qutipy.general_functions.ket', 'ket', (['d', 'k'], {}), '(d, k)\n', (1179, 1185), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1199, 1208), 'qutipy.general_functions.ket', 'ket', (['d', 'l'], {}), '(d, l)\n', (1202, 1208), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1469, 1478), 'qutipy.general_functions.ket', 'ket', (['d', 'k'], {}), '(d, k)\n', (1472, 1478), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1482, 1491), 'qutipy.general_functions.ket', 'ket', (['d', 'k'], {}), '(d, k)\n', (1485, 1491), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1189, 1198), 'qutipy.general_functions.ket', 'ket', (['d', 'l'], {}), '(d, l)\n', (1192, 1198), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1212, 1221), 'qutipy.general_functions.ket', 'ket', (['d', 'k'], {}), '(d, k)\n', (1215, 1221), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1263, 1272), 'qutipy.general_functions.ket', 'ket', (['d', 'k'], {}), '(d, k)\n', (1266, 1272), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1276, 1285), 'qutipy.general_functions.ket', 'ket', (['d', 'l'], {}), '(d, l)\n', (1279, 1285), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1289, 1298), 'qutipy.general_functions.ket', 'ket', (['d', 'l'], {}), '(d, l)\n', (1292, 1298), False, 'from qutipy.general_functions import dag, ket, eye\n'), ((1302, 1311), 'qutipy.general_functions.ket', 'ket', (['d', 'k'], {}), '(d, k)\n', (1305, 1311), False, 'from qutipy.general_functions import dag, ket, eye\n')]
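A quick numerical sanity check for the row above (an editorial illustration, not part of the extracted dataset). It restates su_generators together with small NumPy stand-ins for QuTIpy's eye/ket/dag helpers, so the snippet runs stand-alone without QuTIpy installed, and verifies the orthogonality condition Tr[l_i*l_j] = d*delta_{i,j} quoted in the docstring for d = 2 and d = 3.

import numpy as np

# Minimal stand-ins for qutipy.general_functions (hypothetical helpers, not QuTIpy's code).
def eye(d):
    return np.eye(d, dtype=complex)

def ket(d, k):
    v = np.zeros((d, 1), dtype=complex)   # computational basis column vector |k>
    v[k, 0] = 1.0
    return v

def dag(x):
    return np.conjugate(np.transpose(x))         # conjugate transpose

# Same construction as su_generators above, restated so the snippet is self-contained.
def su_generators(d):
    L = [eye(d)]
    for l in range(d):
        for k in range(l):
            L.append(np.sqrt(d / 2) * (ket(d, k) @ dag(ket(d, l)) + ket(d, l) @ dag(ket(d, k))))
            L.append(np.sqrt(d / 2) * (-1j * ket(d, k) @ dag(ket(d, l)) + 1j * ket(d, l) @ dag(ket(d, k))))
    for k in range(1, d):
        X = 0
        for j in range(k):
            X += ket(d, j) @ dag(ket(d, j))
        L.append(np.sqrt(d / (k * (k + 1))) * (X - k * ket(d, k) @ dag(ket(d, k))))
    return L

for d in (2, 3):
    gens = su_generators(d)                      # identity plus d^2 - 1 generators
    assert len(gens) == d ** 2
    for i, A in enumerate(gens):
        assert np.allclose(A, dag(A))            # every generator is Hermitian
        for j, B in enumerate(gens):
            overlap = np.trace(A @ B).real
            assert abs(overlap - (d if i == j else 0.0)) < 1e-12
print("Tr[l_i * l_j] = d * delta_{i,j} verified for d = 2 and d = 3")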
""" Plot the results from the evaluations on artificial data. """ # TODO: Get these from somewhere else? import numpy as np import matplotlib.pyplot as plt import scipy.optimize as op import pickle from collections import OrderedDict from glob import glob search_strategies = OrderedDict([ ("BayesStepper", dict()), ("KasarapuAllison2015", dict()) ]) results_path_template = "data/*{search_strategy}.output" times = dict() result_keys = ["K", "N", "D", "I", "I_t", "time"] for search_strategy in search_strategies.keys(): results_paths = glob( results_path_template.format(search_strategy=search_strategy)) N = len(results_paths) print(f"Collecting {N} result files on {search_strategy}") if N < 1: continue times[search_strategy] = [] for i, results_path in enumerate(results_paths): #print(f"At {i}/{N}: {results_path}") with open(results_path, "rb") as fp: results = pickle.load(fp) # Average draws? assert len(results) == 1 for result in results: times[search_strategy].append([result[k] for k in result_keys]) times[search_strategy] = np.array(times[search_strategy]) # Plot as a function of N, D, K x_labels = ["N", "D", "K", "ND", "NK"] y_label = "time" L = len(x_labels) fig, axes = plt.subplots(1, L, figsize=(4 * L, 4)) max_y = 0 upper_lim = lambda existing: 10**(1 + np.ceil(np.log10(np.max(existing)))) for i, (ax, x_label) in enumerate(zip(axes, x_labels)): try: x_idx = result_keys.index(x_label) except ValueError: x_idx = np.array([result_keys.index(_) for _ in x_label]) y_idx = result_keys.index(y_label) max_x = 0 for search_strategy, data in times.items(): x = data.T[x_idx] y = data.T[y_idx] if len(x.shape) > 1: x = np.product(x, axis=0) converged = data.T[result_keys.index("I")] <= data.T[result_keys.index("I_t")] idx = np.argsort(x) x, y, converged = x[idx], y[idx], converged[idx] max_y = max(y.max(), max_y) max_x = max(x.max(), max_x) scat = ax.scatter(x[converged], y[converged], label=search_strategy) ax.scatter(x[~converged], y[~converged], alpha=0.5, c=scat.get_facecolor()) ax.loglog() ax.set_xlim(1e-0, upper_lim([max_x])) ax.set_xlabel(x_label) ax.set_ylabel(y_label) for ax in axes: ax.set_ylim(1e-2, upper_lim([max_y])) fig.tight_layout() x_labels = ["K", "N", "D"] # Fit the cost. for search_strategy, data in times.items(): x_idx = np.array([result_keys.index(x_label) for x_label in x_labels]) y_idx = result_keys.index(y_label) log_x = np.log(data.T[x_idx]) log_y = np.log(data.T[y_idx]) f = lambda _, *p: p @ log_x i, j = log_x.shape p_opt, p_cov = op.curve_fit(f, np.ones(j), log_y, p0=np.ones(i)) # Plot the time relative to K order_repr = "".join([f"[{x}^{p:.1f}]" for x, p in zip(x_labels, p_opt)]) print(f"{search_strategy}: O({order_repr})") raise a
[ "numpy.log", "numpy.ones", "numpy.argsort", "numpy.product", "pickle.load", "numpy.array", "numpy.max", "matplotlib.pyplot.subplots" ]
[((1338, 1376), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'L'], {'figsize': '(4 * L, 4)'}), '(1, L, figsize=(4 * L, 4))\n', (1350, 1376), True, 'import matplotlib.pyplot as plt\n'), ((1183, 1215), 'numpy.array', 'np.array', (['times[search_strategy]'], {}), '(times[search_strategy])\n', (1191, 1215), True, 'import numpy as np\n'), ((2717, 2738), 'numpy.log', 'np.log', (['data.T[x_idx]'], {}), '(data.T[x_idx])\n', (2723, 2738), True, 'import numpy as np\n'), ((2751, 2772), 'numpy.log', 'np.log', (['data.T[y_idx]'], {}), '(data.T[y_idx])\n', (2757, 2772), True, 'import numpy as np\n'), ((1996, 2009), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (2006, 2009), True, 'import numpy as np\n'), ((2865, 2875), 'numpy.ones', 'np.ones', (['j'], {}), '(j)\n', (2872, 2875), True, 'import numpy as np\n'), ((969, 984), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (980, 984), False, 'import pickle\n'), ((1870, 1891), 'numpy.product', 'np.product', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1880, 1891), True, 'import numpy as np\n'), ((2887, 2897), 'numpy.ones', 'np.ones', (['i'], {}), '(i)\n', (2894, 2897), True, 'import numpy as np\n'), ((1444, 1460), 'numpy.max', 'np.max', (['existing'], {}), '(existing)\n', (1450, 1460), True, 'import numpy as np\n')]
# BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE

"""
Source and Resource for a memory mapped file, which is never multithreaded.
"""

from __future__ import absolute_import

import numpy

import uproot4.source.chunk
import uproot4.source.futures
import uproot4._util


class MemmapSource(uproot4.source.chunk.Source):
    """
    Source for a memory-mapped file.

    Threading is unnecessary because a memory-map is stateless.
    """

    __slots__ = ["_file_path", "_file"]

    _dtype = uproot4.source.chunk.Chunk._dtype

    def __init__(self, file_path):
        """
        Args:
            file_path (str): Path to the file.
        """
        self._file_path = file_path
        self._file = numpy.memmap(self._file_path, dtype=self._dtype, mode="r")

    @property
    def file(self):
        """
        Path to the file.
        """
        return self._file

    def __enter__(self):
        """
        Passes `__enter__` to the memory-map.

        Returns self.
        """
        if hasattr(self._file._mmap, "__enter__"):
            self._file._mmap.__enter__()
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        """
        Passes `__exit__` to the memory-map or otherwise closes the file.
        """
        if hasattr(self._file._mmap, "__exit__"):
            self._file._mmap.__exit__(exception_type, exception_value, traceback)
        else:
            self._file._mmap.close()

    def chunks(self, ranges):
        """
        Args:
            ranges (iterable of (int, int)): The start (inclusive) and stop
                (exclusive) byte ranges for each desired chunk.

        Returns a list of Chunks that are already filled with data.
        """
        if uproot4._util.py2:
            try:
                self._file._mmap.tell()
            except ValueError:
                raise OSError("memmap is closed for file {0}".format(self._file_path))
        elif self._file._mmap.closed:
            raise OSError("memmap is closed for file {0}".format(self._file_path))

        chunks = []
        for start, stop in ranges:
            future = uproot4.source.futures.TrivialFuture(self._file[start:stop])
            chunks.append(uproot4.source.chunk.Chunk(self, start, stop, future))
        return chunks
[ "numpy.memmap" ]
[((744, 802), 'numpy.memmap', 'numpy.memmap', (['self._file_path'], {'dtype': 'self._dtype', 'mode': '"""r"""'}), "(self._file_path, dtype=self._dtype, mode='r')\n", (756, 802), False, 'import numpy\n')]
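A stand-alone sketch (editorial addition, not part of the dataset row above) of the access pattern that MemmapSource.chunks implements: map the file once, then serve byte ranges as zero-copy slices of the memory map. It deliberately uses numpy.memmap directly rather than uproot4's Chunk/TrivialFuture wrappers, so it runs without uproot4; the scratch-file handling is an illustrative assumption.

import os
import tempfile
import numpy

# write a small scratch file to map
fd, path = tempfile.mkstemp()
with os.fdopen(fd, "wb") as f:
    f.write(bytes(range(256)))

mm = numpy.memmap(path, dtype=numpy.uint8, mode="r")    # stateless, shareable view

ranges = [(0, 4), (10, 14), (250, 256)]
chunks = [mm[start:stop] for start, stop in ranges]      # slices, not copies

for (start, stop), chunk in zip(ranges, chunks):
    print(start, stop, bytes(chunk))

del mm, chunks    # drop references to the map before deleting the scratch file
os.remove(path)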
import math import random import os import numpy as np import torch from torch import nn, autograd, optim from torch.nn import functional as F from torch.utils import data import torch.distributed as dist from torchvision import transforms from torchvision.utils import save_image from tqdm import tqdm from args import args import utils def inference_one_latent_and_save(generator, z=None, image_size=256, output=None, texture_idx=0, crop_idx=0): i_h, i_w =image_size//64, image_size//64 with torch.no_grad(): fake_img, _ = generator([z], input_is_latent=False, i_h=i_h, i_w=i_w) fake_img = utils.deprocess_image(fake_img) img_folder = os.path.join(output, "%04dby%04d"%(image_size,image_size), "%04d_th_crop"%texture_idx) utils.mkdir(img_folder) img_filename = os.path.join(img_folder,"%04d_th_texture_%04d_th_crop_%04dby%04d.png" % (texture_idx, crop_idx, image_size, image_size)) save_image(fake_img[0], img_filename) if __name__ == "__main__": device = args.device np.random.seed(args.seed) torch.manual_seed(args.seed) if args.model_name == "texture": from model import MultiScaleTextureGenerator generator = MultiScaleTextureGenerator(size=args.image_size[0], style_dim=args.latent_dim, n_mlp=args.n_mlp, channel_multiplier=args.channel_multiplier, max_texton_size=args.max_texton_size, n_textons=args.n_textons) else: from model import Generator generator = Generator(size=args.image_size[0], style_dim=args.latent_dim, n_mlp=args.n_mlp, channel_multiplier=args.channel_multiplier) generator.to(device) generator.eval() if args.load_ckpt is not None: print("Loading %s model from %s:" % (args.model_name, args.load_ckpt)) ckpt = torch.load(args.load_ckpt, map_location=lambda storage, loc: storage) generator.load_state_dict(ckpt["g_ema"]) ckpt_name = os.path.splitext(os.path.basename(args.load_ckpt.strip("/")))[0] try: sample_z = torch.load(args.input).to(device) print("Successfully loaded pre-defined latent vectors for inference") latent_name = os.path.splitext(os.path.basename(args.input.strip("/")))[0] folder_path = os.path.join(args.output, "offline_inference", ckpt_name, latent_name, "seed"+str(int(args.seed))) utils.mkdir(folder_path) for i in tqdm(range(sample_z.shape[0])): z = sample_z[i:i+1] for j in range(args.samples_per_texture): for img_size in args.image_size: inference_one_latent_and_save(generator, z, img_size, folder_path, i, j) except: print("No pre-defined latent vectors provided. Latent vectors will be sampled online") folder_path = os.path.join(args.output, "online_inference", ckpt_name, "seed"+str(int(args.seed))) utils.mkdir(folder_path) for i in tqdm(range(args.n_textures)): z = torch.randn(1, args.latent_dim, device=device, requires_grad=False) for img_size in args.image_size: for j in range(args.samples_per_texture): inference_one_latent_and_save(generator, z, img_size, folder_path, i, j) z_filename = os.path.join(folder_path, "%09d.pt" % i) torch.save(z, z_filename)
[ "numpy.random.seed", "torch.manual_seed", "torch.load", "utils.mkdir", "torch.randn", "torch.save", "model.Generator", "torchvision.utils.save_image", "args.args.input.strip", "model.MultiScaleTextureGenerator", "torch.no_grad", "os.path.join", "args.args.load_ckpt.strip", "utils.deprocess_image" ]
[((1039, 1064), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1053, 1064), True, 'import numpy as np\n'), ((1069, 1097), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1086, 1097), False, 'import torch\n'), ((505, 520), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (518, 520), False, 'import torch\n'), ((619, 650), 'utils.deprocess_image', 'utils.deprocess_image', (['fake_img'], {}), '(fake_img)\n', (640, 650), False, 'import utils\n'), ((672, 768), 'os.path.join', 'os.path.join', (['output', "('%04dby%04d' % (image_size, image_size))", "('%04d_th_crop' % texture_idx)"], {}), "(output, '%04dby%04d' % (image_size, image_size), \n '%04d_th_crop' % texture_idx)\n", (684, 768), False, 'import os\n'), ((767, 790), 'utils.mkdir', 'utils.mkdir', (['img_folder'], {}), '(img_folder)\n', (778, 790), False, 'import utils\n'), ((814, 940), 'os.path.join', 'os.path.join', (['img_folder', "('%04d_th_texture_%04d_th_crop_%04dby%04d.png' % (texture_idx, crop_idx,\n image_size, image_size))"], {}), "(img_folder, '%04d_th_texture_%04d_th_crop_%04dby%04d.png' % (\n texture_idx, crop_idx, image_size, image_size))\n", (826, 940), False, 'import os\n'), ((944, 981), 'torchvision.utils.save_image', 'save_image', (['fake_img[0]', 'img_filename'], {}), '(fake_img[0], img_filename)\n', (954, 981), False, 'from torchvision.utils import save_image\n'), ((1209, 1428), 'model.MultiScaleTextureGenerator', 'MultiScaleTextureGenerator', ([], {'size': 'args.image_size[0]', 'style_dim': 'args.latent_dim', 'n_mlp': 'args.n_mlp', 'channel_multiplier': 'args.channel_multiplier', 'max_texton_size': 'args.max_texton_size', 'n_textons': 'args.n_textons'}), '(size=args.image_size[0], style_dim=args.\n latent_dim, n_mlp=args.n_mlp, channel_multiplier=args.\n channel_multiplier, max_texton_size=args.max_texton_size, n_textons=\n args.n_textons)\n', (1235, 1428), False, 'from model import MultiScaleTextureGenerator\n'), ((1480, 1608), 'model.Generator', 'Generator', ([], {'size': 'args.image_size[0]', 'style_dim': 'args.latent_dim', 'n_mlp': 'args.n_mlp', 'channel_multiplier': 'args.channel_multiplier'}), '(size=args.image_size[0], style_dim=args.latent_dim, n_mlp=args.\n n_mlp, channel_multiplier=args.channel_multiplier)\n', (1489, 1608), False, 'from model import Generator\n'), ((1780, 1849), 'torch.load', 'torch.load', (['args.load_ckpt'], {'map_location': '(lambda storage, loc: storage)'}), '(args.load_ckpt, map_location=lambda storage, loc: storage)\n', (1790, 1849), False, 'import torch\n'), ((2338, 2362), 'utils.mkdir', 'utils.mkdir', (['folder_path'], {}), '(folder_path)\n', (2349, 2362), False, 'import utils\n'), ((2863, 2887), 'utils.mkdir', 'utils.mkdir', (['folder_path'], {}), '(folder_path)\n', (2874, 2887), False, 'import utils\n'), ((2013, 2035), 'torch.load', 'torch.load', (['args.input'], {}), '(args.input)\n', (2023, 2035), False, 'import torch\n'), ((2962, 3029), 'torch.randn', 'torch.randn', (['(1)', 'args.latent_dim'], {'device': 'device', 'requires_grad': '(False)'}), '(1, args.latent_dim, device=device, requires_grad=False)\n', (2973, 3029), False, 'import torch\n'), ((3254, 3294), 'os.path.join', 'os.path.join', (['folder_path', "('%09d.pt' % i)"], {}), "(folder_path, '%09d.pt' % i)\n", (3266, 3294), False, 'import os\n'), ((3307, 3332), 'torch.save', 'torch.save', (['z', 'z_filename'], {}), '(z, z_filename)\n', (3317, 3332), False, 'import torch\n'), ((1953, 1978), 'args.args.load_ckpt.strip', 'args.load_ckpt.strip', (['"""/"""'], {}), 
"('/')\n", (1973, 1978), False, 'from args import args\n'), ((2181, 2202), 'args.args.input.strip', 'args.input.strip', (['"""/"""'], {}), "('/')\n", (2197, 2202), False, 'from args import args\n')]
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import argparse import time import tensorflow as tf import paddle.v2 as paddle def parse_args(): parser = argparse.ArgumentParser("LSTM model benchmark.") parser.add_argument( '--batch_size', type=int, default=32, help='The sequence number of a batch data. (default: %(default)d)') parser.add_argument( '--stacked_num', type=int, default=5, help='Number of lstm layers to stack. (default: %(default)d)') parser.add_argument( '--embedding_dim', type=int, default=512, help='Dimension of embedding table. (default: %(default)d)') parser.add_argument( '--hidden_dim', type=int, default=512, help='Hidden size of lstm unit. (default: %(default)d)') parser.add_argument( '--pass_num', type=int, default=10, help='Epoch number to train. (default: %(default)d)') parser.add_argument( '--learning_rate', type=float, default=0.0002, help='Learning rate used to train. (default: %(default)f)') parser.add_argument( '--infer_only', action='store_true', help='If set, run forward only.') args = parser.parse_args() return args def print_arguments(args): print('----------- Configuration Arguments -----------') for arg, value in sorted(vars(args).iteritems()): print('%s: %s' % (arg, value)) print('------------------------------------------------') def dynamic_lstm_model(dict_size, embedding_dim, hidden_dim, stacked_num, class_num=2, is_train=True): word_idx = tf.placeholder(tf.int64, shape=[None, None]) sequence_length = tf.placeholder(tf.int64, shape=[None, ]) embedding_weights = tf.get_variable('word_embeddings', [dict_size, embedding_dim]) embedding = tf.nn.embedding_lookup(embedding_weights, word_idx) lstm_cell = tf.nn.rnn_cell.LSTMCell( num_units=hidden_dim, use_peepholes=False) stacked_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * stacked_num) # final_state [LSTMTuple(c, h), LSTMTuple(c, h) ...] 
total stacked_num LSTMTuples _, final_state = tf.nn.dynamic_rnn( cell=stacked_cell, inputs=embedding, dtype=tf.float32, sequence_length=sequence_length) w = tf.Variable( tf.truncated_normal([hidden_dim, class_num]), dtype=tf.float32) bias = tf.Variable( tf.constant( value=0.0, shape=[class_num], dtype=tf.float32)) prediction = tf.matmul(final_state[-1][1], w) + bias if not is_train: return (word_idx, sequence_length), tf.nn.softmax(prediction) label = tf.placeholder(tf.int64, shape=[None, ]) loss = tf.nn.softmax_cross_entropy_with_logits( labels=tf.one_hot(label, 2), logits=prediction) avg_loss = tf.reduce_mean(loss) correct_count = tf.equal(tf.argmax(prediction, 1), label) acc = tf.reduce_mean(tf.cast(correct_count, tf.float32)) with tf.variable_scope("reset_metrics_accuracy_scope") as scope: g_acc = tf.metrics.accuracy(label, tf.argmax(prediction, axis=1)) vars = tf.contrib.framework.get_variables( scope, collection=tf.GraphKeys.LOCAL_VARIABLES) reset_op = tf.variables_initializer(vars) return (word_idx, sequence_length, label), avg_loss, acc, g_acc, reset_op def padding_data(data, padding_size, value): data = data + [value] * padding_size return data[:padding_size] def train(args): word_dict = paddle.dataset.imdb.word_dict() dict_size = len(word_dict) feeding_list, avg_loss, acc, g_acc, reset_op = dynamic_lstm_model( dict_size, args.embedding_dim, args.hidden_dim, args.stacked_num) adam_optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate) train_op = adam_optimizer.minimize(avg_loss) train_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.imdb.train(word_dict), buf_size=25000), batch_size=args.batch_size) test_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.imdb.test(word_dict), buf_size=25000), batch_size=args.batch_size) def do_validation(sess): sess.run(reset_op) for batch_id, data in enumerate(test_reader()): word_idx = map(lambda x: x[0], data) sequence_length = np.array( [len(seq) for seq in word_idx]).astype('int64') maxlen = np.max(sequence_length) word_idx = [padding_data(seq, maxlen, 0) for seq in word_idx] word_idx = np.array(word_idx).astype('int64') label = np.array(map(lambda x: x[1], data)).astype('int64') _, loss, fetch_acc, fetch_g_acc = sess.run( [train_op, avg_loss, acc, g_acc], feed_dict={ feeding_list[0]: word_idx, feeding_list[1]: sequence_length, feeding_list[2]: label }) return fetch_g_acc[1] config = tf.ConfigProto( intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) config.gpu_options.allow_growth = True with tf.Session(config=config) as sess: init_g = tf.global_variables_initializer() init_l = tf.local_variables_initializer() sess.run(init_l) sess.run(init_g) for pass_id in xrange(args.pass_num): # clear accuracy local variable sess.run(reset_op) pass_start_time = time.time() words_seen = 0 for batch_id, data in enumerate(train_reader()): word_idx = map(lambda x: x[0], data) sequence_length = np.array( [len(seq) for seq in word_idx]).astype('int64') words_seen += np.sum(sequence_length) maxlen = np.max(sequence_length) word_idx = [padding_data(seq, maxlen, 0) for seq in word_idx] word_idx = np.array(word_idx).astype('int64') label = np.array(map(lambda x: x[1], data)).astype('int64') _, loss, fetch_acc, fetch_g_acc = sess.run( [train_op, avg_loss, acc, g_acc], feed_dict={ feeding_list[0]: word_idx, feeding_list[1]: sequence_length, feeding_list[2]: label }) print("pass_id=%d, batch_id=%d, loss: %f, acc: %f, avg_acc: %f" % (pass_id, batch_id, loss, fetch_acc, fetch_g_acc[1])) pass_end_time = time.time() 
time_consumed = pass_end_time - pass_start_time words_per_sec = words_seen / time_consumed test_acc = do_validation(sess) print("pass_id=%d, test_acc: %f, words/s: %f, sec/pass: %f" % (pass_id, test_acc, words_per_sec, time_consumed)) if __name__ == '__main__': args = parse_args() print_arguments(args) if args.infer_only: pass else: train(args)
[ "numpy.sum", "argparse.ArgumentParser", "tensorflow.variables_initializer", "tensorflow.local_variables_initializer", "tensorflow.ConfigProto", "tensorflow.matmul", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.truncated_normal", "tensorflow.get_variable", "tensorflow.nn.softmax", "tensorflow.contrib.framework.get_variables", "tensorflow.one_hot", "paddle.v2.dataset.imdb.word_dict", "tensorflow.variable_scope", "tensorflow.placeholder", "tensorflow.cast", "numpy.max", "paddle.v2.dataset.imdb.test", "tensorflow.nn.embedding_lookup", "tensorflow.global_variables_initializer", "tensorflow.reduce_mean", "tensorflow.Session", "tensorflow.constant", "tensorflow.nn.rnn_cell.MultiRNNCell", "paddle.v2.dataset.imdb.train", "tensorflow.nn.dynamic_rnn", "tensorflow.argmax", "time.time", "numpy.array", "tensorflow.train.AdamOptimizer" ]
[((855, 903), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""LSTM model benchmark."""'], {}), "('LSTM model benchmark.')\n", (878, 903), False, 'import argparse\n'), ((2481, 2525), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '[None, None]'}), '(tf.int64, shape=[None, None])\n', (2495, 2525), True, 'import tensorflow as tf\n'), ((2548, 2586), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '[None]'}), '(tf.int64, shape=[None])\n', (2562, 2586), True, 'import tensorflow as tf\n'), ((2614, 2676), 'tensorflow.get_variable', 'tf.get_variable', (['"""word_embeddings"""', '[dict_size, embedding_dim]'], {}), "('word_embeddings', [dict_size, embedding_dim])\n", (2629, 2676), True, 'import tensorflow as tf\n'), ((2733, 2784), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding_weights', 'word_idx'], {}), '(embedding_weights, word_idx)\n', (2755, 2784), True, 'import tensorflow as tf\n'), ((2802, 2868), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', ([], {'num_units': 'hidden_dim', 'use_peepholes': '(False)'}), '(num_units=hidden_dim, use_peepholes=False)\n', (2825, 2868), True, 'import tensorflow as tf\n'), ((2897, 2951), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['([lstm_cell] * stacked_num)'], {}), '([lstm_cell] * stacked_num)\n', (2924, 2951), True, 'import tensorflow as tf\n'), ((3060, 3169), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'stacked_cell', 'inputs': 'embedding', 'dtype': 'tf.float32', 'sequence_length': 'sequence_length'}), '(cell=stacked_cell, inputs=embedding, dtype=tf.float32,\n sequence_length=sequence_length)\n', (3077, 3169), True, 'import tensorflow as tf\n'), ((3561, 3599), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '[None]'}), '(tf.int64, shape=[None])\n', (3575, 3599), True, 'import tensorflow as tf\n'), ((3725, 3745), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (3739, 3745), True, 'import tensorflow as tf\n'), ((4408, 4439), 'paddle.v2.dataset.imdb.word_dict', 'paddle.dataset.imdb.word_dict', ([], {}), '()\n', (4437, 4439), True, 'import paddle.v2 as paddle\n'), ((4639, 4695), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'args.learning_rate'}), '(learning_rate=args.learning_rate)\n', (4661, 4695), True, 'import tensorflow as tf\n'), ((5937, 6015), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': '(1)', 'inter_op_parallelism_threads': '(1)'}), '(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n', (5951, 6015), True, 'import tensorflow as tf\n'), ((3229, 3273), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[hidden_dim, class_num]'], {}), '([hidden_dim, class_num])\n', (3248, 3273), True, 'import tensorflow as tf\n'), ((3325, 3384), 'tensorflow.constant', 'tf.constant', ([], {'value': '(0.0)', 'shape': '[class_num]', 'dtype': 'tf.float32'}), '(value=0.0, shape=[class_num], dtype=tf.float32)\n', (3336, 3384), True, 'import tensorflow as tf\n'), ((3416, 3448), 'tensorflow.matmul', 'tf.matmul', (['final_state[-1][1]', 'w'], {}), '(final_state[-1][1], w)\n', (3425, 3448), True, 'import tensorflow as tf\n'), ((3776, 3800), 'tensorflow.argmax', 'tf.argmax', (['prediction', '(1)'], {}), '(prediction, 1)\n', (3785, 3800), True, 'import tensorflow as tf\n'), ((3834, 3868), 'tensorflow.cast', 'tf.cast', (['correct_count', 'tf.float32'], {}), '(correct_count, tf.float32)\n', (3841, 3868), True, 
'import tensorflow as tf\n'), ((3880, 3929), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""reset_metrics_accuracy_scope"""'], {}), "('reset_metrics_accuracy_scope')\n", (3897, 3929), True, 'import tensorflow as tf\n'), ((4029, 4116), 'tensorflow.contrib.framework.get_variables', 'tf.contrib.framework.get_variables', (['scope'], {'collection': 'tf.GraphKeys.LOCAL_VARIABLES'}), '(scope, collection=tf.GraphKeys.\n LOCAL_VARIABLES)\n', (4063, 4116), True, 'import tensorflow as tf\n'), ((4144, 4174), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['vars'], {}), '(vars)\n', (4168, 4174), True, 'import tensorflow as tf\n'), ((6077, 6102), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (6087, 6102), True, 'import tensorflow as tf\n'), ((6129, 6162), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6160, 6162), True, 'import tensorflow as tf\n'), ((6180, 6212), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (6210, 6212), True, 'import tensorflow as tf\n'), ((3522, 3547), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['prediction'], {}), '(prediction)\n', (3535, 3547), True, 'import tensorflow as tf\n'), ((3669, 3689), 'tensorflow.one_hot', 'tf.one_hot', (['label', '(2)'], {}), '(label, 2)\n', (3679, 3689), True, 'import tensorflow as tf\n'), ((3983, 4012), 'tensorflow.argmax', 'tf.argmax', (['prediction'], {'axis': '(1)'}), '(prediction, axis=1)\n', (3992, 4012), True, 'import tensorflow as tf\n'), ((4822, 4858), 'paddle.v2.dataset.imdb.train', 'paddle.dataset.imdb.train', (['word_dict'], {}), '(word_dict)\n', (4847, 4858), True, 'import paddle.v2 as paddle\n'), ((4989, 5024), 'paddle.v2.dataset.imdb.test', 'paddle.dataset.imdb.test', (['word_dict'], {}), '(word_dict)\n', (5013, 5024), True, 'import paddle.v2 as paddle\n'), ((5366, 5389), 'numpy.max', 'np.max', (['sequence_length'], {}), '(sequence_length)\n', (5372, 5389), True, 'import numpy as np\n'), ((6416, 6427), 'time.time', 'time.time', ([], {}), '()\n', (6425, 6427), False, 'import time\n'), ((7515, 7526), 'time.time', 'time.time', ([], {}), '()\n', (7524, 7526), False, 'import time\n'), ((6712, 6735), 'numpy.sum', 'np.sum', (['sequence_length'], {}), '(sequence_length)\n', (6718, 6735), True, 'import numpy as np\n'), ((6761, 6784), 'numpy.max', 'np.max', (['sequence_length'], {}), '(sequence_length)\n', (6767, 6784), True, 'import numpy as np\n'), ((5487, 5505), 'numpy.array', 'np.array', (['word_idx'], {}), '(word_idx)\n', (5495, 5505), True, 'import numpy as np\n'), ((6890, 6908), 'numpy.array', 'np.array', (['word_idx'], {}), '(word_idx)\n', (6898, 6908), True, 'import numpy as np\n')]
""" Methods for constructing point clouds from meshes. """ import numpy as np def from_mesh(vertices, triangles, n=1000): """ Randomly sample points by area on a triangle mesh. This function is extremely fast by using broadcasting/numpy operations in lieu of loops Inputs ------- vertices : ndarray (N, 3) Array of points in 3D triangles : ndarray (M, 3) Array of triangles connecting points, pointing to vertex indices n : int Number of points to sample Returns ------- data : NDArray (n, 3) array of sampled points """ assert vertices.shape[1] == 3 assert triangles.shape[1] == 3 # Step 1: Compute cross product of all face triangles and use to compute # areas (very similar to code used to compute vertex normals) # Vectors spanning two triangle edges P0 = vertices[triangles[:, 0], :] P1 = vertices[triangles[:, 1], :] P2 = vertices[triangles[:, 2], :] V1 = P1 - P0 V2 = P2 - P0 FNormals = np.cross(V1, V2) # import pdb; pdb.set_trace() FAreas = np.sqrt(np.sum(FNormals ** 2, 1)).flatten() # Get rid of zero area faces and update points triangles = triangles[FAreas > 0, :] FNormals = FNormals[FAreas > 0, :] FAreas = FAreas[FAreas > 0] P0 = vertices[triangles[:, 0], :] P1 = vertices[triangles[:, 1], :] P2 = vertices[triangles[:, 2], :] # Compute normals NTris = triangles.shape[0] FNormals = FNormals / FAreas[:, None] FAreas = 0.5 * FAreas FNormals = FNormals # VNormals = np.zeros_like(vertices) VAreas = np.zeros(vertices.shape[0]) for k in range(3): # VNormals[triangles[:, k], :] += FAreas[:, None] * FNormals VAreas[triangles[:, k]] += FAreas # Normalize normals VAreas[VAreas == 0] = 1 # VNormals = VNormals / VAreas[:, None] # Step 2: Randomly sample points based on areas FAreas = FAreas / np.sum(FAreas) AreasC = np.cumsum(FAreas) samples = np.sort(np.random.rand(n)) # Figure out how many samples there are for each face FSamples = np.zeros(NTris, np.int64) fidx = 0 for s in samples: while s > AreasC[fidx]: fidx += 1 FSamples[fidx] += 1 # Now initialize an array that stores the triangle sample indices tidx = np.zeros(n, dtype=np.int64) idx = 0 for i in range(len(FSamples)): tidx[idx : idx + FSamples[i]] = i idx += FSamples[i] # N = np.zeros((n, 3)) # Allocate space for normals idx = 0 # Vector used to determine if points need to be flipped across parallelogram V3 = P2 - P1 V3 = V3 / np.sqrt(np.sum(V3 ** 2, 1))[:, None] # Normalize # Randomly sample points on each face # Generate random points uniformly in parallelogram u = np.random.rand(n, 1) v = np.random.rand(n, 1) Ps = u * V1[tidx, :] + P0[tidx, :] Ps += v * V2[tidx, :] # Flip over points which are on the other side of the triangle dP = Ps - P1[tidx, :] proj = np.sum(dP * V3[tidx, :], 1) dPPar = V3[tidx, :] * proj[:, None] # Parallel project onto edge dPPerp = dP - dPPar Qs = Ps - dPPerp dP0QSqr = np.sum((Qs - P0[tidx, :]) ** 2, 1) dP0PSqr = np.sum((Ps - P0[tidx, :]) ** 2, 1) idxreg = np.arange(n, dtype=np.int64) idxflip = idxreg[dP0QSqr < dP0PSqr] u[idxflip, :] = 1 - u[idxflip, :] v[idxflip, :] = 1 - v[idxflip, :] Ps[idxflip, :] = ( P0[tidx[idxflip], :] + u[idxflip, :] * V1[tidx[idxflip], :] + v[idxflip, :] * V2[tidx[idxflip], :] ) # # Step 3: Compute normals of sampled points by barycentric interpolation # Ns = u * VNormals[triangles[tidx, 1], :] # Ns += v * VNormals[triangles[tidx, 2], :] # Ns += (1 - u - v) * VNormals[triangles[tidx, 0], :] return Ps __all__ = ["from_mesh"]
[ "numpy.sum", "numpy.zeros", "numpy.cross", "numpy.cumsum", "numpy.arange", "numpy.random.rand" ]
[((1025, 1041), 'numpy.cross', 'np.cross', (['V1', 'V2'], {}), '(V1, V2)\n', (1033, 1041), True, 'import numpy as np\n'), ((1611, 1638), 'numpy.zeros', 'np.zeros', (['vertices.shape[0]'], {}), '(vertices.shape[0])\n', (1619, 1638), True, 'import numpy as np\n'), ((1973, 1990), 'numpy.cumsum', 'np.cumsum', (['FAreas'], {}), '(FAreas)\n', (1982, 1990), True, 'import numpy as np\n'), ((2106, 2131), 'numpy.zeros', 'np.zeros', (['NTris', 'np.int64'], {}), '(NTris, np.int64)\n', (2114, 2131), True, 'import numpy as np\n'), ((2331, 2358), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (2339, 2358), True, 'import numpy as np\n'), ((2814, 2834), 'numpy.random.rand', 'np.random.rand', (['n', '(1)'], {}), '(n, 1)\n', (2828, 2834), True, 'import numpy as np\n'), ((2843, 2863), 'numpy.random.rand', 'np.random.rand', (['n', '(1)'], {}), '(n, 1)\n', (2857, 2863), True, 'import numpy as np\n'), ((3034, 3061), 'numpy.sum', 'np.sum', (['(dP * V3[tidx, :])', '(1)'], {}), '(dP * V3[tidx, :], 1)\n', (3040, 3061), True, 'import numpy as np\n'), ((3191, 3225), 'numpy.sum', 'np.sum', (['((Qs - P0[tidx, :]) ** 2)', '(1)'], {}), '((Qs - P0[tidx, :]) ** 2, 1)\n', (3197, 3225), True, 'import numpy as np\n'), ((3240, 3274), 'numpy.sum', 'np.sum', (['((Ps - P0[tidx, :]) ** 2)', '(1)'], {}), '((Ps - P0[tidx, :]) ** 2, 1)\n', (3246, 3274), True, 'import numpy as np\n'), ((3288, 3316), 'numpy.arange', 'np.arange', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (3297, 3316), True, 'import numpy as np\n'), ((1945, 1959), 'numpy.sum', 'np.sum', (['FAreas'], {}), '(FAreas)\n', (1951, 1959), True, 'import numpy as np\n'), ((2013, 2030), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (2027, 2030), True, 'import numpy as np\n'), ((1097, 1121), 'numpy.sum', 'np.sum', (['(FNormals ** 2)', '(1)'], {}), '(FNormals ** 2, 1)\n', (1103, 1121), True, 'import numpy as np\n'), ((2665, 2683), 'numpy.sum', 'np.sum', (['(V3 ** 2)', '(1)'], {}), '(V3 ** 2, 1)\n', (2671, 2683), True, 'import numpy as np\n')]
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax


def crf_inference(img, probs, t=10, scale_factor=1, labels=21):
    h, w = img.shape[:2]
    n_labels = labels

    d = dcrf.DenseCRF2D(w, h, n_labels)

    unary = unary_from_softmax(probs)
    unary = np.ascontiguousarray(unary)

    d.setUnaryEnergy(unary)
    d.addPairwiseGaussian(sxy=3/scale_factor, compat=3)
    d.addPairwiseBilateral(sxy=80/scale_factor, srgb=13, rgbim=np.copy(img), compat=10)
    Q = d.inference(t)

    return np.array(Q).reshape((n_labels, h, w))
[ "numpy.copy", "numpy.array", "pydensecrf.utils.unary_from_softmax", "pydensecrf.densecrf.DenseCRF2D", "numpy.ascontiguousarray" ]
[((225, 256), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['w', 'h', 'n_labels'], {}), '(w, h, n_labels)\n', (240, 256), True, 'import pydensecrf.densecrf as dcrf\n'), ((270, 295), 'pydensecrf.utils.unary_from_softmax', 'unary_from_softmax', (['probs'], {}), '(probs)\n', (288, 295), False, 'from pydensecrf.utils import unary_from_softmax\n'), ((308, 335), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['unary'], {}), '(unary)\n', (328, 335), True, 'import numpy as np\n'), ((484, 496), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (491, 496), True, 'import numpy as np\n'), ((544, 555), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (552, 555), True, 'import numpy as np\n')]
import numpy as np
import openamundsen.constants as c
import pandas as pd


def day_angle(doy):
    """
    Return the day of the year in angular form.

    Parameters
    ----------
    doy : int
        Day of the year (Jan 1 = 1).

    Returns
    -------
    day_angle : float
        Day angle in radians.
    """
    # TODO
    # Use 2*pi/365 for normal years and 2*pi/366 for leap years
    # instead of 365.25?
    return (2. * np.pi / c.DAYS_PER_YEAR) * (doy - 1)


def equation_of_time(doy, method='spencer'):
    """
    Calculate the equation of time, i.e., the difference in time between
    solar noon at 0 degrees longitude and 12:00 UTC.

    Parameters
    ----------
    doy : int
        Day of the year (Jan 1 = 1).

    Returns
    -------
    eot_m : float
        Equation of time in minutes for the given day. The value is within
        +/- 16 minutes throughout the entire year.

    References
    ----------
    .. [1] <NAME>, "Fourier series representation of the position of the sun"
           in Search 2 (5), p. 172 (1971)
    """
    # TODO
    # Implement Reda method (https://www.nrel.gov/docs/fy08osti/34302.pdf),
    # possibly PVCDROM (see https://github.com/pvlib/pvlib-python/blob/master/pvlib/solarposition.py)
    da = day_angle(doy)

    if method == 'spencer':
        eot = (
            0.0000075
            + 0.001868 * np.cos(da)
            - 0.032077 * np.sin(da)
            - 0.014615 * np.cos(2 * da)
            - 0.040849 * np.sin(2 * da)
        )  # in radians
    else:
        raise NotImplementedError(f'Unsupported method: {method}')

    eot_m = c.HOURS_PER_DAY * c.MINUTES_PER_HOUR / (2 * np.pi) * eot  # in minutes
    return eot_m


def declination_angle(doy):
    """
    Calculate the solar declination angle after Bourges (1985).

    Parameters
    ----------
    doy : int
        Day of the year (Jan 1 = 1).

    Returns
    -------
    declination : float
        Solar declination angle in degrees.

    References
    ----------
    .. [1] <NAME>. Improvement in solar declination computation.
           Solar Energy, 1985, 35(4), pp.367-369.
    """
    pass
    day_number = np.deg2rad((360 / c.DAYS_PER_YEAR) * (doy - 79.346))
    declination = (
        0.3723
        + 23.2567 * np.sin(day_number)
        - 0.7580 * np.cos(day_number)
        + 0.1149 * np.sin(2 * day_number)
        + 0.3656 * np.cos(2 * day_number)
        - 0.1712 * np.sin(3 * day_number)
        + 0.0201 * np.cos(3 * day_number)
    )
    return declination


def hour_angle(date, timezone, lon, eot):
    """
    Calculate the hour angle, i.e., the angular displacement of the sun east
    or west of the local meridian due to rotation of the earth on its axis at
    15° per hour.

    Parameters
    ----------
    timezone : int
        Timezone, e.g. 1 for CET.

    lon : float
        Longitude.

    eot : float
        Equation of time in minutes.

    Returns
    -------
    hour_angle : float
        Hour angle in degrees.
    """
    date = pd.to_datetime(date)
    hour = (date - date.normalize()).total_seconds() / c.SECONDS_PER_HOUR  # fractional hour of the day
    lstm = c.STANDARD_TIMEZONE_WIDTH * timezone  # local standard time meridian
    tc = c.MINUTES_PER_DEGREE_OF_EARTH_ROTATION * (lon - lstm) + eot  # time correction (minutes)
    lst = hour + tc / c.MINUTES_PER_HOUR  # local solar time
    ha = c.SUN_DEGREES_PER_HOUR * (lst - 12)
    return ha


def sun_vector(lat, ha, dec):
    """
    Calculate the vector defining the position of the sun after Corripio (2003).

    Parameters
    ----------
    lat : float
        Latitude (degrees).

    ha : float
        Hour angle (degrees).

    dec : float
        Declination angle (degrees).

    Returns
    -------
    vec : ndarray
        Solar vector.

    References
    ----------
    .. [1] <NAME>. (2003). Vectorial algebra algorithms for calculating
           terrain parameters from DEMs and solar radiation modelling in
           mountainous terrain. International Journal of Geographical
           Information Science, 17(1), 1–23.
           https://doi.org/10.1080/13658810210157796
    """
    lat_rad = np.deg2rad(lat)
    ha_rad = np.deg2rad(ha)
    dec_rad = np.deg2rad(dec)

    return np.array([
        -np.sin(ha_rad) * np.cos(dec_rad),
        np.sin(lat_rad) * np.cos(ha_rad) * np.cos(dec_rad) - np.cos(lat_rad) * np.sin(dec_rad),
        np.cos(lat_rad) * np.cos(ha_rad) * np.cos(dec_rad) + np.sin(lat_rad) * np.sin(dec_rad),
    ])


def sun_parameters(date, lon, lat, timezone):
    """
    Calculate sun related parameters for a specified date and position.

    Parameters
    ----------
    date : datetime-like
        Date and time.

    lon : float
        Longitude (degrees).

    lat : float
        Latitude (degrees).

    timezone : int
        Timezone, e.g. 1 for CET.

    Returns
    -------
    d : dict
        Dictionary containing the following keys:
        - 'day_angle': day angle (radians)
        - 'hour_angle': hour angle (degrees)
        - 'declination_angle': declination angle (degrees)
        - 'equation_of_time': equation of time (minutes)
        - 'sun_vector': vector describing the position of the sun
        - 'zenith_angle': zenith angle (degrees)
        - 'sun_over_horizon': True if the sun is over the horizon
    """
    date = pd.to_datetime(date)
    eot = equation_of_time(date.dayofyear)
    da = day_angle(date.dayofyear)
    ha = hour_angle(date, timezone, lon, eot)
    dec = declination_angle(date.dayofyear)
    sv = sun_vector(lat, ha, dec)
    zenith_angle = np.rad2deg(np.arccos(sv[2]))

    return {
        'day_angle': da,
        'hour_angle': ha,
        'declination_angle': dec,
        'equation_of_time': eot,
        'sun_vector': sv,
        'zenith_angle': zenith_angle,
        'sun_over_horizon': zenith_angle < 90,
    }
[ "numpy.deg2rad", "numpy.sin", "pandas.to_datetime", "numpy.cos", "numpy.arccos" ]
[((2137, 2187), 'numpy.deg2rad', 'np.deg2rad', (['(360 / c.DAYS_PER_YEAR * (doy - 79.346))'], {}), '(360 / c.DAYS_PER_YEAR * (doy - 79.346))\n', (2147, 2187), True, 'import numpy as np\n'), ((2964, 2984), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (2978, 2984), True, 'import pandas as pd\n'), ((4097, 4112), 'numpy.deg2rad', 'np.deg2rad', (['lat'], {}), '(lat)\n', (4107, 4112), True, 'import numpy as np\n'), ((4126, 4140), 'numpy.deg2rad', 'np.deg2rad', (['ha'], {}), '(ha)\n', (4136, 4140), True, 'import numpy as np\n'), ((4155, 4170), 'numpy.deg2rad', 'np.deg2rad', (['dec'], {}), '(dec)\n', (4165, 4170), True, 'import numpy as np\n'), ((5280, 5300), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (5294, 5300), True, 'import pandas as pd\n'), ((5533, 5549), 'numpy.arccos', 'np.arccos', (['sv[2]'], {}), '(sv[2])\n', (5542, 5549), True, 'import numpy as np\n'), ((2415, 2437), 'numpy.cos', 'np.cos', (['(3 * day_number)'], {}), '(3 * day_number)\n', (2421, 2437), True, 'import numpy as np\n'), ((1462, 1476), 'numpy.sin', 'np.sin', (['(2 * da)'], {}), '(2 * da)\n', (1468, 1476), True, 'import numpy as np\n'), ((2381, 2403), 'numpy.sin', 'np.sin', (['(3 * day_number)'], {}), '(3 * day_number)\n', (2387, 2403), True, 'import numpy as np\n'), ((4220, 4235), 'numpy.cos', 'np.cos', (['dec_rad'], {}), '(dec_rad)\n', (4226, 4235), True, 'import numpy as np\n'), ((1434, 1448), 'numpy.cos', 'np.cos', (['(2 * da)'], {}), '(2 * da)\n', (1440, 1448), True, 'import numpy as np\n'), ((2339, 2361), 'numpy.cos', 'np.cos', (['(2 * day_number)'], {}), '(2 * day_number)\n', (2345, 2361), True, 'import numpy as np\n'), ((4203, 4217), 'numpy.sin', 'np.sin', (['ha_rad'], {}), '(ha_rad)\n', (4209, 4217), True, 'import numpy as np\n'), ((4280, 4295), 'numpy.cos', 'np.cos', (['dec_rad'], {}), '(dec_rad)\n', (4286, 4295), True, 'import numpy as np\n'), ((4298, 4313), 'numpy.cos', 'np.cos', (['lat_rad'], {}), '(lat_rad)\n', (4304, 4313), True, 'import numpy as np\n'), ((4316, 4331), 'numpy.sin', 'np.sin', (['dec_rad'], {}), '(dec_rad)\n', (4322, 4331), True, 'import numpy as np\n'), ((4376, 4391), 'numpy.cos', 'np.cos', (['dec_rad'], {}), '(dec_rad)\n', (4382, 4391), True, 'import numpy as np\n'), ((4394, 4409), 'numpy.sin', 'np.sin', (['lat_rad'], {}), '(lat_rad)\n', (4400, 4409), True, 'import numpy as np\n'), ((4412, 4427), 'numpy.sin', 'np.sin', (['dec_rad'], {}), '(dec_rad)\n', (4418, 4427), True, 'import numpy as np\n'), ((1398, 1408), 'numpy.sin', 'np.sin', (['da'], {}), '(da)\n', (1404, 1408), True, 'import numpy as np\n'), ((2305, 2327), 'numpy.sin', 'np.sin', (['(2 * day_number)'], {}), '(2 * day_number)\n', (2311, 2327), True, 'import numpy as np\n'), ((4245, 4260), 'numpy.sin', 'np.sin', (['lat_rad'], {}), '(lat_rad)\n', (4251, 4260), True, 'import numpy as np\n'), ((4263, 4277), 'numpy.cos', 'np.cos', (['ha_rad'], {}), '(ha_rad)\n', (4269, 4277), True, 'import numpy as np\n'), ((4341, 4356), 'numpy.cos', 'np.cos', (['lat_rad'], {}), '(lat_rad)\n', (4347, 4356), True, 'import numpy as np\n'), ((4359, 4373), 'numpy.cos', 'np.cos', (['ha_rad'], {}), '(ha_rad)\n', (4365, 4373), True, 'import numpy as np\n'), ((1374, 1384), 'numpy.cos', 'np.cos', (['da'], {}), '(da)\n', (1380, 1384), True, 'import numpy as np\n'), ((2267, 2285), 'numpy.cos', 'np.cos', (['day_number'], {}), '(day_number)\n', (2273, 2285), True, 'import numpy as np\n'), ((2237, 2255), 'numpy.sin', 'np.sin', (['day_number'], {}), '(day_number)\n', (2243, 2255), True, 'import numpy as np\n')]
'''
This code is part of QuTIpy.

(c) Copyright <NAME>, 2021

This code is licensed under the Apache License, Version 2.0. You may
obtain a copy of this license in the LICENSE.txt file in the root directory
of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.

Any modifications or derivative works of this code must retain this
copyright notice, and modified files need to carry a notice indicating
that they have been altered from the originals.
'''


import numpy as np

from qutipy.general_functions import tensor


def Natural_representation(K):

    '''
    Calculates the natural representation of the channel (in the standard
    basis) given by the Kraus operators in K. In terms of the Kraus operators,
    the natural representation of the channel in the standard basis is given by

        N=sum_i K_i ⊗ conj(K_i),

    where the sum is over the Kraus operators K_i in K.
    '''

    return np.sum([tensor(k,np.conjugate(k)) for k in K],1)
[ "numpy.conjugate" ]
[((933, 948), 'numpy.conjugate', 'np.conjugate', (['k'], {}), '(k)\n', (945, 948), True, 'import numpy as np\n')]
import sys
import numpy as np
import pandas as pd

import hypercomparison.utils
import hypercomparison.networks
import hypercomparison.correlation_and_greedy_routing

from rpy2.robjects.packages import importr
hydra = importr('hydra')
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()

logger = hypercomparison.utils.get_logger(__name__)

network_name = sys.argv[1]
dimensions = int(sys.argv[2])
out_path = sys.argv[-1]

result_list = []
network = hypercomparison.networks.RealNetwork(network_name)
network.index_nodes()
network.generate_shortest_path_length_matrix()
if dimensions > len(network.G.nodes):
    dimensions = len(network.G.nodes)

pos = hydra.hydra(network.shortest_path_length_matrix, dimensions)
directional = np.array(pos.rx2('directional'))
r = np.array(pos.rx2('r'))
temp_pos = [list(directional[i]*r[i]) for i in range(len(network.G.nodes))]
temp_pos = np.array(temp_pos)
embeddings = {str(network.id2node[i]): temp_pos[i] for i in range(len(network.id2node))}

logger.info("Working on network {} dimension {}".format(network_name, dimensions))
all_tasks = hypercomparison.correlation_and_greedy_routing.AllTasks(network, embeddings, distance_func='poincare')
rate, length, efficiency, stretch, score = all_tasks.greedy_routing()
pearson_correlation, _, spearman_correlation, _ = all_tasks.calculate_correlation()
absolute_error, relative_error = all_tasks.calculate_error()
result_list.append([
    network_name, dimensions, rate, length, efficiency, stretch, score,
    pearson_correlation, spearman_correlation, absolute_error, relative_error])

df = pd.DataFrame(result_list, columns=[
    'network_name', 'dimensions', 'gr_rate', 'gr_length', 'gr_efficiency',
    'gr_stretch', 'gr_score', 'pearson_correlation', 'spearman_correlation',
    'absolute_error', 'relative_error'
])
df.to_csv(out_path, index=None)
[ "numpy.array", "rpy2.robjects.packages.importr", "pandas.DataFrame" ]
[((217, 233), 'rpy2.robjects.packages.importr', 'importr', (['"""hydra"""'], {}), "('hydra')\n", (224, 233), False, 'from rpy2.robjects.packages import importr\n'), ((888, 906), 'numpy.array', 'np.array', (['temp_pos'], {}), '(temp_pos)\n', (896, 906), True, 'import numpy as np\n'), ((1590, 1817), 'pandas.DataFrame', 'pd.DataFrame', (['result_list'], {'columns': "['network_name', 'dimensions', 'gr_rate', 'gr_length', 'gr_efficiency',\n 'gr_stretch', 'gr_score', 'pearson_correlation', 'spearman_correlation',\n 'absolute_error', 'relative_error']"}), "(result_list, columns=['network_name', 'dimensions', 'gr_rate',\n 'gr_length', 'gr_efficiency', 'gr_stretch', 'gr_score',\n 'pearson_correlation', 'spearman_correlation', 'absolute_error',\n 'relative_error'])\n", (1602, 1817), True, 'import pandas as pd\n')]
import numpy as np
from scipy import linalg
from numpy import matmul
import time
import torch


def LU_solver(A,b):
    P,L,U = linalg.lu(A)
    y = linalg.solve(L,matmul(P,b))
    x = linalg.solve(U,y)
    return x


def Simulation_LU_solver(A,b,x):
    P,L,U = linalg.lu(A)
    y = linalg.solve(L,matmul(P,b))
    x = linalg.solve(U,y)


if __name__ == "__main__":
    device = 'cuda'
    A = np.array([[17,24,1,8,15],[23,5,7,14,16],[4,6,13,20,22],[10,12,19,21,3],[11,18,25,2,9]])
    b = 65*np.ones((5,1))

    timer1 = time.perf_counter()
    for i in range(10):
        x = LU_solver(A,b)
    timer2 = time.perf_counter()
    print(timer2-timer1)

    A = torch.tensor(A,device=device)
    b = torch.tensor(b,device=device)
    timer1 = time.perf_counter()
    for i in range(10):
        x = torch.linalg.solve(A,b)
    timer2 = time.perf_counter()
    print(timer2-timer1)
[ "scipy.linalg.solve", "time.perf_counter", "scipy.linalg.lu", "numpy.ones", "torch.linalg.solve", "numpy.array", "numpy.matmul", "torch.tensor" ]
[((127, 139), 'scipy.linalg.lu', 'linalg.lu', (['A'], {}), '(A)\n', (136, 139), False, 'from scipy import linalg\n'), ((184, 202), 'scipy.linalg.solve', 'linalg.solve', (['U', 'y'], {}), '(U, y)\n', (196, 202), False, 'from scipy import linalg\n'), ((261, 273), 'scipy.linalg.lu', 'linalg.lu', (['A'], {}), '(A)\n', (270, 273), False, 'from scipy import linalg\n'), ((318, 336), 'scipy.linalg.solve', 'linalg.solve', (['U', 'y'], {}), '(U, y)\n', (330, 336), False, 'from scipy import linalg\n'), ((398, 514), 'numpy.array', 'np.array', (['[[17, 24, 1, 8, 15], [23, 5, 7, 14, 16], [4, 6, 13, 20, 22], [10, 12, 19, \n 21, 3], [11, 18, 25, 2, 9]]'], {}), '([[17, 24, 1, 8, 15], [23, 5, 7, 14, 16], [4, 6, 13, 20, 22], [10, \n 12, 19, 21, 3], [11, 18, 25, 2, 9]])\n', (406, 514), True, 'import numpy as np\n'), ((526, 545), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (543, 545), False, 'import time\n'), ((611, 630), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (628, 630), False, 'import time\n'), ((664, 694), 'torch.tensor', 'torch.tensor', (['A'], {'device': 'device'}), '(A, device=device)\n', (676, 694), False, 'import torch\n'), ((702, 732), 'torch.tensor', 'torch.tensor', (['b'], {'device': 'device'}), '(b, device=device)\n', (714, 732), False, 'import torch\n'), ((747, 766), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (764, 766), False, 'import time\n'), ((841, 860), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (858, 860), False, 'import time\n'), ((163, 175), 'numpy.matmul', 'matmul', (['P', 'b'], {}), '(P, b)\n', (169, 175), False, 'from numpy import matmul\n'), ((297, 309), 'numpy.matmul', 'matmul', (['P', 'b'], {}), '(P, b)\n', (303, 309), False, 'from numpy import matmul\n'), ((497, 512), 'numpy.ones', 'np.ones', (['(5, 1)'], {}), '((5, 1))\n', (504, 512), True, 'import numpy as np\n'), ((804, 828), 'torch.linalg.solve', 'torch.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (822, 828), False, 'import torch\n')]
import numpy as np
import matplotlib.pyplot as plt

datapath = 'C:\\Users\\arvid\\Documents\\KTH\\Masterkurser\\Deep Learning\\Assignments\\Assignment 1\\'

np.random.seed(137)


def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        dict = pickle.load(fo, encoding='bytes')
    return dict


def one_hot_encoding(labels):
    t = np.concatenate((np.arange(len(labels)).reshape(len(labels),1),np.array(labels).reshape(len(labels),1)),axis=1)
    one_hot = np.zeros((len(labels),10))
    one_hot[t[:,0],t[:,1]] = 1
    return one_hot.T


def load_batch(file):
    data_as_dict = unpickle(file)
    img_data = data_as_dict[b'data']
    labels = data_as_dict[b'labels']
    labels_encoded = one_hot_encoding(labels)
    return img_data, labels, labels_encoded


def preprocess(tr_data,val_data,test_data):
    tr_mean = np.mean(tr_data,axis=0)
    tr_std = np.std(tr_data,axis=0)
    tr_data = (tr_data - tr_mean)/tr_std
    val_data = (val_data - tr_mean)/tr_std
    test_data = (test_data - tr_mean)/tr_std
    return tr_data.T, val_data.T, test_data.T


def initializeWeights():
    W = np.random.normal(0,0.01,size=3072*10).reshape(10,3072)
    b = np.random.normal(0,0.01,size=10).reshape(10,1)
    return W,b


def softMax(x):
    return np.exp(x)/np.sum(np.exp(x),axis=0)


def evaluateClassifier(X,W,b):
    s = W@X+b
    return softMax(s)


def computeCost(X,Y,W,b,Lambda):
    p = evaluateClassifier(X,W,b)
    J = Lambda*np.sum(W**2)-1/(np.shape(X)[1])*np.sum(np.log(np.diag(Y.T@p)))
    return J


def computeAccuracy(X,y,W,b):
    p = evaluateClassifier(X,W,b)
    kstar = np.argmax(p,axis=0)
    acc = np.sum(kstar==y)/len(y)
    return acc


def computeGradient(X,Y,P,W,Lambda):
    n = np.shape(X)[1]
    g = -(Y-P)
    dJdW = [email protected]/n+2*Lambda*W
    dJdb = [email protected]((n,1))/n
    return [dJdW, dJdb]


def ComputeGradsNumSlow(X, Y, W, b, Lambda, h):
    K = W.shape[0]
    grad_W = np.zeros(W.shape)
    grad_b = np.zeros((K, 1))
    for i in range(b.shape[0]):
        b_try = b.copy()
        b_try[i] -= h
        c1 = computeCost(X, Y, W, b_try, Lambda)
        b_try = b.copy()
        b_try[i] += h
        c2 = computeCost(X, Y, W, b_try, Lambda)
        grad_b[i] = (c2 - c1) / (2*h)
    for i in range(W.shape[0]):
        for j in range(W.shape[1]):
            W_try = W.copy()
            W_try[i, j] -= h
            c1 = computeCost(X, Y, W_try, b, Lambda)
            W_try = W.copy()
            W_try[i, j] += h
            c2 = computeCost(X, Y, W_try, b, Lambda)
            grad_W[i, j] = (c2 - c1) / (2*h)
    return grad_W, grad_b


def compute_relative_error(grad_num, grad_anal, eps):
    error = np.abs(grad_num - grad_anal)/np.maximum(eps, np.abs(grad_num) + np.abs(grad_anal))
    return error


def acceptable_ratio(error,tolerance):
    return 100*np.sum(np.array(error)<tolerance)/np.size(error)


def compare_gradients(X_train,Y_train,Lambda,num_dim,num_samples):
    tolerances = [1e-1,1e-3,1e-4,1e-5,1e-6,1e-7,1e-8,1e-9]
    W,b = initializeWeights()
    P = evaluateClassifier(X_train[0:num_dim,0:num_samples],W[:,0:num_dim],b)
    grad_num = computeGradient(X_train[0:num_dim,0:num_samples],Y_train[:,0:num_samples],P,W[:,0:num_dim],Lambda)
    grad_anal = ComputeGradsNumSlow(X_train[0:num_dim,0:num_samples], Y_train[:,0:num_samples], W[:,0:num_dim], b, Lambda, 1e-6)
    error_W = compute_relative_error(grad_num[0], grad_anal[0], 1e-8)
    error_b = compute_relative_error(grad_num[1], grad_anal[1], 1e-8)
    ratios_W = []
    ratios_b = []
    for tol in tolerances:
        ratios_W.append(acceptable_ratio(error_W,tol))
        ratios_b.append(acceptable_ratio(error_b,tol))
    return ratios_W,ratios_b


X_train, y_train, Y_train = load_batch(datapath+'data_batch_1')
X_val, y_val,Y_val = load_batch(datapath+'data_batch_2')
X_test, y_test, Y_test= load_batch(datapath+'data_batch_3')
X_train,X_val,X_test=preprocess(X_train,X_val,X_test)

Lambda = 0.1
ratios_w,ratios_b=compare_gradients(X_train,Y_train,Lambda,20,10)
tolerances = [1e-1,1e-3,1e-4,1e-5,1e-6,1e-7,1e-8,1e-9]

plt.semilogx(tolerances,ratios_w,'-o',tolerances,ratios_b,'-o')
plt.legend(["Ratio in W","Ratio in b"])
plt.ylabel('Ratio of correct analytical gradients')
plt.xlabel('Tolerance')
ax = plt.gca()
ax.set_xlim(ax.get_xlim()[::-1])
plt.show()
[ "numpy.random.seed", "numpy.sum", "numpy.abs", "numpy.argmax", "numpy.ones", "numpy.shape", "numpy.mean", "pickle.load", "numpy.exp", "numpy.random.normal", "matplotlib.pyplot.gca", "numpy.diag", "numpy.std", "numpy.size", "matplotlib.pyplot.show", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.semilogx", "numpy.zeros", "numpy.array", "matplotlib.pyplot.xlabel" ]
[((158, 177), 'numpy.random.seed', 'np.random.seed', (['(137)'], {}), '(137)\n', (172, 177), True, 'import numpy as np\n'), ((3903, 3971), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['tolerances', 'ratios_w', '"""-o"""', 'tolerances', 'ratios_b', '"""-o"""'], {}), "(tolerances, ratios_w, '-o', tolerances, ratios_b, '-o')\n", (3915, 3971), True, 'import matplotlib.pyplot as plt\n'), ((3967, 4007), 'matplotlib.pyplot.legend', 'plt.legend', (["['Ratio in W', 'Ratio in b']"], {}), "(['Ratio in W', 'Ratio in b'])\n", (3977, 4007), True, 'import matplotlib.pyplot as plt\n'), ((4007, 4058), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ratio of correct analytical gradients"""'], {}), "('Ratio of correct analytical gradients')\n", (4017, 4058), True, 'import matplotlib.pyplot as plt\n'), ((4059, 4082), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tolerance"""'], {}), "('Tolerance')\n", (4069, 4082), True, 'import matplotlib.pyplot as plt\n'), ((4088, 4097), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4095, 4097), True, 'import matplotlib.pyplot as plt\n'), ((4131, 4141), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4139, 4141), True, 'import matplotlib.pyplot as plt\n'), ((807, 831), 'numpy.mean', 'np.mean', (['tr_data'], {'axis': '(0)'}), '(tr_data, axis=0)\n', (814, 831), True, 'import numpy as np\n'), ((841, 864), 'numpy.std', 'np.std', (['tr_data'], {'axis': '(0)'}), '(tr_data, axis=0)\n', (847, 864), True, 'import numpy as np\n'), ((1523, 1543), 'numpy.argmax', 'np.argmax', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (1532, 1543), True, 'import numpy as np\n'), ((1813, 1830), 'numpy.zeros', 'np.zeros', (['W.shape'], {}), '(W.shape)\n', (1821, 1830), True, 'import numpy as np\n'), ((1844, 1860), 'numpy.zeros', 'np.zeros', (['(K, 1)'], {}), '((K, 1))\n', (1852, 1860), True, 'import numpy as np\n'), ((264, 297), 'pickle.load', 'pickle.load', (['fo'], {'encoding': '"""bytes"""'}), "(fo, encoding='bytes')\n", (275, 297), False, 'import pickle\n'), ((1205, 1214), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1211, 1214), True, 'import numpy as np\n'), ((1550, 1568), 'numpy.sum', 'np.sum', (['(kstar == y)'], {}), '(kstar == y)\n', (1556, 1568), True, 'import numpy as np\n'), ((1630, 1641), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1638, 1641), True, 'import numpy as np\n'), ((2553, 2581), 'numpy.abs', 'np.abs', (['(grad_num - grad_anal)'], {}), '(grad_num - grad_anal)\n', (2559, 2581), True, 'import numpy as np\n'), ((2739, 2753), 'numpy.size', 'np.size', (['error'], {}), '(error)\n', (2746, 2753), True, 'import numpy as np\n'), ((1060, 1101), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.01)'], {'size': '(3072 * 10)'}), '(0, 0.01, size=3072 * 10)\n', (1076, 1101), True, 'import numpy as np\n'), ((1120, 1154), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.01)'], {'size': '(10)'}), '(0, 0.01, size=10)\n', (1136, 1154), True, 'import numpy as np\n'), ((1222, 1231), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1228, 1231), True, 'import numpy as np\n'), ((1379, 1393), 'numpy.sum', 'np.sum', (['(W ** 2)'], {}), '(W ** 2)\n', (1385, 1393), True, 'import numpy as np\n'), ((1694, 1709), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (1701, 1709), True, 'import numpy as np\n'), ((2598, 2614), 'numpy.abs', 'np.abs', (['grad_num'], {}), '(grad_num)\n', (2604, 2614), True, 'import numpy as np\n'), ((2617, 2634), 'numpy.abs', 'np.abs', (['grad_anal'], {}), '(grad_anal)\n', (2623, 2634), True, 'import numpy as np\n'), ((412, 428), 
'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (420, 428), True, 'import numpy as np\n'), ((1395, 1406), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1403, 1406), True, 'import numpy as np\n'), ((1425, 1441), 'numpy.diag', 'np.diag', (['(Y.T @ p)'], {}), '(Y.T @ p)\n', (1432, 1441), True, 'import numpy as np\n'), ((2712, 2727), 'numpy.array', 'np.array', (['error'], {}), '(error)\n', (2720, 2727), True, 'import numpy as np\n')]
'''
description: FRBCat functionality for pyAccess
license: APACHE 2.0
author: <NAME>, NLeSC (<EMAIL>)
'''
import pymysql.cursors
import pandas as pd
from pyAccess import dbase as dbase
from pyAccess import utils as utils
import os
import sys
from numpy import append as npappend
from numpy import array as nparray
from pyAccess import dbase
from numpy import where as npwhere
from numpy import ravel as npravel
import voeventparse as vp
import datetime
import re


class FRBCat_add:
    def __init__(self, connection, cursor, mapping):
        self.connection = connection
        self.cursor = cursor
        self.mapping = mapping

    def author_exists(self, ivorn):
        '''
        Check if author already exists in database
        if author is found, set self.author_id
        '''
        # check if the author ivorn is already in the database
        author_id = dbase.extract_from_db_sql(self.cursor, 'authors', 'id',
                                               'ivorn', ivorn)
        if not author_id:
            # did not find the author ivorn
            return False
        else:
            # set self.author_id to the one in the database
            self.author_id = author_id['id']
            return True

    def event_exists(self, ivorn):
        '''
        Check if event ivorn already exists in database
        if event is found, set self.event_id
        '''
        # check if the event ivorn is already in the database
        event_id = dbase.extract_from_db_sql(
            self.cursor, 'radio_measured_params', 'id', 'voevent_ivorn', ivorn)
        if not event_id:
            # did not find the event ivorn
            return False
        else:
            # set self.event_id to the id of the ivorn in the database
            self.event_id = event_id['id']
            return True

    def add_authors(self, table, rows, value):
        '''
        Add author to the database if the ivorn is not in the authors table
        '''
        # check if author already exists in database
        # TODO: try/except
        ivorn = value[npwhere(rows == 'ivorn')][0]
        author_exists = self.author_exists(ivorn)
        # add author to database if author does not yet exist in db
        if not author_exists:
            self.author_id = self.insert_into_database(table, rows, value)

    def add_frbs(self, table, rows, value):
        '''
        Add event to the frbs table
        '''
        rows = npappend(rows, 'author_id')
        value = npappend(value, self.author_id)
        self.frb_id = self.insert_into_database(table, rows, value)

    def add_frbs_notes(self, table, rows, value):
        '''
        Add event to the frbs_notes table
        '''
        rows = npappend(rows, ('frb_id'))
        value = npappend(value, (self.frb_id))
        frb_notes_id = self.insert_into_database(table, rows, value)

    def add_frbs_have_publications(self, table, rows, value):
        '''
        Add event to the frbs_have_publications table
        '''
        rows = npappend(rows, ('frb_id', 'pub_id'))
        value = npappend(value, (self.frb_id, self.pub_id))
        self.insert_into_database(table, rows, value)

    def add_observations(self, table, rows, value):
        '''
        Add event to the observations table
        '''
        rows = npappend(rows, ('frb_id', 'author_id'))
        value = npappend(value, (self.frb_id, self.author_id))
        self.obs_id = self.insert_into_database(table, rows, value)

    def add_observations_notes(self, table, rows, value):
        '''
        Add event to the observations_notes table
        '''
        rows = npappend(rows, ('obs_id'))
        value = npappend(value, (self.obs_id))
        obs_notes_id = self.insert_into_database(table, rows, value)

    def add_observations_have_publications(self, table, rows, value):
        '''
        Add event to the observations_have_publications table
        '''
        rows = npappend(rows, ('obs_id', 'pub_id'))
        value = npappend(value, (self.obs_id, self.pub_id))
        self.insert_into_database(table, rows, value)

    def add_radio_observations_params(self, table, rows, value):
        '''
        Add event to the radio_observations_params table
        '''
        rows = npappend(rows, ('obs_id', 'author_id'))
        value = npappend(value, (self.obs_id, self.author_id))
        self.rop_id = self.insert_into_database(table, rows, value)

    def add_radio_observations_params_notes(self, table, rows, value):
        '''
        Add event to the radio_observations_params_notes table
        '''
        rows = npappend(rows, ('rop_id'))
        value = npappend(value, (self.rop_id))
        rop_notes_id = self.insert_into_database(table, rows, value)

    def add_radio_observations_params_have_publications(
            self, table, rows, value):
        '''
        Add event to the radio_observations_params_have_publications table
        '''
        rows = npappend(rows, ('rop_id', 'pub_id'))
        value = npappend(value, (self.rop_id, self.pub_id))
        self.insert_into_database(table, rows, value)

    def add_radio_measured_params(self, table, rows, value):
        '''
        Add event to the radio_measured_params table
        '''
        rows = npappend(rows, ('rop_id', 'author_id'))
        value = npappend(value, (self.rop_id, self.author_id))
        ivorn = value[npwhere(rows == 'voevent_ivorn')][0]
        self.event_exists = self.event_exists(ivorn)
        # add event to the database if it does not exist yet
        if not self.event_exists:
            self.rmp_id = self.insert_into_database(table, rows, value)

    def add_radio_measured_params_notes(self, table, rows, value):
        '''
        Add event to the radio_measured_params_notes table
        '''
        rows = npappend(rows, ('rmp_id'))
        value = npappend(value, (self.rmp_id))
        rmp_notes_id = self.insert_into_database(table, rows, value)

    def add_radio_measured_params_have_publications(self, table, rows, value):
        '''
        Add event to the radio_measured_params_have_publications table
        '''
        rows = npappend(rows, ('rmp_id', 'pub_id'))
        value = npappend(value, (self.rmp_id, self.pub_id))
        self.insert_into_database(table, rows, value)

    def add_publications(self, table, rows, value):
        '''
        Add event to the publications table
        '''
        self.pubid = self.insert_into_database(table, rows, value)

    def add_radio_images(self, table, rows, value):
        '''
        Add event to the radio_images table
        '''
        self.rid = self.insert_into_database(table, rows, value)

    def add_radio_images_have_rmp(self, table, rows, value):
        '''
        Add event to the radio_images_have_rmp table
        '''
        rows = npappend(rows, ('radio_image_id', 'rmp_id'))
        value = npappend(value, (self.rid, self.rmp_id))
        self.insert_into_database(table, rows, value)

    def insert_into_database(self, table, rows, value):
        row_sql = ', '.join(map(str, rows))
        self.cursor.execute("INSERT INTO {} ({}) VALUES {}".format(
            table, row_sql, tuple(value)))
        return self.connection.insert_id()  # alternatively cursor.lastrowid

    def add_VOEvent_to_FRBCat(self):
        '''
        Add a VOEvent to the FRBCat database
          - input:
              connection: database connection
              cursor: database cursor object
              mapping: mapping between database entry and VOEvent value
                         db tables in mapping['FRBCAT TABLE']
                         db columns in mapping['FRBCAT COLUMN']
                         db values in mapping['values']
        '''
        # define database tables in the order they need to be filled
        tables = ['authors', 'frbs', 'frbs_notes', 'observations',
                  'observations_notes', 'radio_observations_params',
                  'radio_observations_params_notes',
                  'radio_measured_params', 'radio_measured_params_notes']
        # loop over defined tables
        for table in tables:
            try:
                del value
            except NameError:
                pass
            # extract the rows from the mapping that are in the table
            to_add = self.mapping.loc[(self.mapping['FRBCAT TABLE'] == table) &
                                      (self.mapping['value'].notnull())]
            # extract db rows and values to add
            rows = to_add['FRBCAT COLUMN'].values
            # loop over extracted rows and insert values
            for row in rows:
                # extract value from pandas dataframe
                try:
                    value
                    value.append(to_add.loc[to_add['FRBCAT COLUMN'] == row][
                        'value'].values[0])
                except UnboundLocalError:
                    value = [to_add.loc[to_add[
                        'FRBCAT COLUMN'] == row]['value'].values[0]]
            value = npravel(nparray(value))  # convert to numpy array
            if table == 'authors':
                self.add_authors(table, rows, value)
            if table == 'frbs':
                self.add_frbs(table, rows, value)
            if table == 'frbs_notes':
                self.add_frbs_notes(table, rows, value)
            if table == 'observations':
                self.add_observations(table, rows, value)
            if table == 'observations_notes':
                self.add_observations_notes(table, rows, value)
            if table == 'radio_observations_params':
                self.add_radio_observations_params(table, rows, value)
            if table == 'radio_observations_params_notes':
                self.add_radio_observations_params_notes(table, rows, value)
            if table == 'radio_measured_params':
                self.add_radio_measured_params(table, rows, value)
                if self.event_exists:
                    break  # don't want to add already existing event
            if table == 'radio_measured_params_notes':
                self.add_radio_measured_params_notes(table, rows, value)
        if self.event_exists:
            # event is already in database, rollback
            # TODO: is this what we want to do?
            self.connection.rollback()
        else:
            # commit changes to db
            self.connection.rollback()  # TODO: placeholder for next line
            # dbase.commitToDB(self.connection, self.cursor)
        dbase.closeDBConnection(self.connection, self.cursor)


class FRBCat_decode:
    def __init__(self, connection, cursor, frbs_id):
        self.connection = connection
        self.cursor = cursor
        self.frbs_id = frbs_id

    def decode_VOEvent_from_FRBCat(self):
        '''
        Decode a VOEvent from the FRBCat database
          input:
            cursor: database cursor object
            mapping: mapping between database entry and VOEvent xml
          output:
            updated mapping with added values column
        '''
        sql = """select *, radio_measured_params_notes.note as rmp_note,
                 radio_observations_params_notes.note as rop_note,
                 observations_notes.note as obs_note,
                 publications.type as pub_type FROM frbs
                 INNER JOIN authors ON frbs.author_id=authors.id
                 INNER JOIN observations ON observations.frb_id=frbs.id
                 LEFT JOIN radio_observations_params ON
                 radio_observations_params.obs_id=observations.id
                 LEFT JOIN radio_measured_params ON
                 radio_measured_params.rop_id=radio_observations_params.id
                 LEFT JOIN radio_measured_params_notes ON
                 radio_measured_params_notes.rmp_id=radio_measured_params.id
                 LEFT JOIN radio_observations_params_notes ON
                 radio_observations_params_notes.rop_id=
                 radio_observations_params.id
                 LEFT JOIN observations_notes ON
                 observations_notes.obs_id=observations.id
                 INNER JOIN frbs_have_publications ON
                 frbs_have_publications.frb_id=frbs.id
                 INNER JOIN publications ON
                 frbs_have_publications.pub_id=publications.id
                 WHERE frbs.id in ({})""".format(self.frbs_id)
        self.cursor.execute(sql)
        while True:
            # extract next event from cursor
            self.event = self.cursor.fetchone()
            if not self.event:
                # no more events to process
                break
            # set the xml name
            try:
                # more than 1 event for this frb
                counter += 1
                xmlname = self.event['name'] + '_' + str(counter) + '.xml'
            except NameError:
                # first event for this frb
                counter = 0
                xmlname = self.event['name'] + '.xml'
            self.create_xml(xmlname)

    def create_xml(self, xmlname):
        '''
        create VOEvent xml file from extracted database values
        '''
        # Initialize voevent
        self.init_voevent()
        # Define Who details
        self.set_who()
        # Define WhereWhen details
        self.set_wherewhen()
        # Define How details
        self.set_how()
        # Define What section
        self.set_what()
        # Define Why section
        self.set_why()
        # TODO: add citations (not in frbcat?)
        self.save_xml(xmlname)

    def init_voevent(self):
        '''
        Initialize voevent
        '''
        # /begin placeholders
        stream = 'teststream'
        stream_id = 1
        role = vp.definitions.roles.test
        # /end placeholders
        self.v = vp.Voevent(stream=stream, stream_id=stream_id, role=role)
        # set description TODO, do we have something to put here?
        # v.Description =

    def set_who(self):
        '''
        Add who section to voevent object
        '''
        # Set Who.Date timestamp to date of packet-generation
        # regular expression to remove ivo:// in the beginning of string
        vp.set_who(self.v, date=datetime.datetime.utcnow(),
                   author_ivorn=re.sub('^ivo://', '', self.event['ivorn']))
        # Delete the voevent-parse tag that gets added to the Who skeleton
        if hasattr(self.v.Who, 'Description'):
            del self.v.Who.Description
        # set author
        self.set_author()

    def set_author(self):
        '''
        Add author section to voevent object
        '''
        # TODO: placeholder, not in database; one of author details need
        # to be specified for a valid voevent 2.0 file
        self.event['contact_name'] = 'placeholder'
        vp.set_author(self.v,
                      title=self.event['title'],
                      shortName=self.event['short_name'],
                      logoURL=self.event['logo_url'],
                      contactName=self.event['contact_name'],
                      contactEmail=self.event['contact_email'],
                      contactPhone=self.event['contact_phone'])

    def set_what(self):
        '''
        Add What section to voevent object
        '''
        # Add radio observations params section
        self.rop_params()
        # Add radio measured params section
        self.rmp_params()

    def set_how(self):
        '''
        Add How section to voevent object
        '''
        # Describe the reference/telescope here
        # TODO: reference of telescope?
        vp.add_how(self.v, descriptions=self.event['telescope'],
                   references=vp.Reference(""))

    def set_wherewhen(self):
        '''
        Add WhereWhen section to voevent object
        '''
        # TODO: add coord system to database?
        # ra: right ascension; dec: declination; err: error radius
        vp.add_where_when(self.v,
                          coords=vp.Position2D
                          (ra=utils.dms2decdeg(self.event['raj']),
                           dec=utils.dms2decdeg(self.event['decj']),
                           err=self.event['pointing_error'], units='deg',
                           system=vp.definitions.sky_coord_system.utc_fk5_geo),
                          obs_time=self.event['utc'],
                          observatory_location=self.event['telescope'])

    def set_why(self):
        '''
        Add Why section to voevent object
        '''
        # Why section (optional) allows for speculation on probable
        # astrophysical cause
        if self.event['detected']:
            vp.add_why(self.v,
                       inferences=vp.Inference(relation='detected',
                                               name=self.event['name']))
        else:
            vp.add_why(self.v,
                       inferences=vp.Inference(name=self.event['name']))

    def save_xml(self, xmlname):
        '''
        Check the validity of the voevent xml file and save as xmlname
        '''
        # check if the created event is a valid VOEvent v2.0 event
        if vp.valid_as_v2_0(self.v):
            # save to VOEvent xml
            with open(xmlname, 'w') as f:
                vp.dump(self.v, f, pretty_print=True)

    def rop_params(self):
        '''
        Add radio observations params section to voevent object
        '''
        # rop params in group 'radio observations params'
        rop_params = ['backend', 'beam', 'gl', 'gb', 'FWHM', 'sampling_time',
                      'bandwidth', 'centre_frequency', 'npol',
                      'channel_bandwidth', 'bits_per_sample', 'gain', 'tsys',
                      'ne2001_dm_limit', 'rop_note']
        rop_param_list = self.createParamList(rop_params)
        self.v.What.append(vp.Group(params=rop_param_list,
                                    name='radio observations params'))

    def rmp_params(self):
        '''
        Add radio measured params section to voevent object
        '''
        rmp_params = ['dm', 'dm_error', 'snr', 'width', 'width_error_upper',
                      'width_error_lower', 'flux', 'flux_prefix',
                      'flux_error_upper', 'flux_error_lower',
                      'flux_calibrated', 'dm_index', 'dm_index_error',
                      'scattering_index', 'scattering_index_error',
                      'scattering_time', 'scattering_time_error',
                      'linear_poln_frac', 'linear_poln_frac_error',
                      'circular_poln_frac', 'circular_poln_frac_error',
                      'spectral_index', 'spectral_index_error', 'z_phot',
                      'z_phot_error', 'z_spec', 'z_spec_error', 'rank',
                      'rmp_note']
        rmp_param_list = self.createParamList(rmp_params)
        # rmp params in group 'radio measured params'
        self.v.What.append(vp.Group(params=rmp_param_list,
                                    name='radio measured params'))

    def createParamList(self, params):
        '''
        ceate a list of params, so these can be written as group
        '''
        # TODO: don't add params with no value
        for param in params:
            try:
                value = self.event[param]
            except KeyError:
                # key is not in database
                raise
            if value:
                try:
                    paramList.extend(vp.Param(name=param,
                                              value=self.event[param]))
                except NameError:
                    paramList = [vp.Param(name=param,
                                          value=self.event[param])]
        return paramList


def VOEvent_FRBCAT_mapping(new_event=True):
    '''
    Create a dictionary of dicts of VOEvent -> FRBCAT mapping
    new_event: boolean indicating if event is a new event,default=True
    '''
    # read mapping.txt into a pandas dataframe
    convert = {0: utils.strip, 1: utils.strip, 2: utils.strip,
               3: utils.strip, 4: utils.strip}
    # location of mapping.txt file
    mapping = os.path.join(os.path.dirname(sys.modules['pyAccess'].__file__),
                           'mapping.txt')
    df = pd.read_table(mapping, sep='/', engine='c', header=0, skiprows=[0],
                       skip_blank_lines=True, skipinitialspace=True,
                       converters=convert).fillna('None')
    return df
[ "pyAccess.utils.dms2decdeg", "voeventparse.dump", "voeventparse.set_author", "pyAccess.dbase.extract_from_db_sql", "os.path.dirname", "voeventparse.Group", "voeventparse.Inference", "voeventparse.Param", "numpy.append", "datetime.datetime.utcnow", "numpy.where", "numpy.array", "voeventparse.Reference", "voeventparse.Voevent", "voeventparse.valid_as_v2_0", "pandas.read_table", "re.sub", "pyAccess.dbase.closeDBConnection" ]
[((895, 966), 'pyAccess.dbase.extract_from_db_sql', 'dbase.extract_from_db_sql', (['self.cursor', '"""authors"""', '"""id"""', '"""ivorn"""', 'ivorn'], {}), "(self.cursor, 'authors', 'id', 'ivorn', ivorn)\n", (920, 966), False, 'from pyAccess import dbase\n'), ((1471, 1568), 'pyAccess.dbase.extract_from_db_sql', 'dbase.extract_from_db_sql', (['self.cursor', '"""radio_measured_params"""', '"""id"""', '"""voevent_ivorn"""', 'ivorn'], {}), "(self.cursor, 'radio_measured_params', 'id',\n 'voevent_ivorn', ivorn)\n", (1496, 1568), False, 'from pyAccess import dbase\n'), ((2423, 2450), 'numpy.append', 'npappend', (['rows', '"""author_id"""'], {}), "(rows, 'author_id')\n", (2431, 2450), True, 'from numpy import append as npappend\n'), ((2467, 2498), 'numpy.append', 'npappend', (['value', 'self.author_id'], {}), '(value, self.author_id)\n', (2475, 2498), True, 'from numpy import append as npappend\n'), ((2699, 2723), 'numpy.append', 'npappend', (['rows', '"""frb_id"""'], {}), "(rows, 'frb_id')\n", (2707, 2723), True, 'from numpy import append as npappend\n'), ((2742, 2770), 'numpy.append', 'npappend', (['value', 'self.frb_id'], {}), '(value, self.frb_id)\n', (2750, 2770), True, 'from numpy import append as npappend\n'), ((2998, 3034), 'numpy.append', 'npappend', (['rows', "('frb_id', 'pub_id')"], {}), "(rows, ('frb_id', 'pub_id'))\n", (3006, 3034), True, 'from numpy import append as npappend\n'), ((3051, 3094), 'numpy.append', 'npappend', (['value', '(self.frb_id, self.pub_id)'], {}), '(value, (self.frb_id, self.pub_id))\n', (3059, 3094), True, 'from numpy import append as npappend\n'), ((3285, 3324), 'numpy.append', 'npappend', (['rows', "('frb_id', 'author_id')"], {}), "(rows, ('frb_id', 'author_id'))\n", (3293, 3324), True, 'from numpy import append as npappend\n'), ((3341, 3387), 'numpy.append', 'npappend', (['value', '(self.frb_id, self.author_id)'], {}), '(value, (self.frb_id, self.author_id))\n', (3349, 3387), True, 'from numpy import append as npappend\n'), ((3604, 3628), 'numpy.append', 'npappend', (['rows', '"""obs_id"""'], {}), "(rows, 'obs_id')\n", (3612, 3628), True, 'from numpy import append as npappend\n'), ((3647, 3675), 'numpy.append', 'npappend', (['value', 'self.obs_id'], {}), '(value, self.obs_id)\n', (3655, 3675), True, 'from numpy import append as npappend\n'), ((3919, 3955), 'numpy.append', 'npappend', (['rows', "('obs_id', 'pub_id')"], {}), "(rows, ('obs_id', 'pub_id'))\n", (3927, 3955), True, 'from numpy import append as npappend\n'), ((3972, 4015), 'numpy.append', 'npappend', (['value', '(self.obs_id, self.pub_id)'], {}), '(value, (self.obs_id, self.pub_id))\n', (3980, 4015), True, 'from numpy import append as npappend\n'), ((4232, 4271), 'numpy.append', 'npappend', (['rows', "('obs_id', 'author_id')"], {}), "(rows, ('obs_id', 'author_id'))\n", (4240, 4271), True, 'from numpy import append as npappend\n'), ((4288, 4334), 'numpy.append', 'npappend', (['value', '(self.obs_id, self.author_id)'], {}), '(value, (self.obs_id, self.author_id))\n', (4296, 4334), True, 'from numpy import append as npappend\n'), ((4577, 4601), 'numpy.append', 'npappend', (['rows', '"""rop_id"""'], {}), "(rows, 'rop_id')\n", (4585, 4601), True, 'from numpy import append as npappend\n'), ((4620, 4648), 'numpy.append', 'npappend', (['value', 'self.rop_id'], {}), '(value, self.rop_id)\n', (4628, 4648), True, 'from numpy import append as npappend\n'), ((4925, 4961), 'numpy.append', 'npappend', (['rows', "('rop_id', 'pub_id')"], {}), "(rows, ('rop_id', 'pub_id'))\n", (4933, 4961), True, 'from numpy import 
append as npappend\n'), ((4978, 5021), 'numpy.append', 'npappend', (['value', '(self.rop_id, self.pub_id)'], {}), '(value, (self.rop_id, self.pub_id))\n', (4986, 5021), True, 'from numpy import append as npappend\n'), ((5230, 5269), 'numpy.append', 'npappend', (['rows', "('rop_id', 'author_id')"], {}), "(rows, ('rop_id', 'author_id'))\n", (5238, 5269), True, 'from numpy import append as npappend\n'), ((5286, 5332), 'numpy.append', 'npappend', (['value', '(self.rop_id, self.author_id)'], {}), '(value, (self.rop_id, self.author_id))\n', (5294, 5332), True, 'from numpy import append as npappend\n'), ((5778, 5802), 'numpy.append', 'npappend', (['rows', '"""rmp_id"""'], {}), "(rows, 'rmp_id')\n", (5786, 5802), True, 'from numpy import append as npappend\n'), ((5821, 5849), 'numpy.append', 'npappend', (['value', 'self.rmp_id'], {}), '(value, self.rmp_id)\n', (5829, 5849), True, 'from numpy import append as npappend\n'), ((6111, 6147), 'numpy.append', 'npappend', (['rows', "('rmp_id', 'pub_id')"], {}), "(rows, ('rmp_id', 'pub_id'))\n", (6119, 6147), True, 'from numpy import append as npappend\n'), ((6164, 6207), 'numpy.append', 'npappend', (['value', '(self.rmp_id, self.pub_id)'], {}), '(value, (self.rmp_id, self.pub_id))\n', (6172, 6207), True, 'from numpy import append as npappend\n'), ((6790, 6834), 'numpy.append', 'npappend', (['rows', "('radio_image_id', 'rmp_id')"], {}), "(rows, ('radio_image_id', 'rmp_id'))\n", (6798, 6834), True, 'from numpy import append as npappend\n'), ((6851, 6891), 'numpy.append', 'npappend', (['value', '(self.rid, self.rmp_id)'], {}), '(value, (self.rid, self.rmp_id))\n', (6859, 6891), True, 'from numpy import append as npappend\n'), ((10521, 10574), 'pyAccess.dbase.closeDBConnection', 'dbase.closeDBConnection', (['self.connection', 'self.cursor'], {}), '(self.connection, self.cursor)\n', (10544, 10574), False, 'from pyAccess import dbase\n'), ((13845, 13902), 'voeventparse.Voevent', 'vp.Voevent', ([], {'stream': 'stream', 'stream_id': 'stream_id', 'role': 'role'}), '(stream=stream, stream_id=stream_id, role=role)\n', (13855, 13902), True, 'import voeventparse as vp\n'), ((14847, 15102), 'voeventparse.set_author', 'vp.set_author', (['self.v'], {'title': "self.event['title']", 'shortName': "self.event['short_name']", 'logoURL': "self.event['logo_url']", 'contactName': "self.event['contact_name']", 'contactEmail': "self.event['contact_email']", 'contactPhone': "self.event['contact_phone']"}), "(self.v, title=self.event['title'], shortName=self.event[\n 'short_name'], logoURL=self.event['logo_url'], contactName=self.event[\n 'contact_name'], contactEmail=self.event['contact_email'], contactPhone\n =self.event['contact_phone'])\n", (14860, 15102), True, 'import voeventparse as vp\n'), ((17179, 17203), 'voeventparse.valid_as_v2_0', 'vp.valid_as_v2_0', (['self.v'], {}), '(self.v)\n', (17195, 17203), True, 'import voeventparse as vp\n'), ((20120, 20169), 'os.path.dirname', 'os.path.dirname', (["sys.modules['pyAccess'].__file__"], {}), "(sys.modules['pyAccess'].__file__)\n", (20135, 20169), False, 'import os\n'), ((17865, 17930), 'voeventparse.Group', 'vp.Group', ([], {'params': 'rop_param_list', 'name': '"""radio observations params"""'}), "(params=rop_param_list, name='radio observations params')\n", (17873, 17930), True, 'import voeventparse as vp\n'), ((18948, 19009), 'voeventparse.Group', 'vp.Group', ([], {'params': 'rmp_param_list', 'name': '"""radio measured params"""'}), "(params=rmp_param_list, name='radio measured params')\n", (18956, 19009), True, 'import 
voeventparse as vp\n'), ((20222, 20359), 'pandas.read_table', 'pd.read_table', (['mapping'], {'sep': '"""/"""', 'engine': '"""c"""', 'header': '(0)', 'skiprows': '[0]', 'skip_blank_lines': '(True)', 'skipinitialspace': '(True)', 'converters': 'convert'}), "(mapping, sep='/', engine='c', header=0, skiprows=[0],\n skip_blank_lines=True, skipinitialspace=True, converters=convert)\n", (20235, 20359), True, 'import pandas as pd\n'), ((2051, 2075), 'numpy.where', 'npwhere', (["(rows == 'ivorn')"], {}), "(rows == 'ivorn')\n", (2058, 2075), True, 'from numpy import where as npwhere\n'), ((5355, 5387), 'numpy.where', 'npwhere', (["(rows == 'voevent_ivorn')"], {}), "(rows == 'voevent_ivorn')\n", (5362, 5387), True, 'from numpy import where as npwhere\n'), ((9033, 9047), 'numpy.array', 'nparray', (['value'], {}), '(value)\n', (9040, 9047), True, 'from numpy import array as nparray\n'), ((14252, 14278), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (14276, 14278), False, 'import datetime\n'), ((14312, 14354), 're.sub', 're.sub', (['"""^ivo://"""', '""""""', "self.event['ivorn']"], {}), "('^ivo://', '', self.event['ivorn'])\n", (14318, 14354), False, 'import re\n'), ((15729, 15745), 'voeventparse.Reference', 'vp.Reference', (['""""""'], {}), "('')\n", (15741, 15745), True, 'import voeventparse as vp\n'), ((17297, 17334), 'voeventparse.dump', 'vp.dump', (['self.v', 'f'], {'pretty_print': '(True)'}), '(self.v, f, pretty_print=True)\n', (17304, 17334), True, 'import voeventparse as vp\n'), ((16747, 16805), 'voeventparse.Inference', 'vp.Inference', ([], {'relation': '"""detected"""', 'name': "self.event['name']"}), "(relation='detected', name=self.event['name'])\n", (16759, 16805), True, 'import voeventparse as vp\n'), ((16933, 16970), 'voeventparse.Inference', 'vp.Inference', ([], {'name': "self.event['name']"}), "(name=self.event['name'])\n", (16945, 16970), True, 'import voeventparse as vp\n'), ((16073, 16108), 'pyAccess.utils.dms2decdeg', 'utils.dms2decdeg', (["self.event['raj']"], {}), "(self.event['raj'])\n", (16089, 16108), True, 'from pyAccess import utils as utils\n'), ((16141, 16177), 'pyAccess.utils.dms2decdeg', 'utils.dms2decdeg', (["self.event['decj']"], {}), "(self.event['decj'])\n", (16157, 16177), True, 'from pyAccess import utils as utils\n'), ((19474, 19519), 'voeventparse.Param', 'vp.Param', ([], {'name': 'param', 'value': 'self.event[param]'}), '(name=param, value=self.event[param])\n', (19482, 19519), True, 'import voeventparse as vp\n'), ((19634, 19679), 'voeventparse.Param', 'vp.Param', ([], {'name': 'param', 'value': 'self.event[param]'}), '(name=param, value=self.event[param])\n', (19642, 19679), True, 'import voeventparse as vp\n')]
from __future__ import division

import numpy as np
from numpy.testing import assert_equal
from pyoperators import (
    CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags,
    rule_manager, O)
from pyoperators.utils import ndarraywrap
from pyoperators.utils.testing import (
    assert_is, assert_is_instance, assert_is_none, assert_same,
    assert_is_type, skiptest)

from .common import OPS, ndarray2, attr2

op = Operator()
ops = [_() for _ in OPS] + [_(flags={'linear': False}) for _ in OPS]

zeros_left = (
    ZeroOperator(classout=ndarray2, attrout=attr2),
    ZeroOperator(shapein=4, classout=ndarray2, attrout=attr2))

zeros_right = (
    ZeroOperator(classout=ndarray2, attrout=attr2),
    ZeroOperator(classout=ndarray2, attrout=attr2, flags='square'),
    ZeroOperator(shapein=3, classout=ndarray2, attrout=attr2),
    ZeroOperator(shapein=3, shapeout=3, classout=ndarray2, attrout=attr2))


def test_zero1():
    z = ZeroOperator()
    o = Operator(shapein=3, shapeout=6)
    zo = z(o)
    assert_is_instance(zo, ZeroOperator)
    assert_equal(zo.shapein, o.shapein)
    assert_is_none(zo.shapeout)


def test_zero2():
    z = ZeroOperator(shapein=3, shapeout=6)
    o = Operator()
    zo = z(o)
    assert_is_instance(zo, ZeroOperator)
    assert_is_none(zo.shapein, 'in')
    assert_equal(zo.shapeout, z.shapeout, 'out')


def test_zero3():
    z = ZeroOperator(shapein=3, shapeout=6)
    o = Operator(flags='square')
    zo = z*o
    assert_is_instance(zo, ZeroOperator)
    assert_equal(zo.shapein, z.shapein, 'in')
    assert_equal(zo.shapeout, z.shapeout, 'out')


def test_zero4():
    z = ZeroOperator()
    o = Operator(flags='linear')
    assert_is_instance(z*o, ZeroOperator)
    assert_is_instance(o*z, ZeroOperator)


def test_zero5():
    z = ZeroOperator()
    o = Operator(shapein=3, shapeout=6, flags='linear')
    zo = z*o
    oz = o*z
    assert_is_instance(zo, ZeroOperator, 'zo')
    assert_equal(zo.shapein, o.shapein, 'zo in')
    assert_is_none(zo.shapeout, 'zo out')
    assert_is_instance(oz, ZeroOperator, 'oz')
    assert_is_none(oz.shapein, 'oz, in')
    assert_equal(oz.shapeout, o.shapeout, 'oz, out')


@skiptest
def test_zero6():
    @flags.linear
    class Op(Operator):
        def direct(self, input, output):
            output[:] = np.concatenate([input, 2*input])

        def transpose(self, input, output):
            output[:] = input[0:output.size]

        def reshapein(self, shapein):
            return (2 * shapein[0],)

        def reshapeout(self, shapeout):
            return (shapeout[0] // 2,)

    z = ZeroOperator(flags='square')
    o = Op()
    od = o.todense(shapein=4)
    zo = z * o
    zod_ref = np.dot(np.zeros((8, 8)), od)
    assert_same((z * o).todense(shapein=4), zod_ref)
    oz = o * z
    ozd_ref = np.dot(od, np.zeros((4, 4)))
    assert_same((o * z).todense(shapein=4), ozd_ref)
    assert_same(zo.T.todense(shapein=8), zod_ref.T)
    assert_same(oz.T.todense(shapein=8), ozd_ref.T)


def test_zero7():
    z = ZeroOperator()
    assert_equal(z*z, z)


def test_zero8():
    class Op(Operator):
        pass
    o = Op()
    assert_is_type(o + O, Op)


def test_merge_zero_left():
    def func(op1, op2):
        op = op1(op2)
        assert_is_instance(op, ZeroOperator)
        attr = {}
        attr.update(op2.attrout)
        attr.update(op1.attrout)
        assert_equal(op.attrout, attr)
        x = np.ones(3)
        y = ndarraywrap(4)
        op(x, y)
        y2_tmp = np.empty(4)
        y2 = np.empty(4)
        op2(x, y2_tmp)
        op1(y2_tmp, y2)
        assert_equal(y, y2)
        assert_is_instance(y, op1.classout)
    for op1 in zeros_left:
        for op2 in ops:
            yield func, op1, op2


def test_merge_zero_right():
    def func(op1, op2):
        op = op1(op2)
        attr = {}
        attr.update(op2.attrout)
        attr.update(op1.attrout)
        assert_equal(op.attrout, attr)
        assert_is(op.classout, op1.classout)
        if op1.flags.linear:
            assert_is_type(op, ZeroOperator)
            assert_same(op.todense(shapein=3, shapeout=4), np.zeros((4, 3)))
            return
        if op1.flags.shape_output == 'unconstrained' or \
           op1.flags.shape_input != 'explicit' and \
           op2.flags.shape_output != 'explicit':
            assert_is_type(op, CompositionOperator)
        else:
            assert_is_type(op, ConstantOperator)
        if op1.flags.shape_input == 'unconstrained' and \
           op2.flags.shape_output == 'unconstrained':
            return
        with rule_manager(none=True):
            op_ref = op1(op2)
        assert_same(op.todense(shapein=3, shapeout=4),
                    op_ref.todense(shapein=3, shapeout=4))
    for op1 in ops:
        for op2 in zeros_right:
            yield func, op1, op2
[ "pyoperators.rule_manager", "pyoperators.utils.ndarraywrap", "numpy.concatenate", "pyoperators.utils.testing.assert_is", "numpy.empty", "pyoperators.ZeroOperator", "numpy.zeros", "pyoperators.Operator", "numpy.ones", "pyoperators.utils.testing.assert_is_none", "pyoperators.utils.testing.assert_is_type", "numpy.testing.assert_equal", "pyoperators.utils.testing.assert_is_instance" ]
[((434, 444), 'pyoperators.Operator', 'Operator', ([], {}), '()\n', (442, 444), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((533, 579), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {'classout': 'ndarray2', 'attrout': 'attr2'}), '(classout=ndarray2, attrout=attr2)\n', (545, 579), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((585, 642), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {'shapein': '(4)', 'classout': 'ndarray2', 'attrout': 'attr2'}), '(shapein=4, classout=ndarray2, attrout=attr2)\n', (597, 642), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((664, 710), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {'classout': 'ndarray2', 'attrout': 'attr2'}), '(classout=ndarray2, attrout=attr2)\n', (676, 710), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((716, 778), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {'classout': 'ndarray2', 'attrout': 'attr2', 'flags': '"""square"""'}), "(classout=ndarray2, attrout=attr2, flags='square')\n", (728, 778), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((784, 841), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {'shapein': '(3)', 'classout': 'ndarray2', 'attrout': 'attr2'}), '(shapein=3, classout=ndarray2, attrout=attr2)\n', (796, 841), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((847, 916), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {'shapein': '(3)', 'shapeout': '(3)', 'classout': 'ndarray2', 'attrout': 'attr2'}), '(shapein=3, shapeout=3, classout=ndarray2, attrout=attr2)\n', (859, 916), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((946, 960), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {}), '()\n', (958, 960), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((969, 1000), 'pyoperators.Operator', 'Operator', ([], {'shapein': '(3)', 'shapeout': '(6)'}), '(shapein=3, shapeout=6)\n', (977, 1000), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((1019, 1055), 'pyoperators.utils.testing.assert_is_instance', 'assert_is_instance', (['zo', 'ZeroOperator'], {}), '(zo, ZeroOperator)\n', (1037, 1055), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((1060, 1095), 'numpy.testing.assert_equal', 'assert_equal', (['zo.shapein', 'o.shapein'], {}), '(zo.shapein, o.shapein)\n', (1072, 1095), False, 'from numpy.testing import assert_equal\n'), ((1100, 1127), 'pyoperators.utils.testing.assert_is_none', 'assert_is_none', (['zo.shapeout'], {}), '(zo.shapeout)\n', (1114, 1127), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((1156, 1191), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {'shapein': '(3)', 'shapeout': '(6)'}), '(shapein=3, shapeout=6)\n', (1168, 1191), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), 
((1200, 1210), 'pyoperators.Operator', 'Operator', ([], {}), '()\n', (1208, 1210), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((1229, 1265), 'pyoperators.utils.testing.assert_is_instance', 'assert_is_instance', (['zo', 'ZeroOperator'], {}), '(zo, ZeroOperator)\n', (1247, 1265), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((1270, 1302), 'pyoperators.utils.testing.assert_is_none', 'assert_is_none', (['zo.shapein', '"""in"""'], {}), "(zo.shapein, 'in')\n", (1284, 1302), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((1307, 1351), 'numpy.testing.assert_equal', 'assert_equal', (['zo.shapeout', 'z.shapeout', '"""out"""'], {}), "(zo.shapeout, z.shapeout, 'out')\n", (1319, 1351), False, 'from numpy.testing import assert_equal\n'), ((1380, 1415), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {'shapein': '(3)', 'shapeout': '(6)'}), '(shapein=3, shapeout=6)\n', (1392, 1415), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((1424, 1448), 'pyoperators.Operator', 'Operator', ([], {'flags': '"""square"""'}), "(flags='square')\n", (1432, 1448), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((1466, 1502), 'pyoperators.utils.testing.assert_is_instance', 'assert_is_instance', (['zo', 'ZeroOperator'], {}), '(zo, ZeroOperator)\n', (1484, 1502), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((1507, 1548), 'numpy.testing.assert_equal', 'assert_equal', (['zo.shapein', 'z.shapein', '"""in"""'], {}), "(zo.shapein, z.shapein, 'in')\n", (1519, 1548), False, 'from numpy.testing import assert_equal\n'), ((1553, 1597), 'numpy.testing.assert_equal', 'assert_equal', (['zo.shapeout', 'z.shapeout', '"""out"""'], {}), "(zo.shapeout, z.shapeout, 'out')\n", (1565, 1597), False, 'from numpy.testing import assert_equal\n'), ((1626, 1640), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {}), '()\n', (1638, 1640), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((1649, 1673), 'pyoperators.Operator', 'Operator', ([], {'flags': '"""linear"""'}), "(flags='linear')\n", (1657, 1673), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((1678, 1717), 'pyoperators.utils.testing.assert_is_instance', 'assert_is_instance', (['(z * o)', 'ZeroOperator'], {}), '(z * o, ZeroOperator)\n', (1696, 1717), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((1720, 1759), 'pyoperators.utils.testing.assert_is_instance', 'assert_is_instance', (['(o * z)', 'ZeroOperator'], {}), '(o * z, ZeroOperator)\n', (1738, 1759), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((1786, 1800), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {}), '()\n', (1798, 1800), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((1809, 1856), 'pyoperators.Operator', 'Operator', ([], {'shapein': 
'(3)', 'shapeout': '(6)', 'flags': '"""linear"""'}), "(shapein=3, shapeout=6, flags='linear')\n", (1817, 1856), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((1887, 1929), 'pyoperators.utils.testing.assert_is_instance', 'assert_is_instance', (['zo', 'ZeroOperator', '"""zo"""'], {}), "(zo, ZeroOperator, 'zo')\n", (1905, 1929), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((1934, 1978), 'numpy.testing.assert_equal', 'assert_equal', (['zo.shapein', 'o.shapein', '"""zo in"""'], {}), "(zo.shapein, o.shapein, 'zo in')\n", (1946, 1978), False, 'from numpy.testing import assert_equal\n'), ((1983, 2020), 'pyoperators.utils.testing.assert_is_none', 'assert_is_none', (['zo.shapeout', '"""zo out"""'], {}), "(zo.shapeout, 'zo out')\n", (1997, 2020), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((2025, 2067), 'pyoperators.utils.testing.assert_is_instance', 'assert_is_instance', (['oz', 'ZeroOperator', '"""oz"""'], {}), "(oz, ZeroOperator, 'oz')\n", (2043, 2067), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((2072, 2108), 'pyoperators.utils.testing.assert_is_none', 'assert_is_none', (['oz.shapein', '"""oz, in"""'], {}), "(oz.shapein, 'oz, in')\n", (2086, 2108), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((2113, 2161), 'numpy.testing.assert_equal', 'assert_equal', (['oz.shapeout', 'o.shapeout', '"""oz, out"""'], {}), "(oz.shapeout, o.shapeout, 'oz, out')\n", (2125, 2161), False, 'from numpy.testing import assert_equal\n'), ((2586, 2614), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {'flags': '"""square"""'}), "(flags='square')\n", (2598, 2614), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((3012, 3026), 'pyoperators.ZeroOperator', 'ZeroOperator', ([], {}), '()\n', (3024, 3026), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((3031, 3053), 'numpy.testing.assert_equal', 'assert_equal', (['(z * z)', 'z'], {}), '(z * z, z)\n', (3043, 3053), False, 'from numpy.testing import assert_equal\n'), ((3126, 3151), 'pyoperators.utils.testing.assert_is_type', 'assert_is_type', (['(o + O)', 'Op'], {}), '(o + O, Op)\n', (3140, 3151), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((2694, 2710), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (2702, 2710), True, 'import numpy as np\n'), ((2809, 2825), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (2817, 2825), True, 'import numpy as np\n'), ((3236, 3272), 'pyoperators.utils.testing.assert_is_instance', 'assert_is_instance', (['op', 'ZeroOperator'], {}), '(op, ZeroOperator)\n', (3254, 3272), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((3365, 3395), 'numpy.testing.assert_equal', 'assert_equal', (['op.attrout', 'attr'], {}), '(op.attrout, attr)\n', (3377, 3395), False, 'from numpy.testing import assert_equal\n'), ((3408, 3418), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3415, 
3418), True, 'import numpy as np\n'), ((3431, 3445), 'pyoperators.utils.ndarraywrap', 'ndarraywrap', (['(4)'], {}), '(4)\n', (3442, 3445), False, 'from pyoperators.utils import ndarraywrap\n'), ((3480, 3491), 'numpy.empty', 'np.empty', (['(4)'], {}), '(4)\n', (3488, 3491), True, 'import numpy as np\n'), ((3505, 3516), 'numpy.empty', 'np.empty', (['(4)'], {}), '(4)\n', (3513, 3516), True, 'import numpy as np\n'), ((3572, 3591), 'numpy.testing.assert_equal', 'assert_equal', (['y', 'y2'], {}), '(y, y2)\n', (3584, 3591), False, 'from numpy.testing import assert_equal\n'), ((3600, 3635), 'pyoperators.utils.testing.assert_is_instance', 'assert_is_instance', (['y', 'op1.classout'], {}), '(y, op1.classout)\n', (3618, 3635), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((3889, 3919), 'numpy.testing.assert_equal', 'assert_equal', (['op.attrout', 'attr'], {}), '(op.attrout, attr)\n', (3901, 3919), False, 'from numpy.testing import assert_equal\n'), ((3928, 3964), 'pyoperators.utils.testing.assert_is', 'assert_is', (['op.classout', 'op1.classout'], {}), '(op.classout, op1.classout)\n', (3937, 3964), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((2299, 2333), 'numpy.concatenate', 'np.concatenate', (['[input, 2 * input]'], {}), '([input, 2 * input])\n', (2313, 2333), True, 'import numpy as np\n'), ((4006, 4038), 'pyoperators.utils.testing.assert_is_type', 'assert_is_type', (['op', 'ZeroOperator'], {}), '(op, ZeroOperator)\n', (4020, 4038), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((4307, 4346), 'pyoperators.utils.testing.assert_is_type', 'assert_is_type', (['op', 'CompositionOperator'], {}), '(op, CompositionOperator)\n', (4321, 4346), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((4373, 4409), 'pyoperators.utils.testing.assert_is_type', 'assert_is_type', (['op', 'ConstantOperator'], {}), '(op, ConstantOperator)\n', (4387, 4409), False, 'from pyoperators.utils.testing import assert_is, assert_is_instance, assert_is_none, assert_same, assert_is_type, skiptest\n'), ((4555, 4578), 'pyoperators.rule_manager', 'rule_manager', ([], {'none': '(True)'}), '(none=True)\n', (4567, 4578), False, 'from pyoperators import CompositionOperator, ConstantOperator, Operator, ZeroOperator, flags, rule_manager, O\n'), ((4098, 4114), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (4106, 4114), True, 'import numpy as np\n')]
"""Exposes functions to estimate population size given a list of samples.""" from collections import Counter import numpy as np def _calculate_error(estimate, num_entities, sample_sizes): """Calculates the error of a population estimate given the number of entities observed and the sizes of samples taken without replacement. Parameters ---------- estimate: int The estimate of population size to be evaluated. num_entities: int The number of distinct entities observed. sample_sizes: list A list of integers indicating the size of each sample taken. Returns ------- float The error incurred by this estimate of population size. """ # Contribution to log sample expectation from each sample log_sample_contributions = [ np.log(estimate - sample_size) - np.log(estimate) for sample_size in sample_sizes ] # Contribution to log sample expectation from estimated population size log_pop_contribution = np.log(estimate) # Expectation of unobserved entity count from sample distribution sample_expectation = np.exp( sum(log_sample_contributions) + log_pop_contribution ) # Expectation of unobserved entity count from counting (i.e., the difference # between the estimated population size and the number of observed entities) count_expectation = estimate - num_entities # Return the difference in expectations as a measure of error return sample_expectation - count_expectation def _recurse_to_best_estimate( lower_bound, upper_bound, num_entities, sample_sizes ): """Recursively finds the best estimate of population size by identifying which half of [lower_bound, upper_bound] contains the best estimate. Parameters ---------- lower_bound: int The lower bound of the interval to be tested; the value of the error function can always be assumed to be positive at this point. upper_bound: int The upper bound of the interval to be tested; the value of the error function can always be assumed to be negative at this point. num_entities: int The number of distinct entities observed. sample_sizes: list A list of integers indicating the size of each sample taken. Returns ------- int The best estimate of population size. """ # Base case - return the upper bound when the upper and lower bounds are # adjacent if upper_bound - lower_bound <= 1: return upper_bound # Otherwise calculate error at midpoint and recursively evaluate the # relevant half of the interval midpoint = int(np.ceil((lower_bound + upper_bound) / 2)) error_at_midpoint = _calculate_error(midpoint, num_entities, sample_sizes) if error_at_midpoint > 0: return _recurse_to_best_estimate( midpoint, upper_bound, num_entities, sample_sizes ) else: return _recurse_to_best_estimate( lower_bound, midpoint, num_entities, sample_sizes ) def _find_best_estimate(num_entities, max_pop_size, sample_sizes): """Finds the best integer estimate of population size in the domain [num_entities, max_pop_size]. The time complexity is O(log(max_pop_size)). NB: this algorithm relies on the following facts: that the error function is decreasing on the entire domain; and that the error function is positive-valued at the domain's lower bound. Parameters ---------- num_entities: int The number of distinct entities observed. max_pop_size: int The maximum allowable population size. sample_sizes: list A list of integers indicating the size of each sample taken. Returns ------- int The best estimate of population size or, barring that, the maximum allowable estimate. 
""" # Catch cases where maximum allowable estimate is still too low or where # minimum allowable estimate is still too high, and return to save # computation error_at_max = _calculate_error(max_pop_size, num_entities, sample_sizes) if error_at_max > 0: return max_pop_size error_at_min = _calculate_error(num_entities, num_entities, sample_sizes) if error_at_min <= 0: return num_entities # Return the best estimate in the passed domain return _recurse_to_best_estimate( num_entities, max_pop_size, num_entities, sample_sizes ) def _cross_validate_estimate( simulated_population, samples, num_observed, cv_ppn ): """Returns a cross-validated estimate of population size. Parameters ---------- simulated_population: list A simulated population containing all observed entities and a number of simulated entities not in any sample. samples: list A list of lists; each element is a list of entities observed in a particular sample. num_observed: int The number of distinct entities observed in the samples (this is derivable from samples, but it's already calculated in the calling function, so why duplicate effort?). cv_ppn: float The proportion of samples to be used for each cross-validation attempt. Returns ------- int A cross-validated estimate of population size. """ # Select samples to hold as a validation set cv_size = int(np.ceil(cv_ppn * len(samples))) validation_samples = np.random.choice( np.array(samples, dtype=object), cv_size, replace=False ).tolist() # Construct simulated samples identical in size to the holdout sets simulated_entities = set( entity for simulated_sample in [ np.random.choice( simulated_population, len(sample), replace=False ).tolist() for sample in validation_samples ] for entity in simulated_sample ) # Identify entities in samples not held back for cross validation retained_samples = [ sample for sample in samples if sample not in validation_samples ] retained_entities = set( entity for sample in retained_samples for entity in sample ) # Calculate number of "new" entities in held-back and simulated samples, # determine correction factor true_new = num_observed - len(retained_entities) simulated_new = len(simulated_entities.difference(retained_entities)) correction_factor = max(true_new - simulated_new, 0) / max(simulated_new, 1) # Return corrected estimate of population size corrected_estimate_of_new_entities = int(np.ceil( (len(simulated_population) - num_observed) * (1 + correction_factor) )) return num_observed + corrected_estimate_of_new_entities def cuthbert(samples, min_survival=0.01, cv=None, cv_ppn=0.2): """Estimates population size given a collection of samples without replacement, using method proposed in Cuthbert (2009). Parameters ---------- samples: list A list of lists; each element is a list of entities observed in a particular sample. min_survival: float The minimum allowable estimate of the survival rate; implicitly defines the maximum population size that will be returned. cv: int The number of cross-validation iterations that should be performed; if None then cross-validation will be skipped. cv_ppn: float The proportion of samples to be used for each cross-validation attempt. Returns ------- dict All estimated population sizes; the structure is { 'uncorrected': int, 'corrected': [int] }, where len(dict['corrected']) is equal to cv. 
""" # Initialize result estimates = {'uncorrected': 0, 'corrected': []} # Generate an uncorrected estimate of population size by recursively # identifying the estimate of minimum error within the domain defined by the # number of sampled entities and min_survival sample_sizes = [len(sample) for sample in samples] entities = set(entity for sample in samples for entity in sample) max_pop_size = int(np.ceil(len(entities) / min_survival)) estimates['uncorrected'] = _find_best_estimate( len(entities), max_pop_size, sample_sizes ) # If indicated, generate corrected estimates using cross-validation if cv is not None: # Generate a simulated population the size of the uncorrected estimate simulated_entities = [ 'simulated_entity_' + str(i) for i in range(estimates['uncorrected'] - len(entities)) ] simulated_population = list(entities) + simulated_entities # Calculate cv cross-validated estimates for _ in range(cv): estimates['corrected'].append( _cross_validate_estimate( simulated_population, samples, len(entities), cv_ppn ) ) # Return final result return estimates def bbc(samples, max_delta=0.001): """Estimates population size given a collection of samples without replacement, using method proposed in Boneh, Boneh, and Caron (1998). Parameters ---------- samples: list A list of lists; each element is a list of entities observed in a particular sample. max_delta: float The incremental change to which the correction algorithm must converge prior to termination. Returns ------- int The estimated population size. Raises ------ ValueError If there are no entities observed exactly once. """ # Generate a dictionary mapping natural numbers, n, to the number of # entities observed n times across all samples entity_counts = dict(Counter( [entity for sample in samples for entity in sample] )) frequency_counts = dict(Counter(list(entity_counts.values()))) # Raise value error if no singletons if frequency_counts.get(1) is None: raise ValueError('no entity was observed exactly once') # Generate biased estimate of the number of unobserved entities biased_est = sum( [frequency_counts[f] / np.exp(f) for f in frequency_counts] ) # Correct for bias in estimate of unobserved entities via BBC's suggested # algorithm corrected_est = biased_est delta = max_delta + 1 while delta > max_delta: previous_est = corrected_est corrected_est = biased_est + ( previous_est * np.exp(-1 * frequency_counts[1] / previous_est) ) delta = abs(corrected_est - previous_est) # Return corrected estimated total population size corrected_est = int(np.ceil(corrected_est)) return len(entity_counts) + corrected_est
[ "numpy.ceil", "numpy.log", "numpy.array", "numpy.exp", "collections.Counter" ]
[((1019, 1035), 'numpy.log', 'np.log', (['estimate'], {}), '(estimate)\n', (1025, 1035), True, 'import numpy as np\n'), ((2683, 2723), 'numpy.ceil', 'np.ceil', (['((lower_bound + upper_bound) / 2)'], {}), '((lower_bound + upper_bound) / 2)\n', (2690, 2723), True, 'import numpy as np\n'), ((9810, 9870), 'collections.Counter', 'Counter', (['[entity for sample in samples for entity in sample]'], {}), '([entity for sample in samples for entity in sample])\n', (9817, 9870), False, 'from collections import Counter\n'), ((10736, 10758), 'numpy.ceil', 'np.ceil', (['corrected_est'], {}), '(corrected_est)\n', (10743, 10758), True, 'import numpy as np\n'), ((820, 850), 'numpy.log', 'np.log', (['(estimate - sample_size)'], {}), '(estimate - sample_size)\n', (826, 850), True, 'import numpy as np\n'), ((853, 869), 'numpy.log', 'np.log', (['estimate'], {}), '(estimate)\n', (859, 869), True, 'import numpy as np\n'), ((5513, 5544), 'numpy.array', 'np.array', (['samples'], {'dtype': 'object'}), '(samples, dtype=object)\n', (5521, 5544), True, 'import numpy as np\n'), ((10221, 10230), 'numpy.exp', 'np.exp', (['f'], {}), '(f)\n', (10227, 10230), True, 'import numpy as np\n'), ((10548, 10595), 'numpy.exp', 'np.exp', (['(-1 * frequency_counts[1] / previous_est)'], {}), '(-1 * frequency_counts[1] / previous_est)\n', (10554, 10595), True, 'import numpy as np\n')]
#!/usr/bin/env python3

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Arc

from writeout import writeout

plt.rcParams["font.family"] = "serif"
plt.rcParams["mathtext.fontset"] = "cm"

fsize = 20.0


def makeaxes(xmin, xmax, ymin, ymax):
    x = np.linspace(xmin, xmax, 2)
    y = np.linspace(ymin, ymax, 2)
    zero = np.zeros(np.shape(x))
    plt.plot(x, zero, 'k', lw = 1.0)
    plt.plot(zero, y, 'k', lw = 1.0)


plt.figure(figsize=(8, 6))
makeaxes(-0.25, 1.2, -0.1, 0.8)
plt.text(-0.05, -0.07, r'$0$', fontsize=fsize)

psi1 = 0.2
psi2 = 0.35

plt.fill([psi1, 1.2, 1.2, psi1, psi1], [psi2, psi2, 0.8, 0.8, psi2], 'k', alpha = 0.2)
plt.text(0.9, 0.6, r'$\mathcal{K}$', fontsize=fsize)

plt.text(1.28, -0.02, r'$\mathcal{V}_1$', fontsize=fsize)
plt.plot(psi1, 0.0, 'k.', ms = 12.0)
plt.plot([psi1, 1.2], [0.0, 0.0], 'k', lw = 2.5)
plt.text(psi1 - 0.07, - 0.085, r'$\psi(x_1)$', fontsize=fsize)
plt.text(0.7, - 0.09, r'$\mathcal{K}_1$', fontsize=fsize)
plt.gca().annotate('', xy=(0.7, psi2 - 0.3), xytext=(0.7, psi2 - 0.05),
                   arrowprops=dict(arrowstyle='->, head_length=0.8, head_width=0.5'))
plt.text(0.72, 0.15, r'$R_1$', fontsize=fsize)

plt.text(-0.02, 0.89, r'$\mathcal{V}_2$', fontsize=fsize)
plt.plot(0.0, psi2, 'k.', ms = 12.0)
plt.plot([0.0, 0.0], [psi2, 0.8], 'k', lw = 2.5)
plt.text(- 0.2, psi2 - 0.02, r'$\psi(x_2)$', fontsize=fsize)
plt.text(- 0.11, 0.6, r'$\mathcal{K}_2$', fontsize=fsize)
plt.gca().annotate('', xy=(psi1 - 0.15, 0.6), xytext=(psi1 - 0.05, 0.6),
                   arrowprops=dict(arrowstyle='->, head_length=0.8, head_width=0.5'))
plt.text(0.07, 0.65, r'$R_2$', fontsize=fsize)

plt.axis('tight')
plt.axis('off')
plt.axis('equal')

writeout('cartoon.pdf')
[ "matplotlib.pyplot.plot", "matplotlib.pyplot.fill", "matplotlib.pyplot.axis", "matplotlib.pyplot.text", "numpy.shape", "matplotlib.pyplot.figure", "numpy.linspace", "matplotlib.pyplot.gca", "writeout.writeout" ]
[((450, 476), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (460, 476), True, 'import matplotlib.pyplot as plt\n'), ((509, 554), 'matplotlib.pyplot.text', 'plt.text', (['(-0.05)', '(-0.07)', '"""$0$"""'], {'fontsize': 'fsize'}), "(-0.05, -0.07, '$0$', fontsize=fsize)\n", (517, 554), True, 'import matplotlib.pyplot as plt\n'), ((580, 668), 'matplotlib.pyplot.fill', 'plt.fill', (['[psi1, 1.2, 1.2, psi1, psi1]', '[psi2, psi2, 0.8, 0.8, psi2]', '"""k"""'], {'alpha': '(0.2)'}), "([psi1, 1.2, 1.2, psi1, psi1], [psi2, psi2, 0.8, 0.8, psi2], 'k',\n alpha=0.2)\n", (588, 668), True, 'import matplotlib.pyplot as plt\n'), ((685, 737), 'matplotlib.pyplot.text', 'plt.text', (['(0.9)', '(0.6)', '"""$\\\\mathcal{K}$"""'], {'fontsize': 'fsize'}), "(0.9, 0.6, '$\\\\mathcal{K}$', fontsize=fsize)\n", (693, 737), True, 'import matplotlib.pyplot as plt\n'), ((739, 796), 'matplotlib.pyplot.text', 'plt.text', (['(1.28)', '(-0.02)', '"""$\\\\mathcal{V}_1$"""'], {'fontsize': 'fsize'}), "(1.28, -0.02, '$\\\\mathcal{V}_1$', fontsize=fsize)\n", (747, 796), True, 'import matplotlib.pyplot as plt\n'), ((797, 831), 'matplotlib.pyplot.plot', 'plt.plot', (['psi1', '(0.0)', '"""k."""'], {'ms': '(12.0)'}), "(psi1, 0.0, 'k.', ms=12.0)\n", (805, 831), True, 'import matplotlib.pyplot as plt\n'), ((834, 880), 'matplotlib.pyplot.plot', 'plt.plot', (['[psi1, 1.2]', '[0.0, 0.0]', '"""k"""'], {'lw': '(2.5)'}), "([psi1, 1.2], [0.0, 0.0], 'k', lw=2.5)\n", (842, 880), True, 'import matplotlib.pyplot as plt\n'), ((883, 944), 'matplotlib.pyplot.text', 'plt.text', (['(psi1 - 0.07)', '(-0.085)', '"""$\\\\psi(x_1)$"""'], {'fontsize': 'fsize'}), "(psi1 - 0.07, -0.085, '$\\\\psi(x_1)$', fontsize=fsize)\n", (891, 944), True, 'import matplotlib.pyplot as plt\n'), ((946, 1002), 'matplotlib.pyplot.text', 'plt.text', (['(0.7)', '(-0.09)', '"""$\\\\mathcal{K}_1$"""'], {'fontsize': 'fsize'}), "(0.7, -0.09, '$\\\\mathcal{K}_1$', fontsize=fsize)\n", (954, 1002), True, 'import matplotlib.pyplot as plt\n'), ((1162, 1207), 'matplotlib.pyplot.text', 'plt.text', (['(0.72)', '(0.15)', '"""$R_1$"""'], {'fontsize': 'fsize'}), "(0.72, 0.15, '$R_1$', fontsize=fsize)\n", (1170, 1207), True, 'import matplotlib.pyplot as plt\n'), ((1210, 1267), 'matplotlib.pyplot.text', 'plt.text', (['(-0.02)', '(0.89)', '"""$\\\\mathcal{V}_2$"""'], {'fontsize': 'fsize'}), "(-0.02, 0.89, '$\\\\mathcal{V}_2$', fontsize=fsize)\n", (1218, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1268, 1302), 'matplotlib.pyplot.plot', 'plt.plot', (['(0.0)', 'psi2', '"""k."""'], {'ms': '(12.0)'}), "(0.0, psi2, 'k.', ms=12.0)\n", (1276, 1302), True, 'import matplotlib.pyplot as plt\n'), ((1305, 1351), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.0, 0.0]', '[psi2, 0.8]', '"""k"""'], {'lw': '(2.5)'}), "([0.0, 0.0], [psi2, 0.8], 'k', lw=2.5)\n", (1313, 1351), True, 'import matplotlib.pyplot as plt\n'), ((1354, 1413), 'matplotlib.pyplot.text', 'plt.text', (['(-0.2)', '(psi2 - 0.02)', '"""$\\\\psi(x_2)$"""'], {'fontsize': 'fsize'}), "(-0.2, psi2 - 0.02, '$\\\\psi(x_2)$', fontsize=fsize)\n", (1362, 1413), True, 'import matplotlib.pyplot as plt\n'), ((1415, 1471), 'matplotlib.pyplot.text', 'plt.text', (['(-0.11)', '(0.6)', '"""$\\\\mathcal{K}_2$"""'], {'fontsize': 'fsize'}), "(-0.11, 0.6, '$\\\\mathcal{K}_2$', fontsize=fsize)\n", (1423, 1471), True, 'import matplotlib.pyplot as plt\n'), ((1632, 1677), 'matplotlib.pyplot.text', 'plt.text', (['(0.07)', '(0.65)', '"""$R_2$"""'], {'fontsize': 'fsize'}), "(0.07, 0.65, '$R_2$', fontsize=fsize)\n", (1640, 
1677), True, 'import matplotlib.pyplot as plt\n'), ((1680, 1697), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (1688, 1697), True, 'import matplotlib.pyplot as plt\n'), ((1698, 1713), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1706, 1713), True, 'import matplotlib.pyplot as plt\n'), ((1714, 1731), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (1722, 1731), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1755), 'writeout.writeout', 'writeout', (['"""cartoon.pdf"""'], {}), "('cartoon.pdf')\n", (1740, 1755), False, 'from writeout import writeout\n'), ((280, 306), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(2)'], {}), '(xmin, xmax, 2)\n', (291, 306), True, 'import numpy as np\n'), ((315, 341), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(2)'], {}), '(ymin, ymax, 2)\n', (326, 341), True, 'import numpy as np\n'), ((379, 409), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'zero', '"""k"""'], {'lw': '(1.0)'}), "(x, zero, 'k', lw=1.0)\n", (387, 409), True, 'import matplotlib.pyplot as plt\n'), ((416, 446), 'matplotlib.pyplot.plot', 'plt.plot', (['zero', 'y', '"""k"""'], {'lw': '(1.0)'}), "(zero, y, 'k', lw=1.0)\n", (424, 446), True, 'import matplotlib.pyplot as plt\n'), ((362, 373), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (370, 373), True, 'import numpy as np\n'), ((1004, 1013), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1011, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1473, 1482), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1480, 1482), True, 'import matplotlib.pyplot as plt\n')]
import numpy as np


def embed_seq(time_series, tau, embedding_dimension):
    """Build a set of embedding sequences from given time series `time_series`
    with lag `tau` and embedding dimension `embedding_dimension`.

    Let time_series = [x(1), x(2), ... , x(N)], then for each i such that
    1 < i < N - (embedding_dimension - 1) * tau, we build an embedding sequence,
    Y(i) = [x(i), x(i + tau), ... , x(i + (embedding_dimension - 1) * tau)].
    All embedding sequences are placed in a matrix Y.

    Parameters
    ----------
    time_series
        numpy.ndarray
        a 1D time series
    tau
        integer
        the lag or delay when building embedding sequence
    embedding_dimension
        integer
        the embedding dimension

    Returns
    -------
    y
        2-embedding_dimension list
        embedding matrix built
    """
    shape = (
        time_series.size - tau * (embedding_dimension - 1),
        embedding_dimension
    )
    strides = (time_series.itemsize, tau * time_series.itemsize)
    return np.lib.stride_tricks.as_strided(
        time_series, shape=shape, strides=strides
    )
[ "numpy.lib.stride_tricks.as_strided" ]
[((1047, 1121), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['time_series'], {'shape': 'shape', 'strides': 'strides'}), '(time_series, shape=shape, strides=strides)\n', (1078, 1121), True, 'import numpy as np\n')]
# MIT License # # Copyright (c) 2021 GGggg-sp # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import numpy as np import pickle as pkl from openpyxl import Workbook from openpyxl.styles import Font, Color, Alignment, Border, Side, PatternFill # The number of nodes on the map number_of_nodes = 92 # The tools collection list tools_list = ['手', '藤条', '小红', '小绿', '树脂棍', '皮带', '戒尺'] # The positions and tools for spanking: # The positions are used as the keys in the dictionary and you can specific if there are some tools not suitable for # some positions, add your favorite position & tools as you like. position_tools_dict = {'OTK': [i for i in tools_list if i not in ['皮带', '小绿', '藤条']], # for example: pidai, xiaolv # and tentiao are not suitable for otk spanking, so we remove them by the expression above '趴在床沿': [i for i in tools_list if i not in ['手']], '床上平趴垫枕头': [i for i in tools_list if i not in ['手']], '身体趴在桌子上': [i for i in tools_list if i not in []], '手扶墙壁': [i for i in tools_list if i not in []], '手握脚踝': [i for i in tools_list if i not in []], '跪撅': [i for i in tools_list if i not in []], '换尿布式': [i for i in tools_list if i not in ['皮带']]} # some novel counting patterns that the spankee must count the numbers using bases such as dec, oct, etc # you can control the possibilities of each base as long as they enjoy a sum of 1 base_list = ['八进制', '二进制', '十进制', '十六进制'] possibilities_base_list = [0.15, 0.05, 0.7, 0.1] # not sp event list contains all the event types that are not spanking or special game patterns in 404 sanctuary not_sp_event_list = ['揉揉', '晾臀', '后退一格', '后退两格', '回到起点', '前进一格', '前进两格', '请主随意'] # special events featured by some dalaos in 404 sanctuary special_event_list = ['地刺王冰冰出现了!请您后退六格!', 'Genecro 请您回到起点并请罚', 'JayZ 为您点了数量任意的藤条', 'Zara 团宠觉得您应该享受更多的小红', '礼物姐看了眼你的屁屁表示不屑并建议增加任意数量的巴掌', '等子姐在等您前往避难所,因此您增加了晾臀时间', '泰坦:您今日戒色了吗?请您根据主的要求 DIY ', '冷子姐请您喝奶啤哦!喝完增加任意数量的棍状工具', '小诺姐值班时看到了您并将您写进了小说,本轮您可以自由选择将受到的惩罚', 'linu 告诉您反被为主并不是好的选择,并为您增加了亿些皮带数目', 'ladboy 的藤条到货了想请您去品尝,因此您增加了任意数量的藤条!', '悠久のSP資料館欢迎您前来阅览,您获得了五分钟的休息时间!', '火球太太声称火球在拍灰,火球对此事并没有回应,因此太太决定为您增加重度戒尺', 'vv 最近失踪辣,可能是在躲着姐姐叭,请您帮助 vv 承担亿些 OTK 手掌', '你这个一点都不重度诶,拳王如实说道,并为您点了重度藤条', 'Netspanker对你主的手法表示质疑,您有一次机会可以反被为主,冲!', 'dokoham 永远在默默无闻的搬运着资源,您选择挨任意数量的板状工具支持他'] special_event_list_bac = special_event_list.copy() # possibilities for sp, not sp event and the special event possibilities_event_list = [0.75, 0.15, 0.1] event_list = ['sp', 'not sp', 'special'] num_max_per_epoch = 40 num_list = list(range(10, num_max_per_epoch)) # The argument process_p is to 
indicate the process of the events. # For example, we can control the events possibility distribution in different stages of the whole spanking ludo games. def generate_single_event(process_p): event_type = np.random.choice(event_list, 1, p=possibilities_event_list).tolist()[0] if event_type == 'sp': position = np.random.choice(list(position_tools_dict.keys()), 1).tolist()[0] tool = np.random.choice(position_tools_dict[position], 1).tolist()[0] base = np.random.choice(base_list, 1, p=possibilities_base_list).tolist()[0] num = str(np.random.choice(num_list, 1).tolist()[0]) event_str = '姿势:' + position + '\n工具:' + tool + '\n数目:' + str(num) + '\n以' + base + '报数' event_dict = {'type': 'spanking', 'position': position, 'tool': tool, 'base': base, 'num': num} elif event_type == 'not sp': event_str = np.random.choice(not_sp_event_list) event_dict = {'type': 'not spanking', 'content': event_str} else: event_str = np.random.choice(special_event_list) special_event_list.remove(event_str) event_dict = {'type': 'special', 'content': event_str} return event_str, event_dict # Main procedure for generate events def generate_event_series(event_max_num: int): events_list = [] events_str_list = [] for current_event_num in range(event_max_num): current_event_str, current_event_dict = generate_single_event(current_event_num / event_max_num) events_str_list.append(current_event_str) return events_str_list, events_list def save_excel(event_series_str, excel_filename): workbook = Workbook() sheet = workbook.active # Calculate the size of the ludo map current_max_h = int(np.ceil(np.sqrt(len(event_series_str) * 2)) + 2) current_min_h = 1 current_max_v = current_max_h current_min_v = 1 current_h = 1 current_v = 1 speed_h = 1 speed_v = 0 base_ascii = 64 space = 2 for i in range(1, current_max_v): sheet.row_dimensions[i].height = 75 for i in range(1, current_max_h): sheet.column_dimensions[chr(base_ascii+i)].width = 20 for ind in range(len(event_series_str)): current_h = current_h + speed_h current_v = current_v + speed_v current_index = chr(base_ascii + current_v) + str(current_h) sheet[current_index] = event_series_str[ind] center_aligned_text = Alignment(horizontal="center", wrapText=True, vertical="center") double_border_side = Side(border_style="double") square_border = Border(top=double_border_side, right=double_border_side, bottom=double_border_side, left=double_border_side) sheet[current_index].border = square_border sheet[current_index].alignment = center_aligned_text if event_series_str[ind] in special_event_list_bac: sheet[current_index].fill = PatternFill(start_color="F48225", fill_type='solid') elif event_series_str[ind] in not_sp_event_list: sheet[current_index].fill = PatternFill(start_color="BBBBAA", fill_type='solid') # check if reached the boundary # -> if current_h + space == current_max_h and speed_h > 0: speed_h = 0 speed_v = 1 current_max_h = current_max_h - space # | # v if current_v + space == current_max_v and speed_v > 0: speed_h = -1 speed_v = 0 current_max_v = current_max_v - space # <- if current_h - space == current_min_h and speed_h < 0: speed_h = 0 speed_v = -1 current_min_h = current_min_h + space # ^ # | if current_v - space == current_min_v and speed_v < 0: speed_h = 1 speed_v = 0 current_min_v = current_min_v + space workbook.save(excel_filename) return if __name__ == '__main__': event_series_str, event_series_list = generate_event_series(event_max_num=number_of_nodes) # for i in range(len(event_series_str)): # print('第', i+1, '格') # print(event_series_str[i]) # Save generated 
event list to disk for further usage with open('sp_ludo.pkl', 'wb') as f: pkl.dump(event_series_list, f) # Save the excel file save_excel(event_series_str, "sp ludo.xlsx") print('Excel file saved!')
[ "pickle.dump", "openpyxl.Workbook", "openpyxl.styles.Alignment", "numpy.random.choice", "openpyxl.styles.Border", "openpyxl.styles.PatternFill", "openpyxl.styles.Side" ]
[((5618, 5628), 'openpyxl.Workbook', 'Workbook', ([], {}), '()\n', (5626, 5628), False, 'from openpyxl import Workbook\n'), ((6412, 6476), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""center"""', 'wrapText': '(True)', 'vertical': '"""center"""'}), "(horizontal='center', wrapText=True, vertical='center')\n", (6421, 6476), False, 'from openpyxl.styles import Font, Color, Alignment, Border, Side, PatternFill\n'), ((6506, 6533), 'openpyxl.styles.Side', 'Side', ([], {'border_style': '"""double"""'}), "(border_style='double')\n", (6510, 6533), False, 'from openpyxl.styles import Font, Color, Alignment, Border, Side, PatternFill\n'), ((6558, 6671), 'openpyxl.styles.Border', 'Border', ([], {'top': 'double_border_side', 'right': 'double_border_side', 'bottom': 'double_border_side', 'left': 'double_border_side'}), '(top=double_border_side, right=double_border_side, bottom=\n double_border_side, left=double_border_side)\n', (6564, 6671), False, 'from openpyxl.styles import Font, Color, Alignment, Border, Side, PatternFill\n'), ((8333, 8363), 'pickle.dump', 'pkl.dump', (['event_series_list', 'f'], {}), '(event_series_list, f)\n', (8341, 8363), True, 'import pickle as pkl\n'), ((4862, 4897), 'numpy.random.choice', 'np.random.choice', (['not_sp_event_list'], {}), '(not_sp_event_list)\n', (4878, 4897), True, 'import numpy as np\n'), ((4996, 5032), 'numpy.random.choice', 'np.random.choice', (['special_event_list'], {}), '(special_event_list)\n', (5012, 5032), True, 'import numpy as np\n'), ((6974, 7026), 'openpyxl.styles.PatternFill', 'PatternFill', ([], {'start_color': '"""F48225"""', 'fill_type': '"""solid"""'}), "(start_color='F48225', fill_type='solid')\n", (6985, 7026), False, 'from openpyxl.styles import Font, Color, Alignment, Border, Side, PatternFill\n'), ((4199, 4258), 'numpy.random.choice', 'np.random.choice', (['event_list', '(1)'], {'p': 'possibilities_event_list'}), '(event_list, 1, p=possibilities_event_list)\n', (4215, 4258), True, 'import numpy as np\n'), ((7124, 7176), 'openpyxl.styles.PatternFill', 'PatternFill', ([], {'start_color': '"""BBBBAA"""', 'fill_type': '"""solid"""'}), "(start_color='BBBBAA', fill_type='solid')\n", (7135, 7176), False, 'from openpyxl.styles import Font, Color, Alignment, Border, Side, PatternFill\n'), ((4398, 4448), 'numpy.random.choice', 'np.random.choice', (['position_tools_dict[position]', '(1)'], {}), '(position_tools_dict[position], 1)\n', (4414, 4448), True, 'import numpy as np\n'), ((4476, 4533), 'numpy.random.choice', 'np.random.choice', (['base_list', '(1)'], {'p': 'possibilities_base_list'}), '(base_list, 1, p=possibilities_base_list)\n', (4492, 4533), True, 'import numpy as np\n'), ((4564, 4593), 'numpy.random.choice', 'np.random.choice', (['num_list', '(1)'], {}), '(num_list, 1)\n', (4580, 4593), True, 'import numpy as np\n')]
import numpy as np


class Parabola:
    coefficients: np.ndarray  # Python 3.5 doesn't like this
    a: float
    b: float
    c: float
    extreme_point: list
    vertex_point: list

    def __init__(self, p: list, q: list, r: list):
        """Define a parabolic curve from 3 points.

        Given points are x1,y1 .. x3,y3. Want to find coefficients a, b, c for
        general eqn y = a x^2 + b x + c . Do this by solving system (matrix eqn)
        M * x = n , which is a system of: (a * x1^2) + (b * x1) + (c * 1) = y1

        This class exposes several useful things: a function of the parabola,
        the coefficients, and the endpoints.

        :param p: extreme point of an ABG curve
        :param q: interior "vertex" point
        :param r: midpoint of an ABG curve
        """
        if len(p) != len(q) != len(r) != 2:
            raise ValueError("All points must be 2D: %s %s %s" % (p, q, r))
        x1, y1 = p
        x2, y2 = q
        x3, y3 = r
        m = np.array([[x1 ** 2, x1, 1],
                      [x2 ** 2, x2, 1],
                      [x3 ** 2, x3, 1]])
        n = np.array([y1, y2, y3])
        self.coefficients = np.linalg.solve(m, n)
        self.a, self.b, self.c = self.coefficients[0], self.coefficients[1], self.coefficients[2]
        self.f = lambda x: self.a * x ** 2 + self.b * x + self.c
        self.extreme_point = p
        self.vertex_point = q


def cmp_func(y: float, behavior: str, f, x: float) -> bool:
    """Decide whether y < f(x), but where "<" can be specified.

    :param y: number to compare to f(x)
    :param behavior: string that can be 'less' or 'greater'
    :param f: function of 1 variable
    :param x: number at which to evaluate f, and then compare to y
    :return: boolean, whether or not y > f(x) or y < f(x) etc.
    """
    if behavior == 'less':
        return y < f(x)
    elif behavior == 'greater':
        return y > f(x)
    else:
        raise ValueError("behavior must be 'less' or 'greater'. Got %s " % behavior)


def line_func(p1: list, p2: list):
    """Take two x,y points. Return a linear function through them both.

    :param p1: list of [x1, y1] ordered pair (point)
    :param p2: list of [x2, y2]
    :return: (linear) function in 1 variable, which passes through both points.
    """
    assert len(p1) == len(p2) == 2
    x1, y1, = p1
    x2, y2 = p2
    m = (y1 - y2) / (x1 - x2)  # let it raise error if x1 == x2
    return lambda x: m * (x - x1) + y1


class Region:
    vtx_behavior: str
    ext_behavior: str

    def __init__(self, top: Parabola, bottom: Parabola, ext_behavior: str, vtx_behavior: str):
        """Define a region from 2 Parabola objects.

        Parabola objects comprise a curve and 2 endpoints. When you link the
        extreme and vertex endpoints of 2 parabolas, you get a Region (which has
        curve-line-curve-line bounds).

        The _behavior parameters define whether the 2 linear boundaries have
        region below them or above them. In future we may calculate this;
        currently we specify.

        :param top: the parabola that makes up the upper edge of the ABG region
        :param bottom: parabola for the bottom edge of the region
        :param ext_behavior: pass to cmp_func() whether region is </> segment
            connecting extreme points of the parabolas
        :param vtx_behavior: whether the region is below/above the segment
            connecting the 2 vertex points
        """
        self.top_curve = top.f
        self.bottom_curve = bottom.f
        self.ext_line = line_func(top.extreme_point, bottom.extreme_point)
        self.vtx_line = line_func(top.vertex_point, bottom.vertex_point)
        self.ext_behavior = ext_behavior
        self.vtx_behavior = vtx_behavior

    def contains(self, point: list) -> bool:
        """Determine whether the Region object contains the specified point.

        :param point: list of point [x, y] usually pH and bicarbonate, respectively.
        :return: Boolean, whether the point is inside the region.
        """
        if len(point) != 2:
            raise IndexError("Point must be 2D: %s" % point)
        x, y = point
        return self.bottom_curve(x) < y < self.top_curve(x) and \
            cmp_func(y, self.ext_behavior, self.ext_line, x) and \
            cmp_func(y, self.vtx_behavior, self.vtx_line, x)


class RegionQuad:
    def __init__(self, p1: list, p2: list, p3: list, p4: list):
        """Define a quadrilateral region.

        p1 p2 p3 must define the top edges.
        p3 p4 p1 must define the bottom edges.

            p2
        p1      p3
            p4

        :param p1: Start of one upper segment
        :param p2: End of 1st upper seg, start of 2nd upper
        :param p3: End of 2nd upper, start of 1st bottom seg.
        :param p4: End of 1st bottom seg, start of 2nd bottom
        """
        self.top12 = line_func(p1, p2)
        self.top23 = line_func(p2, p3)
        self.bot34 = line_func(p3, p4)
        self.bot41 = line_func(p4, p1)

    def contains(self, point: list) -> bool:
        if len(point) != 2:
            raise IndexError("Point must be 2D: %s" % point)
        x, y = point
        return self.top12(x) > y > self.bot34(x) and \
            self.top23(x) > y > self.bot41(x)


class RegionSimple:
    def __init__(self, xmin, xmax, ymin, ymax):
        if xmin >= xmax or ymin >= ymax:
            raise ValueError("Must have xmin<xmax and ymin<ymax. Got %s %s %s %s" %
                             (xmin, xmax, ymin, ymax))
        self.xmin = xmin
        self.xmax = xmax
        self.ymin = ymin
        self.ymax = ymax

    def contains(self, point: list) -> bool:
        if len(point) != 2:
            raise IndexError("Point must be 2D: %s" % point)
        x, y = point
        return self.xmin <= x <= self.xmax and \
            self.ymin <= y <= self.ymax
[ "numpy.linalg.solve", "numpy.array" ]
[((997, 1061), 'numpy.array', 'np.array', (['[[x1 ** 2, x1, 1], [x2 ** 2, x2, 1], [x3 ** 2, x3, 1]]'], {}), '([[x1 ** 2, x1, 1], [x2 ** 2, x2, 1], [x3 ** 2, x3, 1]])\n', (1005, 1061), True, 'import numpy as np\n'), ((1118, 1140), 'numpy.array', 'np.array', (['[y1, y2, y3]'], {}), '([y1, y2, y3])\n', (1126, 1140), True, 'import numpy as np\n'), ((1169, 1190), 'numpy.linalg.solve', 'np.linalg.solve', (['m', 'n'], {}), '(m, n)\n', (1184, 1190), True, 'import numpy as np\n')]
from skimage.io import imsave
import numpy as np
from skimage.transform import resize
from model import bce_dice_loss, iou, dice_coef
from read_one_image import read_one_image
from post_process_image import post_processing, colorize_image
from tensorflow.keras.models import load_model
import tensorflow as tf


class predict:

    def __init__(self, mode:str):
        '''The mode can either be mem_save or time_save.

        The parameter mem_save only loads the models into the memory when it is
        needed. This will make each of the prediction times longer. The mode
        time_save loads all of the models into the memory at the start so that
        when the algorithm is predicting, it is much faster. In short, the
        mem_save mode is better if you are just going to run one or two
        predictions or if you are limited on computer memory while the
        time_save function is better if you are doing many predictions.
        '''
        mode_types = ["mem_save", "time_save"]
        if mode not in mode_types:
            raise ValueError("You are using a not correct mode type. Mode types must be either mem_save or time_save")

        USE_GPU = False
        if USE_GPU:
            # This code is required if I want tensorflow to use the GPU
            physical_devices = tf.config.list_physical_devices('GPU')
            tf.config.experimental.set_memory_growth(physical_devices[0], True)
        else:
            # This code below is added to keep tensorflow from using GPU
            import os
            try:
                # Disable all GPUS
                tf.config.set_visible_devices([], 'GPU')
                visible_devices = tf.config.get_visible_devices()
                for device in visible_devices:
                    assert device.device_type != 'GPU'
            except:
                # Invalid device or cannot modify virtual devices once initialized.
                pass
            os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

        if mode == "mem_save":
            self.unet_model = None
            self.unetpp_model = None
            self.fcn_model = None
            self.gan_model = None
        elif mode == "time_save":
            self.load_models()

    def load_models(self):
        self.unet_model = load_model("models/unet", custom_objects={"dice_coef": dice_coef})
        self.unetpp_model = load_model("models/unet++", custom_objects={'bce_dice_loss': bce_dice_loss, 'iou': iou})
        self.fcn_model = load_model("models/FCN", custom_objects={"dice_coef": dice_coef})
        self.gan_model = load_model("models/GAN")

    def process_image(self, folder,filename, model_name):
        IMG_WIDTH=256
        IMG_HEIGHT=256
        if model_name == "unet":
            if self.unet_model == None:
                model = load_model("models/unet", custom_objects={"dice_coef": dice_coef})
            else:
                model = self.unet_model
        elif model_name == "unetpp":
            if self.unetpp_model == None:
                model = load_model("models/unet++", custom_objects={'bce_dice_loss': bce_dice_loss, 'iou': iou})
            else:
                model = self.unetpp_model
        elif model_name == "fcn":
            if self.fcn_model == None:
                model = load_model("models/FCN", custom_objects={"dice_coef": dice_coef})
            else:
                model = self.fcn_model
        elif model_name == "gan":
            if self.gan_model == None:
                model = load_model("models/GAN")
            else:
                model = self.gan_model
            IMG_WIDTH=128
            IMG_HEIGHT=128

        input_image, input_image_sizes = read_one_image(folder + "/" + filename, IMG_WIDTH, IMG_HEIGHT)

        # Predict on test data
        predicted_mask = model(input_image, training=False)
        predicted_mask = np.array(predicted_mask)
        predicted_mask = predicted_mask[0,:,:,:]
        input_image = input_image[0,:,:,:]

        # post processing
        predicted_mask_processed = post_processing(predicted_mask)
        predicted_mask_processed = resize(predicted_mask_processed, input_image_sizes, mode='constant', preserve_range=True).astype('bool')
        input_image = resize(input_image, input_image_sizes, mode='constant', preserve_range=True,anti_aliasing=True)

        colorized_image = colorize_image(predicted_mask_processed,input_image)

        colorized_image_filename = "processed_" + filename
        colorized_image_path = folder + "/" + colorized_image_filename
        imsave(colorized_image_path, colorized_image)

        return "processed_" + filename
[ "tensorflow.keras.models.load_model", "read_one_image.read_one_image", "skimage.io.imsave", "tensorflow.config.set_visible_devices", "tensorflow.config.list_physical_devices", "tensorflow.config.experimental.set_memory_growth", "numpy.array", "skimage.transform.resize", "tensorflow.config.get_visible_devices", "post_process_image.post_processing", "post_process_image.colorize_image" ]
[((2259, 2325), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/unet"""'], {'custom_objects': "{'dice_coef': dice_coef}"}), "('models/unet', custom_objects={'dice_coef': dice_coef})\n", (2269, 2325), False, 'from tensorflow.keras.models import load_model\n'), ((2354, 2446), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/unet++"""'], {'custom_objects': "{'bce_dice_loss': bce_dice_loss, 'iou': iou}"}), "('models/unet++', custom_objects={'bce_dice_loss': bce_dice_loss,\n 'iou': iou})\n", (2364, 2446), False, 'from tensorflow.keras.models import load_model\n'), ((2468, 2533), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/FCN"""'], {'custom_objects': "{'dice_coef': dice_coef}"}), "('models/FCN', custom_objects={'dice_coef': dice_coef})\n", (2478, 2533), False, 'from tensorflow.keras.models import load_model\n'), ((2559, 2583), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/GAN"""'], {}), "('models/GAN')\n", (2569, 2583), False, 'from tensorflow.keras.models import load_model\n'), ((3674, 3736), 'read_one_image.read_one_image', 'read_one_image', (["(folder + '/' + filename)", 'IMG_WIDTH', 'IMG_HEIGHT'], {}), "(folder + '/' + filename, IMG_WIDTH, IMG_HEIGHT)\n", (3688, 3736), False, 'from read_one_image import read_one_image\n'), ((3854, 3878), 'numpy.array', 'np.array', (['predicted_mask'], {}), '(predicted_mask)\n', (3862, 3878), True, 'import numpy as np\n'), ((4033, 4064), 'post_process_image.post_processing', 'post_processing', (['predicted_mask'], {}), '(predicted_mask)\n', (4048, 4064), False, 'from post_process_image import post_processing, colorize_image\n'), ((4227, 4327), 'skimage.transform.resize', 'resize', (['input_image', 'input_image_sizes'], {'mode': '"""constant"""', 'preserve_range': '(True)', 'anti_aliasing': '(True)'}), "(input_image, input_image_sizes, mode='constant', preserve_range=True,\n anti_aliasing=True)\n", (4233, 4327), False, 'from skimage.transform import resize\n'), ((4349, 4402), 'post_process_image.colorize_image', 'colorize_image', (['predicted_mask_processed', 'input_image'], {}), '(predicted_mask_processed, input_image)\n', (4363, 4402), False, 'from post_process_image import post_processing, colorize_image\n'), ((4540, 4585), 'skimage.io.imsave', 'imsave', (['colorized_image_path', 'colorized_image'], {}), '(colorized_image_path, colorized_image)\n', (4546, 4585), False, 'from skimage.io import imsave\n'), ((1283, 1321), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (1314, 1321), True, 'import tensorflow as tf\n'), ((1335, 1402), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (1375, 1402), True, 'import tensorflow as tf\n'), ((1581, 1621), 'tensorflow.config.set_visible_devices', 'tf.config.set_visible_devices', (['[]', '"""GPU"""'], {}), "([], 'GPU')\n", (1610, 1621), True, 'import tensorflow as tf\n'), ((1656, 1687), 'tensorflow.config.get_visible_devices', 'tf.config.get_visible_devices', ([], {}), '()\n', (1685, 1687), True, 'import tensorflow as tf\n'), ((2785, 2851), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/unet"""'], {'custom_objects': "{'dice_coef': dice_coef}"}), "('models/unet', custom_objects={'dice_coef': dice_coef})\n", (2795, 2851), False, 'from tensorflow.keras.models import load_model\n'), ((4100, 4193), 'skimage.transform.resize', 'resize', 
(['predicted_mask_processed', 'input_image_sizes'], {'mode': '"""constant"""', 'preserve_range': '(True)'}), "(predicted_mask_processed, input_image_sizes, mode='constant',\n preserve_range=True)\n", (4106, 4193), False, 'from skimage.transform import resize\n'), ((3017, 3109), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/unet++"""'], {'custom_objects': "{'bce_dice_loss': bce_dice_loss, 'iou': iou}"}), "('models/unet++', custom_objects={'bce_dice_loss': bce_dice_loss,\n 'iou': iou})\n", (3027, 3109), False, 'from tensorflow.keras.models import load_model\n'), ((3267, 3332), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/FCN"""'], {'custom_objects': "{'dice_coef': dice_coef}"}), "('models/FCN', custom_objects={'dice_coef': dice_coef})\n", (3277, 3332), False, 'from tensorflow.keras.models import load_model\n'), ((3491, 3515), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/GAN"""'], {}), "('models/GAN')\n", (3501, 3515), False, 'from tensorflow.keras.models import load_model\n')]
import seaborn as sns import pandas as pd import matplotlib.pyplot as plt plt.rcParams.update({'font.size': 10}) import json import os, sys import numpy as np if __name__ == "__main__": fname = sys.argv[1] data = np.loadtxt(fname) fig, ax = plt.subplots(1, figsize=(7,2.5)) # output profile and set point y = ax.plot(data[:,0],data[:,1], label='$y$') yset = ax.plot(data[:,0],data[:,2], label=r'$y_{set}$') ax.set_xlabel('Time') ax.set_ylabel('Temperature') # input profile axtwin = ax.twinx() u = axtwin.plot(data[:,0], data[:,3], label=r'$u$', color='red', linestyle='--') axtwin.set_ylabel('Power', color='red') axtwin.tick_params('y', colors='red') # shrink the axis for the legend on top box = ax.get_position() # Legend underneath #axlegend.set_position([box.x0, box.y0 + box.height * 0.1, # box.width, box.height * 0.9]) # Legend above ax.set_position([box.x0, box.y0 + box.height*0.05, box.width, box.height * 0.9]) alllines = y + yset + u alllabels = [l.get_label() for l in alllines] ax.legend(alllines, alllabels, ncol=3, bbox_to_anchor = (0, 1.02, 1, 0.2), loc='upper center') #ax.legend(alllines, alllabels, ncol=3, bbox_to_anchor = (1, 0), loc='lower right', bbox_transform=fig.transFigure) plt.tight_layout() plt.show() print(np.sum(np.abs(data[:,1]-data[:,2]))) print(np.sum([np.abs(data[i+1,3]-data[i,3]) for i in range(len(data[:,3])-1)])) #fig, ax = plt.subplots(1, figsize=(7,2.5)) #ax.plot(data[:,0],data[:,3], label='System') #ax.set_xlabel('Time') #ax.set_ylabel('Power') #ax.legend(loc='best') #plt.tight_layout() #plt.show()
[ "matplotlib.pyplot.show", "numpy.abs", "matplotlib.pyplot.subplots", "matplotlib.pyplot.rcParams.update", "numpy.loadtxt", "matplotlib.pyplot.tight_layout" ]
[((74, 112), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 10}"], {}), "({'font.size': 10})\n", (93, 112), True, 'import matplotlib.pyplot as plt\n'), ((222, 239), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {}), '(fname)\n', (232, 239), True, 'import numpy as np\n'), ((255, 288), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(7, 2.5)'}), '(1, figsize=(7, 2.5))\n', (267, 288), True, 'import matplotlib.pyplot as plt\n'), ((1345, 1363), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1361, 1363), True, 'import matplotlib.pyplot as plt\n'), ((1368, 1378), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1376, 1378), True, 'import matplotlib.pyplot as plt\n'), ((1397, 1428), 'numpy.abs', 'np.abs', (['(data[:, 1] - data[:, 2])'], {}), '(data[:, 1] - data[:, 2])\n', (1403, 1428), True, 'import numpy as np\n'), ((1445, 1480), 'numpy.abs', 'np.abs', (['(data[i + 1, 3] - data[i, 3])'], {}), '(data[i + 1, 3] - data[i, 3])\n', (1451, 1480), True, 'import numpy as np\n')]
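The plotting script in the row above also prints two summary statistics for the run: the summed absolute tracking error between the output and its set point, and the total movement of the control input. A minimal sketch of the same two metrics on a made-up four-column array (time, output, set point, input — the layout the script assumes for its log file):

import numpy as np

# toy stand-in for np.loadtxt(fname): columns are time, y, y_set, u
data = np.array([[0.0, 20.0, 25.0, 1.0],
                 [1.0, 22.0, 25.0, 0.8],
                 [2.0, 24.5, 25.0, 0.5],
                 [3.0, 25.0, 25.0, 0.5]])

tracking_error = np.sum(np.abs(data[:, 1] - data[:, 2]))    # sum of |y - y_set|
control_movement = np.sum(np.abs(np.diff(data[:, 3])))  # sum of |u[i+1] - u[i]|
print(tracking_error, control_movement)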
import librosa import librosa.display import numpy as np from pydub import AudioSegment import torch from matplotlib import pyplot as plt SAMPLE = 44100 TOP = 32767 def load_wave_file_to_numpy(file_path, sample_rate=SAMPLE, *args, **kwargs): return librosa.load(file_path, sr=sample_rate, *args, **kwargs) def plot_complex_spectrogram(D: np.ndarray, title="test audio", y_axis="log", cmap="binary", angle_off=False): angle = np.angle(D) abs = np.abs(D) librosa.display.specshow(abs, y_axis=y_axis, cmap=cmap) if not angle_off: librosa.display.specshow(angle, y_axis=y_axis, alpha=0.2) plt.title(f"Log-frequency power spectrogram of {title}") def plot_wave_and_spectrogram(numpy_wave: np.ndarray, D=None, title="test audio", y_axis="log", cmap="binary", angle_off=False): fig = plt.figure(figsize=(12, 8)) if D is None: D = _stft(numpy_wave) plt.subplot(2, 1, 1) plot_complex_spectrogram(D, title, y_axis, cmap=cmap, angle_off=angle_off) plt.subplot(2, 1, 2) plt.plot(numpy_wave, lw=0.6) plt.title("Waveform") plt.margins(x=0) plt.plot() plt.show() plt.close() def _stft(wave, n_fft=1024, hop_length=512, ): return librosa.stft(wave, n_fft=n_fft, hop_length=hop_length) def _istft(spec, hop_length=512 ): return librosa.istft(spec, hop_length=hop_length) def split_complex_spectra_into_magnitude_and_phase(spectra): return np.abs(spectra), np.angle(spectra) def merge_magnitude_and_phase_to_complex_spectra(magnitude, phase): Real = np.cos(phase) * magnitude Imag = np.sin(phase) * magnitude Bind = np.zeros_like(magnitude, dtype=complex) Bind.real = Real Bind.imag = Imag return Bind def griffin_lim(Amp, Ang): y = _istft(merge_magnitude_and_phase_to_complex_spectra(Amp, Ang)) _, Ang = split_complex_spectra_into_magnitude_and_phase(_stft(y)) return Ang def apply_magnitude_to_another(source, target): Source = _stft(source) Target = _stft(target) Angle = np.angle(Target) Mag = np.abs(Source) Real = np.cos(Angle) * Mag Imag = np.sin(Angle) * Mag Bind = np.zeros_like(Target, dtype=complex) Bind.real = Real Bind.imag = Imag wave = _istft(Bind) return wave def read_advanced_format_to_np(file, format): segment = AudioSegment.from_file(file, format) wave = segment.get_array_of_samples() wave = np.array(wave) / 65536 left = wave[::2] right = wave[1::2] wave = left + right wave = wave / 3 return wave def from_int16_to_float_numpy(arr): return arr / 65536 def mono_pad_or_truncate(original, estimate): if len(estimate) > len(original): return original, estimate[:len(original)] else: return original[:len(estimate)], estimate def overlap_click(original, miliseconds, sr=44100): for point in miliseconds: assert 0 <= point < len(original) / sr, "frame out of range" def standardize(wave): top = np.max(np.abs(wave)) return wave / top def overlap_click(original, click_position, sr=44100, click_freq=2000, click_duration=0.5): """ :param click_position: Notice that position should be given in second :return: wave """ cwave = librosa.clicks(np.array(click_position), sr=44100, click_freq=4000, click_duration=0.05) / 2 original, wave = mono_pad_or_truncate(original, cwave) return standardize(original + wave) def mmsecond_to_sample(mseconds, sr=44100): return [msecond * 44.1 for msecond in mseconds] def mfcc(wave: np.ndarray, n_mfcc=40, sr=44100): return librosa.feature.mfcc(wave, sr=sr, n_mfcc=n_mfcc) def plot_wave_and_mfcc(wave, sr=44100, n_mfcc=40): M = mfcc(wave, sr=sr, n_mfcc=n_mfcc) plot_wave_and_spectrogram(wave, D=M, cmap="hsv", y_axis="linear", angle_off=True) def mel(wave: np.ndarray, n_mels=80, sr=44100): return 
librosa.feature.melspectrogram(wave, sr=sr, n_mels=n_mels) def torch_raw_stft(wave, n_fft=2048, hop=512): """ :param wave: :param n_fft: :param hop: :return: [B, C, T, 2] """ if not torch.is_tensor(wave): wave = torch.tensor(wave) return torch.stft(wave, n_fft=n_fft, hop_length=hop) def torch_mag_stft(wave, n_fft=2048, hop=512): """ :param wave: :param n_fft: :param hop: :return: [B, C, T] """ Wave = torch_raw_stft(wave, n_fft, hop) return Wave.pow(2).sum(dim=-1).sqrt() def torch_spec_to_numpy_complex(Wave): """ :param Wave: :param hop: :return: You Better Split ME [..., x] """ Real = Wave[..., 0].cpu().numpy() Imag = Wave[..., 1].cpu().numpy() Combine = np.complex_(Real) + 1.0j * np.complex_(Imag) return Combine def torch_spec_to_numpy_arg_mag(Wave): Wave = torch_spec_to_numpy_complex(Wave) Mag, Arg = split_complex_spectra_into_magnitude_and_phase(Wave) return Mag, Arg if __name__ == '__main__': wave, sr = load_wave_file_to_numpy("test.wav") spec = mel(wave) plot_wave_and_spectrogram(wave, D=spec, y_axis="linear") print(spec.shape)
[ "matplotlib.pyplot.title", "numpy.abs", "torch.stft", "matplotlib.pyplot.margins", "numpy.angle", "librosa.istft", "matplotlib.pyplot.figure", "numpy.sin", "pydub.AudioSegment.from_file", "librosa.feature.melspectrogram", "librosa.feature.mfcc", "numpy.zeros_like", "matplotlib.pyplot.close", "torch.is_tensor", "librosa.stft", "matplotlib.pyplot.show", "numpy.complex_", "librosa.load", "numpy.cos", "librosa.display.specshow", "matplotlib.pyplot.subplot", "matplotlib.pyplot.plot", "numpy.array", "torch.tensor" ]
[((258, 314), 'librosa.load', 'librosa.load', (['file_path', '*args'], {'sr': 'sample_rate'}), '(file_path, *args, sr=sample_rate, **kwargs)\n', (270, 314), False, 'import librosa\n'), ((440, 451), 'numpy.angle', 'np.angle', (['D'], {}), '(D)\n', (448, 451), True, 'import numpy as np\n'), ((462, 471), 'numpy.abs', 'np.abs', (['D'], {}), '(D)\n', (468, 471), True, 'import numpy as np\n'), ((476, 531), 'librosa.display.specshow', 'librosa.display.specshow', (['abs'], {'y_axis': 'y_axis', 'cmap': 'cmap'}), '(abs, y_axis=y_axis, cmap=cmap)\n', (500, 531), False, 'import librosa\n'), ((624, 680), 'matplotlib.pyplot.title', 'plt.title', (['f"""Log-frequency power spectrogram of {title}"""'], {}), "(f'Log-frequency power spectrogram of {title}')\n", (633, 680), True, 'from matplotlib import pyplot as plt\n'), ((822, 849), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (832, 849), True, 'from matplotlib import pyplot as plt\n'), ((903, 923), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (914, 923), True, 'from matplotlib import pyplot as plt\n'), ((1007, 1027), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1018, 1027), True, 'from matplotlib import pyplot as plt\n'), ((1032, 1060), 'matplotlib.pyplot.plot', 'plt.plot', (['numpy_wave'], {'lw': '(0.6)'}), '(numpy_wave, lw=0.6)\n', (1040, 1060), True, 'from matplotlib import pyplot as plt\n'), ((1065, 1086), 'matplotlib.pyplot.title', 'plt.title', (['"""Waveform"""'], {}), "('Waveform')\n", (1074, 1086), True, 'from matplotlib import pyplot as plt\n'), ((1091, 1107), 'matplotlib.pyplot.margins', 'plt.margins', ([], {'x': '(0)'}), '(x=0)\n', (1102, 1107), True, 'from matplotlib import pyplot as plt\n'), ((1112, 1122), 'matplotlib.pyplot.plot', 'plt.plot', ([], {}), '()\n', (1120, 1122), True, 'from matplotlib import pyplot as plt\n'), ((1127, 1137), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1135, 1137), True, 'from matplotlib import pyplot as plt\n'), ((1142, 1153), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1151, 1153), True, 'from matplotlib import pyplot as plt\n'), ((1234, 1288), 'librosa.stft', 'librosa.stft', (['wave'], {'n_fft': 'n_fft', 'hop_length': 'hop_length'}), '(wave, n_fft=n_fft, hop_length=hop_length)\n', (1246, 1288), False, 'import librosa\n'), ((1359, 1401), 'librosa.istft', 'librosa.istft', (['spec'], {'hop_length': 'hop_length'}), '(spec, hop_length=hop_length)\n', (1372, 1401), False, 'import librosa\n'), ((1666, 1705), 'numpy.zeros_like', 'np.zeros_like', (['magnitude'], {'dtype': 'complex'}), '(magnitude, dtype=complex)\n', (1679, 1705), True, 'import numpy as np\n'), ((2065, 2081), 'numpy.angle', 'np.angle', (['Target'], {}), '(Target)\n', (2073, 2081), True, 'import numpy as np\n'), ((2092, 2106), 'numpy.abs', 'np.abs', (['Source'], {}), '(Source)\n', (2098, 2106), True, 'import numpy as np\n'), ((2180, 2216), 'numpy.zeros_like', 'np.zeros_like', (['Target'], {'dtype': 'complex'}), '(Target, dtype=complex)\n', (2193, 2216), True, 'import numpy as np\n'), ((2362, 2398), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['file', 'format'], {}), '(file, format)\n', (2384, 2398), False, 'from pydub import AudioSegment\n'), ((3633, 3681), 'librosa.feature.mfcc', 'librosa.feature.mfcc', (['wave'], {'sr': 'sr', 'n_mfcc': 'n_mfcc'}), '(wave, sr=sr, n_mfcc=n_mfcc)\n', (3653, 3681), False, 'import librosa\n'), ((3922, 3980), 'librosa.feature.melspectrogram', 
'librosa.feature.melspectrogram', (['wave'], {'sr': 'sr', 'n_mels': 'n_mels'}), '(wave, sr=sr, n_mels=n_mels)\n', (3952, 3980), False, 'import librosa\n'), ((4201, 4246), 'torch.stft', 'torch.stft', (['wave'], {'n_fft': 'n_fft', 'hop_length': 'hop'}), '(wave, n_fft=n_fft, hop_length=hop)\n', (4211, 4246), False, 'import torch\n'), ((562, 619), 'librosa.display.specshow', 'librosa.display.specshow', (['angle'], {'y_axis': 'y_axis', 'alpha': '(0.2)'}), '(angle, y_axis=y_axis, alpha=0.2)\n', (586, 619), False, 'import librosa\n'), ((1476, 1491), 'numpy.abs', 'np.abs', (['spectra'], {}), '(spectra)\n', (1482, 1491), True, 'import numpy as np\n'), ((1493, 1510), 'numpy.angle', 'np.angle', (['spectra'], {}), '(spectra)\n', (1501, 1510), True, 'import numpy as np\n'), ((1592, 1605), 'numpy.cos', 'np.cos', (['phase'], {}), '(phase)\n', (1598, 1605), True, 'import numpy as np\n'), ((1629, 1642), 'numpy.sin', 'np.sin', (['phase'], {}), '(phase)\n', (1635, 1642), True, 'import numpy as np\n'), ((2118, 2131), 'numpy.cos', 'np.cos', (['Angle'], {}), '(Angle)\n', (2124, 2131), True, 'import numpy as np\n'), ((2149, 2162), 'numpy.sin', 'np.sin', (['Angle'], {}), '(Angle)\n', (2155, 2162), True, 'import numpy as np\n'), ((2452, 2466), 'numpy.array', 'np.array', (['wave'], {}), '(wave)\n', (2460, 2466), True, 'import numpy as np\n'), ((3031, 3043), 'numpy.abs', 'np.abs', (['wave'], {}), '(wave)\n', (3037, 3043), True, 'import numpy as np\n'), ((4133, 4154), 'torch.is_tensor', 'torch.is_tensor', (['wave'], {}), '(wave)\n', (4148, 4154), False, 'import torch\n'), ((4171, 4189), 'torch.tensor', 'torch.tensor', (['wave'], {}), '(wave)\n', (4183, 4189), False, 'import torch\n'), ((4694, 4711), 'numpy.complex_', 'np.complex_', (['Real'], {}), '(Real)\n', (4705, 4711), True, 'import numpy as np\n'), ((3296, 3320), 'numpy.array', 'np.array', (['click_position'], {}), '(click_position)\n', (3304, 3320), True, 'import numpy as np\n'), ((4721, 4738), 'numpy.complex_', 'np.complex_', (['Imag'], {}), '(Imag)\n', (4732, 4738), True, 'import numpy as np\n')]
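The split/merge helpers in the row above take a complex spectrogram apart into magnitude and phase and rebuild it with cos/sin. A numpy-only sketch of that round trip, using a small random complex matrix as a stand-in for an STFT (no audio file or librosa call needed):

import numpy as np

rng = np.random.default_rng(0)
spec = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))  # stand-in for an STFT matrix

mag, phase = np.abs(spec), np.angle(spec)              # split into magnitude and phase
rebuilt = mag * np.cos(phase) + 1j * mag * np.sin(phase)  # merge back into a complex spectrogram

print(np.allclose(rebuilt, spec))  # True: the decomposition is lossless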
import random, os import numpy as np from torch.utils.data import Dataset from sentence_transformers import InputExample import csv from typing import List, Tuple, Optional, Union import copy class IMDB62AvDataset(Dataset): """Dataset for Author Verification on the IMDB62 Dataset.""" def __init__(self, data_file: str, dataset_size: int = None, use_cache: bool = True): """ Args: data_file (string): the path to the IMDB62 Dataset txt file """ self.data_file = data_file self.dataset_size = dataset_size # get data file raw_data = get_imdb_train_or_test(self.data_file) # now process the individual files into an actual dataset # add all data to a new object, then sample negative pairs to reach self.dataset_size self.data = transform_data_classification_to_siamese(raw_data, self.dataset_size) def __len__(self): return len(self.data) def __getitem__(self, idx): return self.data[idx] class IMDB62ContrastiveDataset(Dataset): """Dataset for Author Verification on the IMDB62 Dataset.""" def __init__(self, data_file: str, base_rate: float = 0.2 ): """ Args: data_file (string): the path to the IMDB62 Dataset txt file """ self.data_file = data_file self.base_rate = base_rate # get data file self.data = get_imdb_train_or_test(self.data_file, as_dict=True) def __len__(self): return sum([len(x) for x in self.data.values()]) def __getitem__(self, idx): # just sample randomly every time, with the desired precentage of same/diff auth # draw the first author auth1 = random.choice(list(self.data.keys())) text1 = random.choice(self.data[auth1]) if np.random.uniform() <= self.base_rate: # this needs to be a same author pair text2 = None while text1 == text2 or text2 is None: text2 = random.choice(self.data[auth1]) label = 1 else: auth2 = random.choice(list(self.data.keys())) text2 = random.choice(self.data[auth2]) label = 0 return InputExample(texts=[text1, text2], label=label) def get_training_for_contrastive_loss_imdb(data_file: str) -> List[InputExample]: raw_data = get_imdb_contrastive_train_file(data_file) # now make list of input examples for i in range(len(raw_data)): tmp = raw_data[i] raw_data[i] = InputExample(texts=[tmp[1], tmp[2]], label=int(tmp[0])) return raw_data def get_imdb_contrastive_train_file(data_file: str) -> List: raw_data = [] with open(data_file, 'r') as f: csv_reader = csv.reader(f) for row in csv_reader: if row[0] == "same": # skip header continue raw_data.append([row[0], row[1], row[2]]) return raw_data def get_training_for_triplet_loss_imdb(data_file: str) -> List[InputExample]: raw_data = get_imdb_train_or_test(data_file) # make list of InputExamples, return data = [] for i in raw_data: data.append(i) # InputExample(texts=[i[1]], label=int(i[0]))) return data def transform_data_classification_to_siamese(raw_data: List[Tuple[str, str]], dataset_size: int, output_type: str = 'InputExample') \ -> List[Union[Tuple[int, str], InputExample]]: data = [] for i in range(int(len(raw_data) / 2)): # get next two datapoints, and determine label based off of them being the same author or not dp1 = raw_data[2 * i] dp2 = raw_data[2 * i + 1] label = float(1) if dp1[0] == dp2[0] else float(0) # wrap data in proper output class if output_type == 'InputExample': data.append(InputExample(texts=[dp1[1], dp2[1]], label=label)) elif output_type == 'str': data.append((dp1[1], dp2[1], label)) else: assert False, 'transform_data_classification_to_siamese output type not known' # now randomly sample to increase dataset size - there are 2^62,000 combinations, so just randomly sample for i in range(dataset_size - 
len(data)): dp1 = random.choice(raw_data) dp2 = random.choice(raw_data) # make sure points aren't the same while (dp1[0] == dp2[0] and dp1[1] == dp2[1]): dp1 = random.choice(raw_data) dp2 = random.choice(raw_data) # add to the dataset label = float(1) if dp1[0] == dp2[0] else float(0) # wrap data in proper output class if output_type == 'InputExample': data.append(InputExample(texts=[dp1[1], dp2[1]], label=label)) elif output_type == 'str': data.append((dp1[1], dp2[1], label)) else: assert False, 'transform_data_classification_to_siamese output type not known' return data def get_imdb_test_for_sbert_evaluator(dataset_path: str, dataset_size: int) -> \ List[Union[Tuple[int, str], InputExample]]: sbert_evaluation_data = transform_data_classification_to_siamese(dataset_path, dataset_size, 'str') return sbert_evaluation_data def get_imdb_train_or_test(dataset_path: str, as_dict: bool = False) -> List[Tuple[str, str]]: data = {} if as_dict else [] with open(dataset_path, 'r') as f: lines = f.readlines() for line in lines: line = line.split('\t') if as_dict: if line[0] in data.keys(): data[line[0]].append(line[1]) else: data[line[0]] = [line[1]] else: data.append((line[0], line[1])) return data def get_imdb_as_dict(dataset_path: str) -> dict: # get the dataset as a dict, with key being author id and value being a list of contents raw_data = {} # transform user_id's as well label_transformer = {} label_count = 0 with open(dataset_path, 'r') as f: lines = f.readlines() for line in lines: line = line.split('\t') # we want to make sure that splitting on tab is the right thing, so check len of resultant object assert len(line) == 6, 'The split line, from the imdb62 dataset, has not given in the right num of ojbects' user_id = line[1] text = line[-1] if user_id not in label_transformer.keys(): label_transformer[user_id] = label_count label_count += 1 # change user_id to incrementing int user_id = str(label_transformer[user_id]) if user_id not in raw_data.keys(): raw_data[user_id] = [text] else: raw_data[user_id].append(text) return raw_data if __name__ == "__main__": # we want to split up the IMDB62 dataset into a train/test split # exactly how this is done differs widely, so we will just go with the classic 80/20 random split. # for each author, use 800 posts for training, and 200 for testing. 
import argparse # get command line args parser = argparse.ArgumentParser(description='Get args for building train/test splits of IMDB dataset') parser.add_argument('--dataset_path', metavar='dataset_path', type=str, help='Which dataset to use.') parser.add_argument('--dataset_save_path', metavar='dataset_save_path', type=str, help='Where to save new training and testing dataset') parser.add_argument('--what_for', metavar='what_for', type=str, default='triplet', help='set to contrastive to build dataset for contrastive loss, or triplet for triplet loss') parser.add_argument('--chunk', action='store_true', help='set this flag to chunk examples into lengths of 500 words') args = parser.parse_args() print(f'getting original dataset at {args.dataset_path}') # read the csv file, and because the dataset isn't very big, keep everything in memory data = get_imdb_as_dict(args.dataset_path) print('splitting dataset') # now split this into a train and test dict (for simplicity, just take first 800 training, last 200 test) training_data, test_data = {}, {} for k, v in data.items(): training_data[k] = v[:800] test_data[k] = v[800:] training_data_nc = copy.deepcopy(training_data) # now chunk training if necessary if args.chunk: print('before chunk:') num_texts = sum([len(x) for x in training_data.values()]) num_auths = sum([1 for x in training_data.keys()]) all_texts, total_len = [], 0 for texts in training_data.values(): for text in texts: total_len += len(text) avg_txt_len = total_len / num_texts print(f'\tnum_texts: {num_texts}') print(f'\tnum_auths: {num_auths}') print(f'\ttext_len: {total_len}') print(f'\tavg_len: {avg_txt_len}') for auth, texts in training_data.items(): new_texts = [] for text in texts: split_text = text.split(' ') if len(split_text) > 500: for start_idx in range(0, len(split_text), 500): end_idx = start_idx + 500 if start_idx + 500 < len(split_text) else len(split_text) - 1 new_texts.append(' '.join(split_text[start_idx: end_idx])) else: new_texts.append(text) training_data[auth] = new_texts print('after chunk:') num_texts = sum([len(x) for x in training_data.values()]) num_auths = sum([1 for x in training_data.keys()]) all_texts, total_len = [], 0 for texts in training_data.values(): for text in texts: total_len += len(text) avg_txt_len = total_len / num_texts print(f'\tnum_texts: {num_texts}') print(f'\tnum_auths: {num_auths}') print(f'\ttext_len: {total_len}') print(f'\tavg_len: {avg_txt_len}') # now write this to a new file, imdb62-train.tsv and imdb62-test.tsv training_save_path = os.path.join(args.dataset_save_path, 'imdb62-train.txt') training_nc_save_path = os.path.join(args.dataset_save_path, 'imdb62-train-nc.txt') testing_save_path = os.path.join(args.dataset_save_path, 'imdb62-test.txt') if args.what_for == 'contrastive': # need to make a bunch of pairs somehow? # form these pairs into same, text1, text2 # how do we do this in a reasonable manner? # but we can still use the AaEvaluator to see how it does on a non split train set? # we have a dict, key'd on authors, and values is a list of texts, For this, I'll just create a bunch of tuples and start sampling? till I hit some desired number? # say we will select about .5 to be same author, and .5 to be different author? # but this is not really good in terms of base rate if we evaluate with the normal metrics? # actually randomly sampling should give an approximately 1/62 being same author, so should be good?
but then just predict not same and get pretty high accuracy # however, because this is a closed set identification task, I don't want to change evaluation, so I will just # pick a middle ground between 1/62 and 50%, say 25% being same author pairs, and run with it len_training_data = sum([len(v) for k, v in training_data.items()]) # how many samples do we want? A bit hard to tell, but lets say 4x the dataset size for now num_desired_pairs = 4*len_training_data # 25% being same author pairs num_same_pairs = int(0.25*num_desired_pairs) num_diff_pairs = num_desired_pairs - num_same_pairs training_pairs = [] for i in range(num_same_pairs): auth = random.sample(training_data.keys(), 1)[0] # now sample two texts from that auth texts = random.sample(training_data[auth], 2) training_pairs.append([1, texts[0], texts[1]]) # now add different pairs auth1 = auth2 = None for i in range(num_diff_pairs): while auth1 == auth2: auth1 = random.sample(training_data.keys(), 1)[0] auth2 = random.sample(training_data.keys(), 1)[0] text1 = random.sample(training_data[auth1], 1)[0] text2 = random.sample(training_data[auth2], 1)[0] training_pairs.append([0, text1, text2]) # now write this training file training_save_path = os.path.join(args.dataset_save_path, 'imdb62-contrastive-train.txt') testing_save_path = os.path.join(args.dataset_save_path, 'imdb62-contrastive-test.txt') print(f'saving new contrastive training dataset at {training_save_path}') with open(training_save_path, 'w') as f: writer = csv.writer(f) # write header writer.writerow(['same', 'text1', 'text2']) for dp in training_pairs: writer.writerow(dp) # do the same for testing len_test_data = sum([len(v) for k, v in test_data.items()]) # how many samples do we want? A bit hard to tell, but lets say 4x the dataset size for now num_desired_pairs = 4 * len_test_data # 25% being same author pairs num_same_pairs = int(0.25 * num_desired_pairs) num_diff_pairs = num_desired_pairs - num_same_pairs testing_pairs = [] for i in range(num_same_pairs): auth = random.sample(test_data.keys(), 1)[0] # now sample two texts from that auth texts = random.sample(test_data[auth], 2) testing_pairs.append([1, texts[0], texts[1]]) # now add different pairs auth1 = auth2 = None for i in range(num_diff_pairs): while auth1 == auth2: auth1 = random.sample(test_data.keys(), 1)[0] auth2 = random.sample(test_data.keys(), 1)[0] text1 = random.sample(test_data[auth1], 1)[0] text2 = random.sample(test_data[auth2], 1)[0] testing_pairs.append([0, text1, text2]) print(f'saving new contrastive test dataset at {testing_save_path}') with open(testing_save_path, 'w') as f: writer = csv.writer(f) # write header writer.writerow(['same', 'text1', 'text2']) for dp in testing_pairs: writer.writerow(dp) else: training_rows_written = 0 print(f'saving new training dataset at {training_save_path}') with open(training_save_path, 'w') as f: for k, v in training_data.items(): for content in v: f.write(k + '\t' + content) training_rows_written += 1 print(f'saving new training dataset at {training_nc_save_path}') with open(training_nc_save_path, 'w') as f: for k, v in training_data_nc.items(): for content in v: f.write(k + '\t' + content) # this no longer holds (i.e. chunking) # assert training_rows_written == 49_600, f'There were {training_rows_written} rows written, should have been 49,600.'
testing_rows_written = 0 print(f'saving new testing dataset at {testing_save_path}') with open(testing_save_path, 'w') as f: for k, v in test_data.items(): for content in v: f.write(k + '\t' + content) testing_rows_written += 1 assert testing_rows_written == 12_400, f'There were {testing_rows_written} rows written, should have been 12,400.'
[ "numpy.random.uniform", "copy.deepcopy", "csv.reader", "argparse.ArgumentParser", "csv.writer", "random.sample", "sentence_transformers.InputExample", "random.choice", "os.path.join" ]
[((7360, 7459), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get args for building train/test splits of IMDB dataset"""'}), "(description=\n 'Get args for building train/test splits of IMDB dataset')\n", (7383, 7459), False, 'import argparse\n'), ((8634, 8662), 'copy.deepcopy', 'copy.deepcopy', (['training_data'], {}), '(training_data)\n', (8647, 8662), False, 'import copy\n'), ((10435, 10491), 'os.path.join', 'os.path.join', (['args.dataset_save_path', '"""imdb62-train.txt"""'], {}), "(args.dataset_save_path, 'imdb62-train.txt')\n", (10447, 10491), False, 'import random, os\n'), ((10520, 10579), 'os.path.join', 'os.path.join', (['args.dataset_save_path', '"""imdb62-train-nc.txt"""'], {}), "(args.dataset_save_path, 'imdb62-train-nc.txt')\n", (10532, 10579), False, 'import random, os\n'), ((10604, 10659), 'os.path.join', 'os.path.join', (['args.dataset_save_path', '"""imdb62-test.txt"""'], {}), "(args.dataset_save_path, 'imdb62-test.txt')\n", (10616, 10659), False, 'import random, os\n'), ((1887, 1918), 'random.choice', 'random.choice', (['self.data[auth1]'], {}), '(self.data[auth1])\n', (1900, 1918), False, 'import random, os\n'), ((2334, 2381), 'sentence_transformers.InputExample', 'InputExample', ([], {'texts': '[text1, text2]', 'label': 'label'}), '(texts=[text1, text2], label=label)\n', (2346, 2381), False, 'from sentence_transformers import InputExample\n'), ((2861, 2874), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (2871, 2874), False, 'import csv\n'), ((4407, 4430), 'random.choice', 'random.choice', (['raw_data'], {}), '(raw_data)\n', (4420, 4430), False, 'import random, os\n'), ((4445, 4468), 'random.choice', 'random.choice', (['raw_data'], {}), '(raw_data)\n', (4458, 4468), False, 'import random, os\n'), ((12884, 12952), 'os.path.join', 'os.path.join', (['args.dataset_save_path', '"""imdb62-contrastive-train.txt"""'], {}), "(args.dataset_save_path, 'imdb62-contrastive-train.txt')\n", (12896, 12952), False, 'import random, os\n'), ((12981, 13048), 'os.path.join', 'os.path.join', (['args.dataset_save_path', '"""imdb62-contrastive-test.txt"""'], {}), "(args.dataset_save_path, 'imdb62-contrastive-test.txt')\n", (12993, 13048), False, 'import random, os\n'), ((1930, 1949), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1947, 1949), True, 'import numpy as np\n'), ((2265, 2296), 'random.choice', 'random.choice', (['self.data[auth2]'], {}), '(self.data[auth2])\n', (2278, 2296), False, 'import random, os\n'), ((4586, 4609), 'random.choice', 'random.choice', (['raw_data'], {}), '(raw_data)\n', (4599, 4609), False, 'import random, os\n'), ((4628, 4651), 'random.choice', 'random.choice', (['raw_data'], {}), '(raw_data)\n', (4641, 4651), False, 'import random, os\n'), ((12271, 12308), 'random.sample', 'random.sample', (['training_data[auth]', '(2)'], {}), '(training_data[auth], 2)\n', (12284, 12308), False, 'import random, os\n'), ((13203, 13216), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (13213, 13216), False, 'import csv\n'), ((13972, 14005), 'random.sample', 'random.sample', (['test_data[auth]', '(2)'], {}), '(test_data[auth], 2)\n', (13985, 14005), False, 'import random, os\n'), ((14642, 14655), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (14652, 14655), False, 'import csv\n'), ((2119, 2150), 'random.choice', 'random.choice', (['self.data[auth1]'], {}), '(self.data[auth1])\n', (2132, 2150), False, 'import random, os\n'), ((3995, 4044), 'sentence_transformers.InputExample', 'InputExample', ([], {'texts': '[dp1[1], 
dp2[1]]', 'label': 'label'}), '(texts=[dp1[1], dp2[1]], label=label)\n', (4007, 4044), False, 'from sentence_transformers import InputExample\n'), ((4850, 4899), 'sentence_transformers.InputExample', 'InputExample', ([], {'texts': '[dp1[1], dp2[1]]', 'label': 'label'}), '(texts=[dp1[1], dp2[1]], label=label)\n', (4862, 4899), False, 'from sentence_transformers import InputExample\n'), ((12658, 12696), 'random.sample', 'random.sample', (['training_data[auth1]', '(1)'], {}), '(training_data[auth1], 1)\n', (12671, 12696), False, 'import random, os\n'), ((12720, 12758), 'random.sample', 'random.sample', (['training_data[auth2]', '(1)'], {}), '(training_data[auth2], 1)\n', (12733, 12758), False, 'import random, os\n'), ((14346, 14380), 'random.sample', 'random.sample', (['test_data[auth1]', '(1)'], {}), '(test_data[auth1], 1)\n', (14359, 14380), False, 'import random, os\n'), ((14404, 14438), 'random.sample', 'random.sample', (['test_data[auth2]', '(1)'], {}), '(test_data[auth2], 1)\n', (14417, 14438), False, 'import random, os\n')]
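The IMDB62ContrastiveDataset in the row above decides, per sample, whether to force a same-author pair (with probability base_rate) or to draw the second text from a randomly chosen author. A stripped-down sketch of that sampling step over a toy author-to-texts dict; the author names, texts, and base rate here are placeholders:

import random
import numpy as np

data = {"a": ["a1", "a2", "a3"], "b": ["b1", "b2"], "c": ["c1", "c2"]}
base_rate = 0.2  # fraction of forced same-author pairs

def sample_pair():
    auth1 = random.choice(list(data))
    text1 = random.choice(data[auth1])
    if np.random.uniform() <= base_rate:          # forced same-author pair, label 1
        text2 = None
        while text2 is None or text2 == text1:
            text2 = random.choice(data[auth1])
        return text1, text2, 1
    auth2 = random.choice(list(data))               # random author; labelled 0 as in the dataset code,
    return text1, random.choice(data[auth2]), 0    # even if auth2 happens to equal auth1

print([sample_pair() for _ in range(3)])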
from abc import abstractmethod, ABC from py_wake.site._site import Site, LocalWind from py_wake.wind_turbines import WindTurbines import numpy as np from py_wake.flow_map import FlowMap, HorizontalGrid class WindFarmModel(ABC): """Base class for RANS and engineering flow models""" def __init__(self, site, windTurbines): assert isinstance(site, Site) assert isinstance(windTurbines, WindTurbines) self.site = site self.windTurbines = windTurbines def __call__(self, x, y, h=None, type=0, wd=None, ws=None, yaw_ilk=None): # a class instance becomes a directly callable object, like a function, as long as the class defines a __call__() method """Run the wind farm simulation Parameters ---------- x : array_like Wind turbine x positions y : array_like Wind turbine y positions h : array_like, optional Wind turbine hub heights type : int or array_like, optional Wind turbine type, default is 0 wd : int or array_like Wind direction(s) ws : int, float or array_like Wind speed(s) Returns ------- SimulationResult """ assert len(x) == len(y) # assert evaluates an expression and raises an exception when the condition is false. # an assertion reports the error immediately when the condition is not met, instead of waiting for the program to crash later type, h, _ = self.windTurbines.get_defaults(len(x), type, h) # return np.asarray(type_i), np.asarray(h_i), np.asarray(d_i) wd, ws = self.site.get_defaults(wd, ws) if len(x) == 0: wd, ws = np.atleast_1d(wd), np.atleast_1d(ws) z = np.zeros((0, len(wd), len(ws))) localWind = LocalWind(z, z, z, z) return SimulationResult(self, localWind=localWind, x_i=x, y_i=y, h_i=h, type_i=type, yaw_ilk=yaw_ilk, wd=wd, ws=ws, WS_eff_ilk=z, TI_eff_ilk=z, power_ilk=z, ct_ilk=z) WS_eff_ilk, TI_eff_ilk, power_ilk, ct_ilk, localWind = self.calc_wt_interaction( x_i=x, y_i=y, h_i=h, type_i=type, yaw_ilk=yaw_ilk, wd=wd, ws=ws) return SimulationResult(self, localWind=localWind, x_i=x, y_i=y, h_i=h, type_i=type, yaw_ilk=yaw_ilk, wd=wd, ws=ws, WS_eff_ilk=WS_eff_ilk, TI_eff_ilk=TI_eff_ilk, power_ilk=power_ilk, ct_ilk=ct_ilk) @abstractmethod def calc_wt_interaction(self, x_i, y_i, h_i=None, type_i=None, yaw_ilk=None, wd=None, ws=None): """Calculate effective wind speed, turbulence intensity, power and thrust coefficient, and local site parameters Typical users should not call this function directly, but by calling the windFarmModel object (invokes the __call__() function above) which returns a nice SimulationResult object Parameters ---------- x_i : array_like X position of wind turbines y_i : array_like Y position of wind turbines h_i : array_like or None, optional Hub height of wind turbines\n If None, default, the standard hub height is used type_i : array_like or None, optional Wind turbine types\n If None, default, the first type is used (type=0) wd : int, float, array_like or None Wind direction(s)\n If None, default, the wake is calculated for site.default_wd ws : int, float, array_like or None Wind speed(s)\n If None, default, the wake is calculated for site.default_ws Returns ------- WS_eff_ilk : array_like Effective wind speeds [m/s] TI_eff_ilk : array_like Turbulence intensities.
Should be effective, but not implemented yet power_ilk : array_like Power productions [W] ct_ilk : array_like Thrust coefficients localWind : LocalWind Local free-flow wind """ class SimulationResult(): """Simulation result returned when calling a WindFarmModel object""" def __init__(self, windFarmModel, localWind, x_i, y_i, h_i, type_i, yaw_ilk, wd, ws, WS_eff_ilk, TI_eff_ilk, power_ilk, ct_ilk): self.windFarmModel = windFarmModel self.localWind = localWind self.x_i = x_i self.y_i = y_i self.h_i = h_i self.type_i = type_i self.yaw_ilk = yaw_ilk self.WS_eff_ilk = WS_eff_ilk self.TI_eff_ilk = TI_eff_ilk self.power_ilk = power_ilk self.ct_ilk = ct_ilk self.wd = wd self.ws = ws def aep_ilk(self, normalize_probabilities=False, with_wake_loss=True): """Annual Energy Production of all turbines (i), wind directions (l) and wind speeds (k) in GWh Parameters ---------- normalize_probabilities : Optional bool, defaults to False In case only a subset of all wind speeds and/or wind directions is simulated, this parameter determines whether the returned AEP represents the energy produced in the fraction of a year where these flow cases occur or a whole year of only these cases. If for example, wd=[0], then - False means that the AEP only includes energy from the fraction of the year\n with northern wind (359.5-0.5deg), i.e. no power is produced the rest of the year. - True means that the AEP represents a whole year of northern wind. with_wake_loss : Optional bool, defaults to True If True, wake loss is included, i.e. power is calculated using local effective wind speed\n If False, wake loss is neglected, i.e. power is calculated using local free flow wind speed """ P_ilk = self.localWind.P_ilk if normalize_probabilities: P_ilk /= P_ilk.sum() if with_wake_loss: return self.power_ilk * P_ilk * 24 * 365 * 1e-9 else: power_ilk = self.windFarmModel.windTurbines.power(self.localWind.WS_ilk, self.type_i) return power_ilk * P_ilk * 24 * 365 * 1e-9 def aep(self, normalize_probabilities=False, with_wake_loss=True): """Annual Energy Production (sum of all wind turbines, directions and speeds) in GWh. See aep_ilk """ return self.aep_ilk(normalize_probabilities, with_wake_loss).sum() def flow_map(self, grid=None, wd=None, ws=None): """Return a FlowMap object with WS_eff and TI_eff of all grid points Parameters ---------- grid : Grid or tuple(X, Y, x, y, h) Grid, e.g.
HorizontalGrid or\n tuple(X, Y, x, y, h) where X, Y is the meshgrid for visualizing data\n and x, y, h are the flattened grid points See Also -------- pywake.wind_farm_models.flow_map.FlowMap """ if grid is None: grid = HorizontalGrid() if isinstance(grid, HorizontalGrid): grid = grid(self.x_i, self.y_i, self.h_i) if wd is None: wd = self.wd else: assert np.all(np.isin(wd, self.wd)), "All wd=%s not in simulation result" % wd if ws is None: ws = self.ws else: assert np.all(np.isin(ws, self.ws)), "All ws=%s not in simulation result (ws=%s)" % (ws, self.ws) wd, ws = np.atleast_1d(wd), np.atleast_1d(ws) l_indices = np.argwhere(wd[:, None] == self.wd)[:, 1] # wd is the wd argument passed to flow_map; self.wd is the value stored when the SimulationResult was initialized k_indices = np.argwhere(ws[:, None] == self.ws)[:, 1] X, Y, x_j, y_j, h_j = grid # modified here in order to inspect the intermediate variables lWD = self.localWind.WD_ilk[:, l_indices][:, :, :] # original code: lWD = self.localWind.WD_ilk[:, l_indices][:, :, k_indices] lWS = self.localWind.WS_ilk[:, l_indices][:, :, k_indices] lTI = self.localWind.TI_ilk[:, l_indices][:, :, k_indices] WSe = self.WS_eff_ilk[:, l_indices][:, :, k_indices] TIe = self.TI_eff_ilk[:, l_indices][:, :, k_indices] cti = self.ct_ilk[:, l_indices][:, :, k_indices] lw_j, WS_eff_jlk, TI_eff_jlk = self.windFarmModel._flow_map( x_j, y_j, h_j, self.x_i, self.y_i, self.h_i, self.type_i, self.yaw_ilk, lWD, lWS, lTI, WSe, TIe, cti, wd, ws) if self.yaw_ilk is not None: yaw_ilk = self.yaw_ilk[:, l_indices][:, :, k_indices] else: yaw_ilk = None return FlowMap(self, X, Y, lw_j, WS_eff_jlk, TI_eff_jlk, wd, ws, yaw_ilk=yaw_ilk) def main(): if __name__ == '__main__': from py_wake.examples.data.iea37 import IEA37Site, IEA37_WindTurbines from py_wake import IEA37SimpleBastankhahGaussian import matplotlib.pyplot as plt site = IEA37Site(16) x, y = site.initial_position.T windTurbines = IEA37_WindTurbines() # IEA37 simplified Bastankhah Gaussian wake model wind_farm_model = IEA37SimpleBastankhahGaussian(site, windTurbines) simulation_result = wind_farm_model(x, y) fm = simulation_result.flow_map(wd=30) fm.plot_wake_map() plt.figure() fm.plot(fm.power_xylk()[:, :, 0, 0] * 1e-3, "Power [kW]") fm = simulation_result.flow_map(grid=HorizontalGrid(resolution=50)) plt.figure() fm.plot(fm.aep_xy(), "AEP [GWh]") plt.show() main()
[ "numpy.isin", "numpy.atleast_1d", "matplotlib.pyplot.show", "py_wake.flow_map.HorizontalGrid", "py_wake.flow_map.FlowMap", "numpy.argwhere", "matplotlib.pyplot.figure", "py_wake.site._site.LocalWind", "py_wake.IEA37SimpleBastankhahGaussian", "py_wake.examples.data.iea37.IEA37Site", "py_wake.examples.data.iea37.IEA37_WindTurbines" ]
[((8723, 8797), 'py_wake.flow_map.FlowMap', 'FlowMap', (['self', 'X', 'Y', 'lw_j', 'WS_eff_jlk', 'TI_eff_jlk', 'wd', 'ws'], {'yaw_ilk': 'yaw_ilk'}), '(self, X, Y, lw_j, WS_eff_jlk, TI_eff_jlk, wd, ws, yaw_ilk=yaw_ilk)\n', (8730, 8797), False, 'from py_wake.flow_map import FlowMap, HorizontalGrid\n'), ((9059, 9072), 'py_wake.examples.data.iea37.IEA37Site', 'IEA37Site', (['(16)'], {}), '(16)\n', (9068, 9072), False, 'from py_wake.examples.data.iea37 import IEA37Site, IEA37_WindTurbines\n'), ((9135, 9155), 'py_wake.examples.data.iea37.IEA37_WindTurbines', 'IEA37_WindTurbines', ([], {}), '()\n', (9153, 9155), False, 'from py_wake.examples.data.iea37 import IEA37Site, IEA37_WindTurbines\n'), ((9208, 9257), 'py_wake.IEA37SimpleBastankhahGaussian', 'IEA37SimpleBastankhahGaussian', (['site', 'windTurbines'], {}), '(site, windTurbines)\n', (9237, 9257), False, 'from py_wake import IEA37SimpleBastankhahGaussian\n'), ((9391, 9403), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9401, 9403), True, 'import matplotlib.pyplot as plt\n'), ((9555, 9567), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9565, 9567), True, 'import matplotlib.pyplot as plt\n'), ((9618, 9628), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9626, 9628), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1679), 'py_wake.site._site.LocalWind', 'LocalWind', (['z', 'z', 'z', 'z'], {}), '(z, z, z, z)\n', (1667, 1679), False, 'from py_wake.site._site import Site, LocalWind\n'), ((7092, 7108), 'py_wake.flow_map.HorizontalGrid', 'HorizontalGrid', ([], {}), '()\n', (7106, 7108), False, 'from py_wake.flow_map import FlowMap, HorizontalGrid\n'), ((7550, 7567), 'numpy.atleast_1d', 'np.atleast_1d', (['wd'], {}), '(wd)\n', (7563, 7567), True, 'import numpy as np\n'), ((7569, 7586), 'numpy.atleast_1d', 'np.atleast_1d', (['ws'], {}), '(ws)\n', (7582, 7586), True, 'import numpy as np\n'), ((7607, 7642), 'numpy.argwhere', 'np.argwhere', (['(wd[:, None] == self.wd)'], {}), '(wd[:, None] == self.wd)\n', (7618, 7642), True, 'import numpy as np\n'), ((7729, 7764), 'numpy.argwhere', 'np.argwhere', (['(ws[:, None] == self.ws)'], {}), '(ws[:, None] == self.ws)\n', (7740, 7764), True, 'import numpy as np\n'), ((1549, 1566), 'numpy.atleast_1d', 'np.atleast_1d', (['wd'], {}), '(wd)\n', (1562, 1566), True, 'import numpy as np\n'), ((1568, 1585), 'numpy.atleast_1d', 'np.atleast_1d', (['ws'], {}), '(ws)\n', (1581, 1585), True, 'import numpy as np\n'), ((7296, 7316), 'numpy.isin', 'np.isin', (['wd', 'self.wd'], {}), '(wd, self.wd)\n', (7303, 7316), True, 'import numpy as np\n'), ((7449, 7469), 'numpy.isin', 'np.isin', (['ws', 'self.ws'], {}), '(ws, self.ws)\n', (7456, 7469), True, 'import numpy as np\n'), ((9516, 9545), 'py_wake.flow_map.HorizontalGrid', 'HorizontalGrid', ([], {'resolution': '(50)'}), '(resolution=50)\n', (9530, 9545), False, 'from py_wake.flow_map import FlowMap, HorizontalGrid\n')]
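The aep_ilk method in the row above converts per-turbine, per-wind-direction, per-wind-speed power into annual energy by weighting each flow case with its probability and scaling from watts to GWh per year. The same arithmetic on small made-up arrays (the numbers are placeholders, not py_wake output):

import numpy as np

# power_ilk: power in W for 2 turbines (i), 1 wind direction (l), 2 wind speeds (k)
power_ilk = np.array([[[1.5e6, 2.0e6]], [[1.2e6, 1.8e6]]])
P_ilk = np.full(power_ilk.shape, 1.0 / power_ilk.size)  # flow-case probabilities summing to 1

aep_gwh = np.sum(power_ilk * P_ilk * 24 * 365 * 1e-9)     # GWh/year, as in aep_ilk / aep
print(aep_gwh)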
import pandas as pd import numpy as np import scipy import os, sys import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pylab import matplotlib as mpl import seaborn as sns import analysis_utils from multiprocessing import Pool sys.path.append('../utils/') from game_utils import * in_dir = '../../' out_dir = '../../plots/' #out_dir = '../../figures/' # data_names = ['Behavioral Data', 'Goal Inference Model'] # data_dirs = ['new-processed-processed-1en01','goal-inference-simulations-processed-1en01'] # nominal_dirs = ['new-synthetic-processed-1en01','synthetic-goal-inference-simulations-processed-1en01'] # matched_dirs = ['new-synthetic-score-matched-processed-1en01','synthetic-score-matched-goal-inference-simulations-processed-1en01'] # subset = '1en01' # start = 1440 # groups = ['High Scoring','Low Scoring',''] # behaviors = ['Skilled',''] # score_cutoff = 0.75 # matched = [True, False] data_names = ['Goal Inference Noise Model'] data_dirs = ['parset-simulations-processed-1en01'] nominal_dirs = ['synthetic-parset-simulations-processed-1en01'] matched_dirs = ['synthetic-score-matched-parset-simulations-processed-1en01'] subset = '1en01' # data_names = ['Goal Inference Attention Model'] # data_dirs = ['goal-inference-attention-simulations-processed-1en01'] # nominal_dirs = ['synthetic-goal-inference-attention-simulations-processed-1en01'] # matched_dirs = ['synthetic-score-matched-goal-inference-attention-simulations-processed-1en01'] # subset = '1en01' # data_names = ['Social Heuristic'] # data_dirs = ['social-heuristic-simulations-processed-1en01'] # nominal_dirs = ['synthetic-social-heuristic-simulations-processed-1en01'] # matched_dirs = ['synthetic-score-matched-social-heuristic-simulations-processed-1en01'] # subset = '1en01' # data_names = ['Unconditional Social Heuristic'] # data_dirs = ['unconditional-social-heuristic-simulations-simulations-processed-1en01'] # nominal_dirs = ['synthetic-unconditional-social-heuristic-simulations-simulations-processed-1en01'] # matched_dirs = ['synthetic-score-matched-unconditional-social-heuristic-simulations-simulations-processed-1en01'] # subset = '1en01' start = 1440 groups = [''] behaviors = [''] score_cutoff = 0.75 matched = [True, False] def score(sub): return np.mean(sub['bg_val']) def speed(sub): return sum(sub['velocity'] > 3) > 0 def spinning(sub): return sum(sub['spinning']) > 0 def dist_to_mean_others(sub): return np.mean(sub['dist_to_mean_others']) def face_towards_after_away(sub): ignore_state = lambda sub, i: sub.iloc[i]['spinning'] this_state = lambda sub, i: sub.iloc[i]['ave_dist_others'] < sub.iloc[i]['dist_to_mean_others'] next_state = lambda sub, i: sub.iloc[i]['facing'] return analysis_utils.get_value(sub, ignore_state, this_state, next_state) def face_away_when_low(sub): start_index = 1 initial_condition = lambda sub, i: (sub.iloc[i]['ave_dist_others'] > sub.iloc[i]['dist_to_mean_others']) and sub.iloc[i]['bg_val'] < 1.0 while_condition = lambda sub, i: sub.iloc[i]['bg_val'] < 1.0 final_condition = lambda sub, i: (sub.iloc[i]['ave_dist_others'] < sub.iloc[i]['dist_to_mean_others']) return analysis_utils.get_while_value(sub, initial_condition, while_condition, final_condition, start_index) def facing_spinning(sub): start_index = 1 initial_condition = lambda sub, i: ~sub.iloc[i-1]['spinning'] and ~sub.iloc[i-1]['other_spinning'] and ~sub.iloc[i]['spinning'] and sub.iloc[i]['other_spinning'] while_condition = lambda sub, i: ~sub.iloc[i-1]['facing_spinning'] final_condition = lambda sub, i: 
sub.iloc[i]['facing_spinning'] return analysis_utils.get_while_value(sub, initial_condition, while_condition, final_condition, start_index) function_names = ['Score','Speed','Spinning','Distance to Mean of Other Positions','Average Time Before Facing Distant Group', 'Average Time Before Facing Away From Group After Low Score', 'Average Time Before Facing Spinning Players'] functions = [score, speed, spinning, dist_to_mean_others, face_towards_after_away, face_away_when_low, facing_spinning] compares = [False, False, False, True, True, True, True] # function_names = ['Distance to Mean of Other Positions', 'Average Time Before Facing Away From Group After Low Score', 'Average Time Before Facing Spinning Players'] # functions = [dist_to_mean_others, face_away_when_low, facing_spinning] # compares = [True, True, True] def plot_synthetic(args): data_ind, func_ind, group, behavior, match = args data_dir = in_dir + data_dirs[data_ind] function = functions[func_ind] if match: synthetic_dir = in_dir + matched_dirs[data_ind] else: synthetic_dir = in_dir + nominal_dirs[data_ind] games = [] ns = [] values = [] scores = [] sources = [] lengths = [] for t,game in enumerate(os.listdir(data_dir)): if game[-4:] != '.csv': continue if game.split('_')[-2].split('-')[1] != subset: continue data = pd.io.parsers.read_csv(data_dir + '/' + game) syn_data = pd.io.parsers.read_csv(synthetic_dir + '/' + game) if compares[func_ind]: if match: dfs = ['Interacting Groups','Matched Nominal Groups'] else: dfs = ['Interacting Groups','Random Nominal Groups'] else: dfs = ['Interacting Groups'] for df in dfs: if df == 'Interacting Groups': players = list(set(data[data['tick'] == start]['pid'].dropna())) else: players = list(set(syn_data[syn_data['tick'] == start]['pid'].dropna())) n = len(players) # if n == 6: # n = 5 for i,p in enumerate(players): if df == 'Interacting Groups': sub = data[data['pid'] == p] else: sub = syn_data[syn_data['pid'] == p] ignore = False if len(sub) < start: ignore = True sub = sub.iloc[start:].copy() if behavior == 'Skilled': if np.mean(sub['spinning']) == 0 or np.mean(sub['velocity']>3) == 0: ignore = True if group == 'High Scoring': if np.mean(sub['bg_val']) < score_cutoff: ignore = True if group == 'Low Scoring': if np.mean(sub['bg_val']) >= score_cutoff: ignore = True if ignore: values += [np.nan] else: val = function(sub) values += [val] games += [game] ns += [n] scores += [np.mean(sub['bg_val'])] sources += [df] lengths += [len(sub)] data = pd.DataFrame({'Game':games,'Score':scores,'Number of Players':ns,function_names[func_ind]:values,'Source':sources,'Lengths':lengths}) sns.set(font = 'serif', context = 'paper', style = 'white') sns.despine() try: g = sns.factorplot('Number of Players', function_names[func_ind], markers = ['o', 's'], linestyles = ['-','--'], data = data, kind='point', dodge = 0.15, order = sorted(set(data['Number of Players'])), hue = 'Source', legend = False) except: import pdb; pdb.set_trace() title = data_names[data_ind] + ', Noise Level: ' + subset filename = subset + '-' + function_names[func_ind].replace(" ", "") if group != '' or behavior != '': title += '\n' + group + ' ' + behavior + ' Players' if group != '': filename += '-' + group.replace(" ", "") if behavior != '': filename += '-' + behavior.replace(" ", "") if compares[func_ind]: filename += '-Matched' if match else '' filename += '-' + data_names[data_ind].replace(" ", "") #plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.125), ncol = 2) plt.legend(loc='lower center', ncol = 2) 
plt.subplots_adjust(top=0.85) g.fig.suptitle(title, size = 10) fig = plt.gcf() #fig.set_size_inches(5, 5) try: fig.savefig(out_dir + filename + '.pdf') except: import pdb; pdb.set_trace() plt.close(fig) pars = [] for i in range(len(data_dirs)): for j in range(len(functions)): for g in groups: for b in behaviors: if compares[j]: for m in matched: pars += [(i, j, g, b, m)] else: pars += [(i, j, g, b, False)] print(len(pars)) p = Pool(8) p.map(plot_synthetic, pars)
[ "sys.path.append", "pandas.DataFrame", "pandas.io.parsers.read_csv", "analysis_utils.get_while_value", "matplotlib.pyplot.close", "matplotlib.pyplot.legend", "seaborn.despine", "analysis_utils.get_value", "matplotlib.use", "numpy.mean", "pdb.set_trace", "multiprocessing.Pool", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.gcf", "seaborn.set", "os.listdir" ]
[((88, 109), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (102, 109), False, 'import matplotlib\n'), ((261, 289), 'sys.path.append', 'sys.path.append', (['"""../utils/"""'], {}), "('../utils/')\n", (276, 289), False, 'import os, sys\n'), ((9004, 9011), 'multiprocessing.Pool', 'Pool', (['(8)'], {}), '(8)\n', (9008, 9011), False, 'from multiprocessing import Pool\n'), ((2297, 2319), 'numpy.mean', 'np.mean', (["sub['bg_val']"], {}), "(sub['bg_val'])\n", (2304, 2319), True, 'import numpy as np\n'), ((2475, 2510), 'numpy.mean', 'np.mean', (["sub['dist_to_mean_others']"], {}), "(sub['dist_to_mean_others'])\n", (2482, 2510), True, 'import numpy as np\n'), ((2769, 2836), 'analysis_utils.get_value', 'analysis_utils.get_value', (['sub', 'ignore_state', 'this_state', 'next_state'], {}), '(sub, ignore_state, this_state, next_state)\n', (2793, 2836), False, 'import analysis_utils\n'), ((3211, 3316), 'analysis_utils.get_while_value', 'analysis_utils.get_while_value', (['sub', 'initial_condition', 'while_condition', 'final_condition', 'start_index'], {}), '(sub, initial_condition, while_condition,\n final_condition, start_index)\n', (3241, 3316), False, 'import analysis_utils\n'), ((3676, 3781), 'analysis_utils.get_while_value', 'analysis_utils.get_while_value', (['sub', 'initial_condition', 'while_condition', 'final_condition', 'start_index'], {}), '(sub, initial_condition, while_condition,\n final_condition, start_index)\n', (3706, 3781), False, 'import analysis_utils\n'), ((7156, 7304), 'pandas.DataFrame', 'pd.DataFrame', (["{'Game': games, 'Score': scores, 'Number of Players': ns, function_names[\n func_ind]: values, 'Source': sources, 'Lengths': lengths}"], {}), "({'Game': games, 'Score': scores, 'Number of Players': ns,\n function_names[func_ind]: values, 'Source': sources, 'Lengths': lengths})\n", (7168, 7304), True, 'import pandas as pd\n'), ((7299, 7352), 'seaborn.set', 'sns.set', ([], {'font': '"""serif"""', 'context': '"""paper"""', 'style': '"""white"""'}), "(font='serif', context='paper', style='white')\n", (7306, 7352), True, 'import seaborn as sns\n'), ((7363, 7376), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (7374, 7376), True, 'import seaborn as sns\n'), ((8304, 8342), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower center"""', 'ncol': '(2)'}), "(loc='lower center', ncol=2)\n", (8314, 8342), True, 'import matplotlib.pyplot as plt\n'), ((8349, 8378), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.85)'}), '(top=0.85)\n', (8368, 8378), True, 'import matplotlib.pyplot as plt\n'), ((8431, 8440), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8438, 8440), True, 'import matplotlib.pyplot as plt\n'), ((8592, 8606), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8601, 8606), True, 'import matplotlib.pyplot as plt\n'), ((4909, 4929), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (4919, 4929), False, 'import os, sys\n'), ((5107, 5152), 'pandas.io.parsers.read_csv', 'pd.io.parsers.read_csv', (["(data_dir + '/' + game)"], {}), "(data_dir + '/' + game)\n", (5129, 5152), True, 'import pandas as pd\n'), ((5172, 5222), 'pandas.io.parsers.read_csv', 'pd.io.parsers.read_csv', (["(synthetic_dir + '/' + game)"], {}), "(synthetic_dir + '/' + game)\n", (5194, 5222), True, 'import pandas as pd\n'), ((7661, 7676), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7674, 7676), False, 'import pdb\n'), ((8567, 8582), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (8580, 8582), 
False, 'import pdb\n'), ((7038, 7060), 'numpy.mean', 'np.mean', (["sub['bg_val']"], {}), "(sub['bg_val'])\n", (7045, 7060), True, 'import numpy as np\n'), ((6550, 6572), 'numpy.mean', 'np.mean', (["sub['bg_val']"], {}), "(sub['bg_val'])\n", (6557, 6572), True, 'import numpy as np\n'), ((6693, 6715), 'numpy.mean', 'np.mean', (["sub['bg_val']"], {}), "(sub['bg_val'])\n", (6700, 6715), True, 'import numpy as np\n'), ((6362, 6386), 'numpy.mean', 'np.mean', (["sub['spinning']"], {}), "(sub['spinning'])\n", (6369, 6386), True, 'import numpy as np\n'), ((6395, 6423), 'numpy.mean', 'np.mean', (["(sub['velocity'] > 3)"], {}), "(sub['velocity'] > 3)\n", (6402, 6423), True, 'import numpy as np\n')]
import pickle from matplotlib import pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np import cassie import time from tempfile import TemporaryFile FILE_PATH = "./hardware_logs/aslip_unified_no_delta_80_TS_only_sim/" FILE_NAME = "2020-01-27_10:26_logfinal" logs = pickle.load(open(FILE_PATH + FILE_NAME + ".pkl", "rb")) #load in file with cassie data # data = {"time": time_log, "output": output_log, "input": input_log, "state": state_log, "target_torques": target_torques_log,\ # "target_foot_residual": target_foot_residual_log} time = logs["time"] states_rl = np.array(logs["input"]) states = logs["state"] nn_output = logs["output"] trajectory_steps = logs["trajectory"] speeds = logs["speed"] numStates = len(states) pelvis = np.zeros((numStates, 3)) foot_pos_left = np.zeros((numStates, 6)) foot_pos_right = np.zeros((numStates, 6)) # trajectory_log = np.zeros((numStates, 10)) j=0 for s in states: pelvis[j, :] = s.pelvis.translationalVelocity[:] foot_pos_left[j, :] = np.reshape(np.asarray([s.leftFoot.position[:],s.leftFoot.position[:]]), (6)) foot_pos_right[j, :] = np.reshape(np.asarray([s.rightFoot.position[:],s.rightFoot.position[:]]), (6)) j += 1 # Save stuff for later SAVE_NAME = FILE_PATH + FILE_NAME + '.npz' # np.savez(SAVE_NAME, time = time, motor = motors, joint = joints, torques_measured=torques_mea, left_foot_force = ff_left, right_foot_force = ff_right, left_foot_pos = foot_pos_left, right_foot_pos = foot_pos_right, trajectory = trajectory_log) np.savez(SAVE_NAME, time = time, pelvis=pelvis, left_foot_pos = foot_pos_left, right_foot_pos = foot_pos_right) ########################################## # Plot everything (except for ref traj) ########################################## ax1 = plt.subplot(1,1,1) ax1.plot(time[:], speeds[:], label='speed command') ax1.plot(time[:], states_rl[:,61], label='ROM COM x velocity') ax1.plot(time[:], pelvis[:,0], label='pelvis x velocity') ax1.set_xlabel('Time') ax1.set_ylabel('m/s') ax1.legend(loc='upper right') ax1.set_title('Varying Vels') plt.show()
[ "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.asarray", "numpy.zeros", "numpy.array", "numpy.savez" ]
[((593, 616), 'numpy.array', 'np.array', (["logs['input']"], {}), "(logs['input'])\n", (601, 616), True, 'import numpy as np\n'), ((767, 791), 'numpy.zeros', 'np.zeros', (['(numStates, 3)'], {}), '((numStates, 3))\n', (775, 791), True, 'import numpy as np\n'), ((808, 832), 'numpy.zeros', 'np.zeros', (['(numStates, 6)'], {}), '((numStates, 6))\n', (816, 832), True, 'import numpy as np\n'), ((850, 874), 'numpy.zeros', 'np.zeros', (['(numStates, 6)'], {}), '((numStates, 6))\n', (858, 874), True, 'import numpy as np\n'), ((1533, 1642), 'numpy.savez', 'np.savez', (['SAVE_NAME'], {'time': 'time', 'pelvis': 'pelvis', 'left_foot_pos': 'foot_pos_left', 'right_foot_pos': 'foot_pos_right'}), '(SAVE_NAME, time=time, pelvis=pelvis, left_foot_pos=foot_pos_left,\n right_foot_pos=foot_pos_right)\n', (1541, 1642), True, 'import numpy as np\n'), ((1779, 1799), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1790, 1799), True, 'from matplotlib import pyplot as plt\n'), ((2076, 2086), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2084, 2086), True, 'from matplotlib import pyplot as plt\n'), ((1032, 1092), 'numpy.asarray', 'np.asarray', (['[s.leftFoot.position[:], s.leftFoot.position[:]]'], {}), '([s.leftFoot.position[:], s.leftFoot.position[:]])\n', (1042, 1092), True, 'import numpy as np\n'), ((1136, 1198), 'numpy.asarray', 'np.asarray', (['[s.rightFoot.position[:], s.rightFoot.position[:]]'], {}), '([s.rightFoot.position[:], s.rightFoot.position[:]])\n', (1146, 1198), True, 'import numpy as np\n')]
import numpy as np
import os
from PIL import Image


def convert_to_10class(d):
    d_mod = np.zeros((len(d), 10), dtype=np.float32)
    for num, contents in enumerate(d):
        d_mod[num][int(contents)] = 1.0
    # debug
    # print("d_mod[100] =", d_mod[100])
    # print("d_mod[200] =", d_mod[200])
    return d_mod


def make_1_img(img_batch):  # for debug
    for num, ele in enumerate(img_batch):
        if num != 0:
            continue
        img_tmp = ele
        img_tmp = np.tile(img_tmp, (1, 1, 3)) * 255
        img_tmp = img_tmp.astype(np.uint8)
        image_PIL = Image.fromarray(img_tmp)
        image_PIL.save("./out_images_tripleGAN/debug_img_" + ".png")
    return


def make_output_img(image_array, sample_num_h, out_image_dir, epoch):
    # print("image_array.shape =", image_array.shape)
    # print("np.max(image_array) = ", np.max(image_array))
    # print("np.min(image_array) = ", np.min(image_array))
    # print("np.mean(image_array) = ", np.mean(image_array))
    wide_image = np.zeros((28 * sample_num_h, 28 * sample_num_h, 1), dtype=np.float32)
    for h in range(sample_num_h):
        for w in range(sample_num_h):
            for h_mnist in range(28):
                for w_mnist in range(28):
                    value_ = image_array[h * sample_num_h + w][h_mnist][w_mnist][0]
                    if value_ < 0:
                        wide_image[h * 28 + h_mnist][w * 28 + w_mnist][0] = 0.0
                    elif value_ > 1:
                        wide_image[h * 28 + h_mnist][w * 28 + w_mnist][0] = 1.0
                    else:
                        wide_image[h * 28 + h_mnist][w * 28 + w_mnist][0] = value_

    wide_image = np.tile(wide_image, (1, 1, 3)) * 255
    wide_image = wide_image.astype(np.uint8)
    wide_image_PIL = Image.fromarray(wide_image)
    wide_image_PIL.save(out_image_dir + "/resultImage_" + str(epoch) + ".png")

    small_image = (np.tile(image_array[0], (1, 1, 3)) * 255).astype(np.uint8)
    small_image_PIL = Image.fromarray(small_image)
    small_image_PIL.save(out_image_dir + "/resultImageSmall_" + str(epoch) + ".png")
    return
[ "PIL.Image.fromarray", "numpy.zeros", "numpy.tile" ]
[((1018, 1087), 'numpy.zeros', 'np.zeros', (['(28 * sample_num_h, 28 * sample_num_h, 1)'], {'dtype': 'np.float32'}), '((28 * sample_num_h, 28 * sample_num_h, 1), dtype=np.float32)\n', (1026, 1087), True, 'import numpy as np\n'), ((1786, 1813), 'PIL.Image.fromarray', 'Image.fromarray', (['wide_image'], {}), '(wide_image)\n', (1801, 1813), False, 'from PIL import Image\n'), ((1994, 2022), 'PIL.Image.fromarray', 'Image.fromarray', (['small_image'], {}), '(small_image)\n', (2009, 2022), False, 'from PIL import Image\n'), ((585, 609), 'PIL.Image.fromarray', 'Image.fromarray', (['img_tmp'], {}), '(img_tmp)\n', (600, 609), False, 'from PIL import Image\n'), ((1683, 1713), 'numpy.tile', 'np.tile', (['wide_image', '(1, 1, 3)'], {}), '(wide_image, (1, 1, 3))\n', (1690, 1713), True, 'import numpy as np\n'), ((488, 515), 'numpy.tile', 'np.tile', (['img_tmp', '(1, 1, 3)'], {}), '(img_tmp, (1, 1, 3))\n', (495, 515), True, 'import numpy as np\n'), ((1913, 1947), 'numpy.tile', 'np.tile', (['image_array[0]', '(1, 1, 3)'], {}), '(image_array[0], (1, 1, 3))\n', (1920, 1947), True, 'import numpy as np\n')]
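Editor's note: a hedged usage sketch (not part of the original dataset row) that calls make_output_img from the code above on a random batch of fake "MNIST" images. The grid size, output directory, and epoch number are illustrative assumptions; it presumes the functions in this row have been imported into the current namespace.

import os
import numpy as np

sample_num_h = 4
fake_batch = np.random.rand(sample_num_h * sample_num_h, 28, 28, 1).astype(np.float32)
out_dir = "./out_images_tripleGAN"   # assumed output directory
os.makedirs(out_dir, exist_ok=True)
make_output_img(fake_batch, sample_num_h, out_dir, epoch=0)
# writes resultImage_0.png (the 4x4 tile) and resultImageSmall_0.png into out_dir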
import math import random import cv2 import mmcv import numpy as np from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs from mmhuman3d.utils.demo_utils import box2cs from ..builder import PIPELINES from .transforms import ( _rotate_smpl_pose, affine_transform, get_affine_transform, ) def get_bbox(bbox_xywh, w, h): """Obtain bbox in xyxy format given bbox in xywh format and applying clipping to ensure bbox is within image bounds. Args: xywh (list): bbox in format (x, y, w, h). w (int): image width h (int): image height Returns: xyxy (numpy.ndarray): Converted bboxes in format (xmin, ymin, xmax, ymax). """ bbox_xywh = bbox_xywh.reshape(1, 4) xmin, ymin, xmax, ymax = bbox_clip_xyxy(bbox_xywh_to_xyxy(bbox_xywh), w, h) bbox = np.array([xmin, ymin, xmax, ymax]) return bbox def heatmap2coord(pred_jts, pred_scores, hm_shape, bbox, output_3d=False, mean_bbox_scale=None): """Retrieve predicted keypoints and scores from heatmap.""" hm_width, hm_height = hm_shape ndims = pred_jts.dim() assert ndims in [2, 3], 'Dimensions of input heatmap should be 2 or 3' if ndims == 2: pred_jts = pred_jts.unsqueeze(0) pred_scores = pred_scores.unsqueeze(0) coords = pred_jts.cpu().numpy() coords = coords.astype(float) pred_scores = pred_scores.cpu().numpy() pred_scores = pred_scores.astype(float) coords[:, :, 0] = (coords[:, :, 0] + 0.5) * hm_width coords[:, :, 1] = (coords[:, :, 1] + 0.5) * hm_height preds = np.zeros_like(coords) # transform bbox to scale xmin, ymin, xmax, ymax = bbox w = xmax - xmin h = ymax - ymin center = np.array([xmin + w * 0.5, ymin + h * 0.5]) scale = np.array([w, h]) # Transform back for i in range(coords.shape[0]): for j in range(coords.shape[1]): preds[i, j, 0:2] = transform_preds(coords[i, j, 0:2], center, scale, [hm_width, hm_height]) if output_3d: if mean_bbox_scale is not None: zscale = scale[0] / mean_bbox_scale preds[i, j, 2] = coords[i, j, 2] / zscale else: preds[i, j, 2] = coords[i, j, 2] # maxvals = np.ones((*preds.shape[:2], 1), dtype=float) # score_mul = 1 if norm_name == 'sigmoid' else 5 return preds, pred_scores def transform_preds(coords, center, scale, output_size): """Transform heatmap coordinates to image coordinates.""" target_coords = np.zeros(coords.shape) trans = get_affine_transform( center, scale, 0, output_size, inv=1, pixel_std=1) target_coords[0:2] = affine_transform(coords[0:2], trans) return target_coords def bbox_xywh_to_xyxy(xywh): """Convert bounding boxes from format (x, y, w, h) to (xmin, ymin, xmax, ymax) Args: xywh (list, tuple or numpy.ndarray): bbox in format (x, y, w, h). If numpy.ndarray is provided, we expect multiple bounding boxes with shape `(N, 4)`. Returns: xyxy (tuple or numpy.ndarray): Converted bboxes in format (xmin, ymin, xmax, ymax). Return numpy.ndarray if input is in the same format. """ if isinstance(xywh, (tuple, list)): if not len(xywh) == 4: raise IndexError( 'Bounding boxes must have 4 elements, given {}'.format( len(xywh))) w, h = np.maximum(xywh[2] - 1, 0), np.maximum(xywh[3] - 1, 0) return (xywh[0], xywh[1], xywh[0] + w, xywh[1] + h) elif isinstance(xywh, np.ndarray): if not xywh.size % 4 == 0: raise IndexError( 'Bounding boxes must have n * 4 elements, given {}'.format( xywh.shape)) xyxy = np.hstack( (xywh[:, :2], xywh[:, :2] + np.maximum(0, xywh[:, 2:4] - 1))) return xyxy else: raise TypeError( 'Expect input xywh a list, tuple or numpy.ndarray, given {}'. format(type(xywh))) def bbox_clip_xyxy(xyxy, width, height): """Clip bounding box with format (xmin, ymin, xmax, ymax) to `(0, 0, width, height)`. 
Args: xyxy (list, tuple or numpy.ndarray): bbox in format (xmin, ymin, xmax, ymax). If numpy.ndarray is provided, we expect multiple bounding boxes with shape `(N, 4)`. width (int or float): Boundary width. height (int or float): Boundary height. Returns: xyxy (list, tuple or numpy.ndarray): clipped bbox in format (xmin, ymin, xmax, ymax) and input type """ if isinstance(xyxy, (tuple, list)): if not len(xyxy) == 4: raise IndexError( 'Bounding boxes must have 4 elements, given {}'.format( len(xyxy))) x1 = np.minimum(width - 1, np.maximum(0, xyxy[0])) y1 = np.minimum(height - 1, np.maximum(0, xyxy[1])) x2 = np.minimum(width - 1, np.maximum(0, xyxy[2])) y2 = np.minimum(height - 1, np.maximum(0, xyxy[3])) return (x1, y1, x2, y2) elif isinstance(xyxy, np.ndarray): if not xyxy.size % 4 == 0: raise IndexError( 'Bounding boxes must have n * 4 elements, given {}'.format( xyxy.shape)) x1 = np.minimum(width - 1, np.maximum(0, xyxy[:, 0])) y1 = np.minimum(height - 1, np.maximum(0, xyxy[:, 1])) x2 = np.minimum(width - 1, np.maximum(0, xyxy[:, 2])) y2 = np.minimum(height - 1, np.maximum(0, xyxy[:, 3])) return np.hstack((x1, y1, x2, y2)) else: raise TypeError( 'Expect input xywh a list, tuple or numpy.ndarray, given {}'. format(type(xyxy))) def cam2pixel(cam_coord, f, c): """Convert coordinates from camera to image frame given f and c Args: cam_coord (np.ndarray): Coordinates in camera frame f (list): focal length, fx, fy c (list): principal point offset, x0, y0 Returns: img_coord (np.ndarray): Coordinates in image frame """ x = cam_coord[:, 0] / (cam_coord[:, 2] + 1e-8) * f[0] + c[0] y = cam_coord[:, 1] / (cam_coord[:, 2] + 1e-8) * f[1] + c[1] z = cam_coord[:, 2] img_coord = np.concatenate((x[:, None], y[:, None], z[:, None]), 1) return img_coord def get_intrinsic_matrix(f, c, inv=False): """Get intrisic matrix (or its inverse) given f and c. Args: f (list): focal length, fx, fy c (list): principal point offset, x0, y0 inv (bool): Store True to get inverse. Default: False. Returns: intrinsic matrix (np.ndarray): 3x3 intrinsic matrix or its inverse """ intrinsic_metrix = np.zeros((3, 3)).astype(np.float32) intrinsic_metrix[0, 0] = f[0] intrinsic_metrix[0, 2] = c[0] intrinsic_metrix[1, 1] = f[1] intrinsic_metrix[1, 2] = c[1] intrinsic_metrix[2, 2] = 1 if inv: intrinsic_metrix = np.linalg.inv(intrinsic_metrix).astype(np.float32) return intrinsic_metrix def aa_to_quat_numpy(axis_angle): """Convert rotations given as axis/angle to quaternions. Args: axis_angle: Rotations given as a vector in axis angle form, as a np.ndarray of shape (..., 3), where the magnitude is the angle turned anticlockwise in radians around the vector's direction. Returns: quaternions with real part first, as np.ndarray of shape (..., 4). """ angles = np.linalg.norm(axis_angle, ord=2, axis=-1, keepdims=True) half_angles = 0.5 * angles eps = 1e-6 small_angles = np.abs(angles) < eps sin_half_angles_over_angles = np.empty_like(angles) sin_half_angles_over_angles[~small_angles] = ( np.sin(half_angles[~small_angles]) / angles[~small_angles]) # for x small, sin(x/2) is about x/2 - (x/2)^3/6 # so sin(x/2)/x is about 1/2 - (x*x)/48 sin_half_angles_over_angles[small_angles] = ( 0.5 - (angles[small_angles] * angles[small_angles]) / 48) quaternions = np.concatenate( [np.cos(half_angles), axis_angle * sin_half_angles_over_angles], axis=-1) return quaternions def flip_thetas(thetas, theta_pairs): """Flip thetas. 
Args: thetas (np.ndarray): joints in shape (num_thetas, 3) theta_pairs (list): flip pairs for thetas Returns: thetas_flip (np.ndarray): flipped thetas with shape (num_thetas, 3) """ thetas_flip = thetas.copy() # reflect horizontally thetas_flip[:, 1] = -1 * thetas_flip[:, 1] thetas_flip[:, 2] = -1 * thetas_flip[:, 2] # change left-right parts for pair in theta_pairs: thetas_flip[pair[0], :], thetas_flip[pair[1], :] = \ thetas_flip[pair[1], :], thetas_flip[pair[0], :].copy() return thetas_flip def flip_joints_3d(joints_3d, joints_3d_visible, width, flip_pairs): """Flip 3d joints. Args: joints_3d (np.ndarray): joints in shape (N, 3, 2) width (int): Image width joint_pairs (list): flip pairs for joints Returns: joints_3d_flipped (np.ndarray): flipped joints with shape (N, 3, 2) joints_3d_visible_flipped (np.ndarray): visibility of (N, 3, 2) """ assert len(joints_3d) == len(joints_3d_visible) joints_3d[:, 0] = width - joints_3d[:, 0] - 1 joints_3d_flipped = joints_3d.copy() joints_3d_visible_flipped = joints_3d_visible.copy() # Swap left-right parts for left, right in flip_pairs: joints_3d_flipped[left, :] = joints_3d[right, :] joints_3d_flipped[right, :] = joints_3d[left, :] joints_3d_visible_flipped[left, :] = joints_3d_visible[right, :] joints_3d_visible_flipped[right, :] = joints_3d_visible[left, :] joints_3d_flipped = joints_3d_flipped * joints_3d_visible_flipped return joints_3d_flipped, joints_3d_visible_flipped def flip_xyz_joints_3d(joints_3d, flip_pairs): """Flip 3d xyz joints. Args: joints_3d (np.ndarray): Joints in shape (N, 3) joint_pairs (list): flip pairs for joints Returns: joints_3d_flipped (np.ndarray): flipped joints with shape (N, 3) """ joints_3d[:, 0] = -1 * joints_3d[:, 0] joints_3d_flipped = joints_3d.copy() # change left-right parts for left, right in flip_pairs: joints_3d_flipped[left, :] = joints_3d[right, :] joints_3d_flipped[right, :] = joints_3d[left, :] return joints_3d_flipped def flip_twist(twist_phi, twist_weight, twist_pairs): """Flip twist and weight. Args: twist_phi (np.ndarray): twist in shape (num_twist, 2) twist_weight (np.ndarray): weight in shape (num_twist, 2) twist_pairs (list): flip pairs for twist Returns: twist_flip (np.ndarray): flipped twist with shape (num_twist, 2) weight_flip (np.ndarray): flipped weights with shape (num_twist, 2) """ # twist_flip = -1 * twist_phi.copy() # 23 x 2 twist_flip = np.zeros_like(twist_phi) weight_flip = twist_weight.copy() twist_flip[:, 0] = twist_phi[:, 0].copy() # cos twist_flip[:, 1] = -1 * twist_phi[:, 1].copy() # sin for pair in twist_pairs: idx0 = pair[0] - 1 idx1 = pair[1] - 1 twist_flip[idx0, :], twist_flip[idx1, :] = \ twist_flip[idx1, :], twist_flip[idx0, :].copy() weight_flip[idx0, :], weight_flip[idx1, :] = \ weight_flip[idx1, :], weight_flip[idx0, :].copy() return twist_flip, weight_flip def _center_scale_to_box(center, scale): """Flip twist and weight. 
Args: joints_3d (np.ndarray): Joints in shape (N, 3) joint_pairs (list): flip pairs for joints Returns: joints_3d_flipped (np.ndarray): flipped joints with shape (N, 3) """ pixel_std = 1.0 w = scale[0] * pixel_std h = scale[1] * pixel_std xmin = center[0] - w * 0.5 ymin = center[1] - h * 0.5 xmax = xmin + w ymax = ymin + h bbox = [xmin, ymin, xmax, ymax] return bbox @PIPELINES.register_module() class RandomDPG(object): """Add dpg for data augmentation, including random crop and random sample Required keys: 'bbox', 'ann_info Modifies key: 'bbox', 'center', 'scale' Args: dpg_prob (float): Probability of dpg """ def __init__(self, dpg_prob): self.dpg_prob = dpg_prob def __call__(self, results): if np.random.rand() > self.dpg_prob: return results bbox = results['bbox'] imgwidth = results['ann_info']['width'] imgheight = results['ann_info']['height'] PatchScale = random.uniform(0, 1) width = bbox[2] - bbox[0] ht = bbox[3] - bbox[1] if PatchScale > 0.85: ratio = ht / width if (width < ht): patchWidth = PatchScale * width patchHt = patchWidth * ratio else: patchHt = PatchScale * ht patchWidth = patchHt / ratio xmin = bbox[0] + random.uniform(0, 1) * (width - patchWidth) ymin = bbox[1] + random.uniform(0, 1) * (ht - patchHt) xmax = xmin + patchWidth + 1 ymax = ymin + patchHt + 1 else: xmin = max( 1, min(bbox[0] + np.random.normal(-0.0142, 0.1158) * width, imgwidth - 3)) ymin = max( 1, min(bbox[1] + np.random.normal(0.0043, 0.068) * ht, imgheight - 3)) xmax = min( max(xmin + 2, bbox[2] + np.random.normal(0.0154, 0.1337) * width), imgwidth - 3) ymax = min( max(ymin + 2, bbox[3] + np.random.normal(-0.0013, 0.0711) * ht), imgheight - 3) results['bbox'] = [xmin, ymin, xmax, ymax] center, scale = box2cs( xmin, ymin, xmax - xmin, ymax - ymin, aspect_ratio=1.0, scale_mult=1.0) results['center'] = center results['scale'] = scale return results @PIPELINES.register_module() class HybrIKRandomFlip: """Data augmentation with random image flip. Required keys: 'img', 'keypoints3d', 'keypoints3d_vis', 'center', and 'ann_info', 'has_smpl' Additional keys required if has_smpl: 'keypoints3d17', 'keypoints3d17_vis', 'keypoints3d_relative', 'keypoints3d17_relative', 'pose' Modifies key: 'img', 'keypoints3d', 'keypoints3d_vis', 'center', 'pose' Additional keys modified if has_smpl: 'keypoints3d17', 'keypoints3d17_vis', 'keypoints3d_relative', 'keypoints3d17_relative', 'pose' Args: flip_prob (float): probability of the image being flipped. 
Default: 0.5 flip_pairs (list[int]): list of left-right keypoint pairs for flipping """ def __init__(self, flip_prob=0.5, flip_pairs=None): assert 0 <= flip_prob <= 1 self.flip_prob = flip_prob self.flip_pairs = flip_pairs def __call__(self, results): """Perform data augmentation with random image flip.""" if np.random.rand() > self.flip_prob: results['is_flipped'] = np.array([0]) return results results['is_flipped'] = np.array([1]) # flip image for key in results.get('img_fields', ['img']): results[key] = mmcv.imflip(results[key], direction='horizontal') width = results['img'][:, ::-1, :].shape[1] # flip bbox center center = results['center'] center[0] = width - 1 - center[0] results['center'] = center keypoints3d = results['keypoints3d'] keypoints3d_vis = results['keypoints3d_vis'] keypoints3d, keypoints3d_vis = flip_joints_3d(keypoints3d, keypoints3d_vis, width, self.flip_pairs) if results['has_smpl']: pose = results['pose'] smpl_flip_pairs = get_flip_pairs('smpl') pose = flip_thetas(pose, smpl_flip_pairs) keypoints3d17 = results['keypoints3d17'] keypoints3d17_vis = results['keypoints3d17_vis'] keypoints3d17_relative = results['keypoints3d17_relative'] keypoints3d_relative = results['keypoints3d_relative'] keypoints3d17, keypoints3d17_vis = flip_joints_3d( keypoints3d17, keypoints3d17_vis, width, self.flip_pairs) keypoints3d17_relative = flip_xyz_joints_3d( keypoints3d17_relative, self.flip_pairs) keypoints3d_relative = flip_xyz_joints_3d(keypoints3d_relative, self.flip_pairs) twist_phi, twist_weight = results['target_twist'], results[ 'target_twist_weight'] results['target_twist'], results[ 'target_twist_weight'] = flip_twist(twist_phi, twist_weight, smpl_flip_pairs) results['keypoints3d17_relative'] = keypoints3d17_relative.astype( np.float32) results['keypoints3d_relative'] = keypoints3d_relative.astype( np.float32) results['keypoints3d17'] = keypoints3d17.astype(np.float32) results['keypoints3d17_vis'] = keypoints3d17_vis.astype(np.float32) results['pose'] = pose.astype(np.float32) results['keypoints3d'] = keypoints3d.astype(np.float32) results['keypoints3d_vis'] = keypoints3d_vis.astype(np.float32) return results @PIPELINES.register_module() class HybrIKAffine: """Affine transform the image to get input image. Affine transform the 2D keypoints, 3D kepoints and IUV image too. Required keys: 'img', 'keypoints3d', 'keypoints3d_vis', 'pose', 'ann_info', 'scale', 'keypoints3d17', 'keypoints3d17_vis', 'rotation' and 'center'. 
Modifies key: 'img', 'keypoints3d','keypoints3d_vis', 'pose', 'keypoints3d17', 'keypoints3d17_vis' """ def __init__(self, img_res): self.image_size = np.array([img_res, img_res]) def __call__(self, results): img = results['img'] keypoints3d = results['keypoints3d'] num_joints = len(keypoints3d) keypoints3d_vis = results['keypoints3d_vis'] has_smpl = results['has_smpl'] c = results['center'] s = results['scale'] r = results['rotation'] trans = get_affine_transform(c, s, r, self.image_size, pixel_std=1) img = cv2.warpAffine( img, trans, (int(self.image_size[0]), int(self.image_size[1])), flags=cv2.INTER_LINEAR) for i in range(num_joints): if keypoints3d_vis[i, 0] > 0.0: keypoints3d[i, 0:2] = affine_transform(keypoints3d[i, 0:2], trans) if has_smpl: keypoints3d17 = results['keypoints3d17'] keypoints3d17_vis = results['keypoints3d17_vis'] for i in range(17): if keypoints3d17_vis[i, 0] > 0.0: keypoints3d17[i, 0:2] = affine_transform( keypoints3d17[i, 0:2], trans) results['keypoints3d17'] = keypoints3d17 results['keypoints3d17_vis'] = keypoints3d17_vis # to rotate poses pose = results['pose'] pose = _rotate_smpl_pose(pose.reshape(-1), r) results['pose'] = pose.reshape(24, 3) results['img'] = img.astype(np.float32) results['keypoints3d_vis'] = keypoints3d_vis.astype(np.float32) results['keypoints3d'] = keypoints3d.astype(np.float32) return results @PIPELINES.register_module() class RandomOcclusion: """Add random occlusion. Add random occlusion based on occlusion probability. Args: occlusion_prob (float): probability of the image having occlusion. Default: 0.5 """ def __init__(self, occlusion_prob=0.5): self.occlusion_prob = occlusion_prob def __call__(self, results): if np.random.rand() > self.occlusion_prob: return results xmin, ymin, xmax, ymax = results['bbox'] imgwidth = results['ann_info']['width'] imgheight = results['ann_info']['height'] img = results['img'] area_min = 0.0 area_max = 0.7 synth_area = (random.random() * (area_max - area_min) + area_min) * (xmax - xmin) * ( ymax - ymin) ratio_min = 0.3 ratio_max = 1 / 0.3 synth_ratio = (random.random() * (ratio_max - ratio_min) + ratio_min) synth_h = math.sqrt(synth_area * synth_ratio) synth_w = math.sqrt(synth_area / synth_ratio) synth_xmin = random.random() * ((xmax - xmin) - synth_w - 1) + xmin synth_ymin = random.random() * ((ymax - ymin) - synth_h - 1) + ymin if synth_xmin >= 0 and synth_ymin >= 0 and \ synth_xmin + synth_w < imgwidth and \ synth_ymin + synth_h < imgheight: synth_xmin = int(synth_xmin) synth_ymin = int(synth_ymin) synth_w = int(synth_w) synth_h = int(synth_h) img[synth_ymin:synth_ymin + synth_h, synth_xmin:synth_xmin + synth_w, :] = np.random.rand(synth_h, synth_w, 3) * 255 results['img'] = img return results @PIPELINES.register_module() class GenerateHybrIKTarget: """Generate the targets required for training. 
Required keys: 'keypoints3d', 'keypoints3d_vis', 'ann_info', 'depth_factor' Additional keys if has_smpl: 'keypoints3d17', 'keypoints3d17_vis', 'keypoints3d_relative', 'keypoints3d17_relative' Add keys: 'target_uvd_29', 'target_xyz_24', 'target_weight_24', 'target_weight_29', 'target_xyz_17', 'target_weight_17', 'target_theta', 'target_beta', 'target_smpl_weight', 'target_theta_weight', trans_inv', 'bbox' """ def __init__(self, img_res, test_mode): self.test_mode = test_mode self.image_size = np.array([img_res, img_res]) def _integral_uvd_target_generator(self, joints_3d, num_joints, patch_height, patch_width, depth_factor, test_mode=False): target_weight = np.ones((num_joints, 3), dtype=np.float32) target_weight[:, 0] = joints_3d[:, 0, 1] target_weight[:, 1] = joints_3d[:, 0, 1] target_weight[:, 2] = joints_3d[:, 0, 1] target = np.zeros((num_joints, 3), dtype=np.float32) target[:, 0] = joints_3d[:, 0, 0] / patch_width - 0.5 target[:, 1] = joints_3d[:, 1, 0] / patch_height - 0.5 target[:, 2] = joints_3d[:, 2, 0] / depth_factor target_weight[target[:, 0] > 0.5] = 0 target_weight[target[:, 0] < -0.5] = 0 target_weight[target[:, 1] > 0.5] = 0 target_weight[target[:, 1] < -0.5] = 0 target_weight[target[:, 2] > 0.5] = 0 target_weight[target[:, 2] < -0.5] = 0 target = target.reshape((-1)) target_weight = target_weight.reshape((-1)) return target, target_weight def _integral_target_generator(self, joints_3d, num_joints, patch_height, patch_width, depth_factor): target_weight = np.ones((num_joints, 3), dtype=np.float32) target_weight[:, 0] = joints_3d[:, 0, 1] target_weight[:, 1] = joints_3d[:, 0, 1] target_weight[:, 2] = joints_3d[:, 0, 1] target = np.zeros((num_joints, 3), dtype=np.float32) target[:, 0] = joints_3d[:, 0, 0] / patch_width - 0.5 target[:, 1] = joints_3d[:, 1, 0] / patch_height - 0.5 target[:, 2] = joints_3d[:, 2, 0] / depth_factor target_weight[target[:, 0] > 0.5] = 0 target_weight[target[:, 0] < -0.5] = 0 target_weight[target[:, 1] > 0.5] = 0 target_weight[target[:, 1] < -0.5] = 0 target_weight[target[:, 2] > 0.5] = 0 target_weight[target[:, 2] < -0.5] = 0 target = target.reshape((-1)) target_weight = target_weight.reshape((-1)) return target, target_weight def _integral_xyz_target_generator(self, joints_3d, joints_3d_vis, num_joints, depth_factor): target_weight = np.ones((num_joints, 3), dtype=np.float32) target_weight[:, 0] = joints_3d_vis[:, 0] target_weight[:, 1] = joints_3d_vis[:, 1] target_weight[:, 2] = joints_3d_vis[:, 2] target = np.zeros((num_joints, 3), dtype=np.float32) target[:, 0] = joints_3d[:, 0] / int(depth_factor) target[:, 1] = joints_3d[:, 1] / int(depth_factor) target[:, 2] = joints_3d[:, 2] / int(depth_factor) target = target.reshape((-1)) target_weight = target_weight.reshape((-1)) return target, target_weight def _integral_target_generator_coco(self, joints_3d, num_joints, patch_height, patch_width): target_weight = np.ones((num_joints, 2), dtype=np.float32) target_weight[:, 0] = joints_3d[:, 0, 1] target_weight[:, 1] = joints_3d[:, 0, 1] target = np.zeros((num_joints, 2), dtype=np.float32) target[:, 0] = joints_3d[:, 0, 0] / patch_width - 0.5 target[:, 1] = joints_3d[:, 1, 0] / patch_height - 0.5 target = target.reshape((-1)) target_weight = target_weight.reshape((-1)) return target, target_weight def __call__(self, results): has_smpl = results['has_smpl'] inp_h, inp_w = self.image_size[0], self.image_size[1] keypoints3d = results['keypoints3d'] num_joints = len(keypoints3d) keypoints3d_vis = results['keypoints3d_vis'] depth_factor = 
results['depth_factor'] c = results['center'] s = results['scale'] r = results['rotation'] # generate new keys trans_inv = get_affine_transform( c, s, r, self.image_size, inv=True, pixel_std=1).astype(np.float32) results['trans_inv'] = trans_inv.astype(np.float32) bbox = _center_scale_to_box(c, s) results['bbox'] = np.array(bbox, dtype=np.float32) if has_smpl: theta = results['pose'] # aa to quat results['target_theta'] = aa_to_quat_numpy(theta).reshape( 24 * 4).astype(np.float32) theta_24_weights = np.ones((24, 4)) results['target_theta_weight'] = theta_24_weights.reshape( 24 * 4).astype(np.float32) results['target_beta'] = results['beta'].astype(np.float32) results['target_smpl_weight'] = np.ones(1).astype(np.float32) keypoints3d17_vis = results['keypoints3d17_vis'] keypoints3d17_relative = results['keypoints3d17_relative'] joints24_relative_3d = results['keypoints3d_relative'][:24, :] gt_joints_29 = np.zeros((29, 3, 2), dtype=np.float32) gt_joints_29[:, :, 0] = keypoints3d.copy() gt_joints_29[:, :, 1] = keypoints3d_vis.copy() target_uvd_29, target_weight_29 = \ self._integral_uvd_target_generator( gt_joints_29, 29, inp_h, inp_w, depth_factor) target_xyz_17, target_weight_17 = \ self._integral_xyz_target_generator( keypoints3d17_relative, keypoints3d17_vis, 17, depth_factor) target_xyz_24, target_weight_24 = \ self._integral_xyz_target_generator( joints24_relative_3d, keypoints3d_vis[:24, :], 24, depth_factor) target_weight_29 *= keypoints3d_vis.reshape(-1) target_weight_24 *= keypoints3d_vis[:24, :].reshape(-1) target_weight_17 *= keypoints3d17_vis.reshape(-1) results['target_uvd_29'] = target_uvd_29.astype(np.float32) results['target_xyz_24'] = target_xyz_24.astype(np.float32) results['target_weight_29'] = target_weight_29.astype(np.float32) results['target_weight_24'] = target_weight_24.astype(np.float32) results['target_xyz_17'] = target_xyz_17.astype(np.float32) results['target_weight_17'] = target_weight_17.astype(np.float32) else: label_uvd_29 = np.zeros((29, 3)) label_xyz_24 = np.zeros((24, 3)) label_uvd_29_mask = np.zeros((29, 3)) label_xyz_17 = np.zeros((17, 3)) label_xyz_17_mask = np.zeros((17, 3)) gt_joints = np.zeros((num_joints, 3, 2), dtype=np.float32) gt_joints[:, :, 0] = keypoints3d.copy() gt_joints[:, :, 1] = keypoints3d_vis.copy() mask_idx = [1, 2, 6, 9, 10, 11] if results['ann_info']['dataset_name'] == 'coco': target, target_weight = self._integral_target_generator_coco( gt_joints, num_joints, inp_h, inp_w) label_jts_origin = target * target_weight label_jts_mask_origin = target_weight label_jts_origin = label_jts_origin.reshape(num_joints, 2) label_jts_mask_origin = label_jts_mask_origin.reshape( num_joints, 2) label_jts_origin[mask_idx] = label_jts_origin[mask_idx] * 0 label_jts_mask_origin[ mask_idx] = label_jts_origin[mask_idx] * 0 label_uvd_29 = np.hstack([label_jts_origin, np.zeros([29, 1])]) label_uvd_29_mask = np.hstack( [label_jts_mask_origin, np.zeros([29, 1])]) elif results['ann_info']['dataset_name'] == 'mpi_inf_3dhp': if not self.test_mode: target, target_weight = self._integral_target_generator( gt_joints, num_joints, inp_h, inp_w, depth_factor) target_weight *= keypoints3d_vis.reshape(-1) label_jts_origin = target * target_weight label_jts_mask_origin = target_weight label_jts_origin = label_jts_origin.reshape(num_joints, 3) label_jts_mask_origin = label_jts_mask_origin.reshape( num_joints, 3) label_jts_origin[mask_idx] = label_jts_origin[mask_idx] * 0 label_jts_mask_origin[ mask_idx] = label_jts_origin[mask_idx] * 0 label_uvd_29 = label_jts_origin 
label_uvd_29_mask = label_jts_mask_origin label_uvd_29 = label_uvd_29.reshape(-1) label_xyz_24 = label_xyz_24.reshape(-1) label_uvd_24_mask = label_uvd_29_mask[:24, :].reshape(-1) label_uvd_29_mask = label_uvd_29_mask.reshape(-1) label_xyz_17 = label_xyz_17.reshape(-1) label_xyz_17_mask = label_xyz_17_mask.reshape(-1) results['target_uvd_29'] = label_uvd_29.astype(np.float32) results['target_xyz_24'] = label_xyz_24.astype(np.float32) results['target_weight_24'] = label_uvd_24_mask.astype(np.float32) results['target_weight_29'] = label_uvd_29_mask.astype(np.float32) results['target_xyz_17'] = label_xyz_17.astype(np.float32) results['target_weight_17'] = label_xyz_17_mask.astype(np.float32) results['target_theta'] = np.zeros(24 * 4).astype(np.float32) results['target_beta'] = np.zeros(10).astype(np.float32) results['target_smpl_weight'] = np.zeros(1).astype(np.float32) results['target_theta_weight'] = np.zeros(24 * 4).astype( np.float32) return results @PIPELINES.register_module() class NewKeypointsSelection: """Select keypoints. Modifies specified keys Args: map (dict): keypoints and index for selection """ def __init__(self, maps): self.maps = maps def __call__(self, results): """Perform keypoints selection.""" for map in self.maps: for keypoint in map['keypoints']: keypoints_index = map['keypoints_index'] if keypoint in results: results[keypoint] = results[keypoint][..., keypoints_index, :] return results
[ "numpy.abs", "numpy.maximum", "numpy.ones", "numpy.sin", "numpy.linalg.norm", "numpy.random.normal", "numpy.zeros_like", "numpy.empty_like", "mmhuman3d.core.conventions.keypoints_mapping.get_flip_pairs", "math.sqrt", "mmcv.imflip", "numpy.hstack", "random.random", "numpy.linalg.inv", "numpy.cos", "numpy.concatenate", "mmhuman3d.utils.demo_utils.box2cs", "random.uniform", "numpy.zeros", "numpy.array", "numpy.random.rand" ]
[((842, 876), 'numpy.array', 'np.array', (['[xmin, ymin, xmax, ymax]'], {}), '([xmin, ymin, xmax, ymax])\n', (850, 876), True, 'import numpy as np\n'), ((1679, 1700), 'numpy.zeros_like', 'np.zeros_like', (['coords'], {}), '(coords)\n', (1692, 1700), True, 'import numpy as np\n'), ((1818, 1860), 'numpy.array', 'np.array', (['[xmin + w * 0.5, ymin + h * 0.5]'], {}), '([xmin + w * 0.5, ymin + h * 0.5])\n', (1826, 1860), True, 'import numpy as np\n'), ((1873, 1889), 'numpy.array', 'np.array', (['[w, h]'], {}), '([w, h])\n', (1881, 1889), True, 'import numpy as np\n'), ((2692, 2714), 'numpy.zeros', 'np.zeros', (['coords.shape'], {}), '(coords.shape)\n', (2700, 2714), True, 'import numpy as np\n'), ((6360, 6415), 'numpy.concatenate', 'np.concatenate', (['(x[:, None], y[:, None], z[:, None])', '(1)'], {}), '((x[:, None], y[:, None], z[:, None]), 1)\n', (6374, 6415), True, 'import numpy as np\n'), ((7597, 7654), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_angle'], {'ord': '(2)', 'axis': '(-1)', 'keepdims': '(True)'}), '(axis_angle, ord=2, axis=-1, keepdims=True)\n', (7611, 7654), True, 'import numpy as np\n'), ((7775, 7796), 'numpy.empty_like', 'np.empty_like', (['angles'], {}), '(angles)\n', (7788, 7796), True, 'import numpy as np\n'), ((11084, 11108), 'numpy.zeros_like', 'np.zeros_like', (['twist_phi'], {}), '(twist_phi)\n', (11097, 11108), True, 'import numpy as np\n'), ((7720, 7734), 'numpy.abs', 'np.abs', (['angles'], {}), '(angles)\n', (7726, 7734), True, 'import numpy as np\n'), ((7856, 7890), 'numpy.sin', 'np.sin', (['half_angles[~small_angles]'], {}), '(half_angles[~small_angles])\n', (7862, 7890), True, 'import numpy as np\n'), ((12729, 12749), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (12743, 12749), False, 'import random\n'), ((14025, 14103), 'mmhuman3d.utils.demo_utils.box2cs', 'box2cs', (['xmin', 'ymin', '(xmax - xmin)', '(ymax - ymin)'], {'aspect_ratio': '(1.0)', 'scale_mult': '(1.0)'}), '(xmin, ymin, xmax - xmin, ymax - ymin, aspect_ratio=1.0, scale_mult=1.0)\n', (14031, 14103), False, 'from mmhuman3d.utils.demo_utils import box2cs\n'), ((15430, 15443), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (15438, 15443), True, 'import numpy as np\n'), ((18320, 18348), 'numpy.array', 'np.array', (['[img_res, img_res]'], {}), '([img_res, img_res])\n', (18328, 18348), True, 'import numpy as np\n'), ((20949, 20984), 'math.sqrt', 'math.sqrt', (['(synth_area * synth_ratio)'], {}), '(synth_area * synth_ratio)\n', (20958, 20984), False, 'import math\n'), ((21003, 21038), 'math.sqrt', 'math.sqrt', (['(synth_area / synth_ratio)'], {}), '(synth_area / synth_ratio)\n', (21012, 21038), False, 'import math\n'), ((22353, 22381), 'numpy.array', 'np.array', (['[img_res, img_res]'], {}), '([img_res, img_res])\n', (22361, 22381), True, 'import numpy as np\n'), ((22769, 22811), 'numpy.ones', 'np.ones', (['(num_joints, 3)'], {'dtype': 'np.float32'}), '((num_joints, 3), dtype=np.float32)\n', (22776, 22811), True, 'import numpy as np\n'), ((22977, 23020), 'numpy.zeros', 'np.zeros', (['(num_joints, 3)'], {'dtype': 'np.float32'}), '((num_joints, 3), dtype=np.float32)\n', (22985, 23020), True, 'import numpy as np\n'), ((23777, 23819), 'numpy.ones', 'np.ones', (['(num_joints, 3)'], {'dtype': 'np.float32'}), '((num_joints, 3), dtype=np.float32)\n', (23784, 23819), True, 'import numpy as np\n'), ((23985, 24028), 'numpy.zeros', 'np.zeros', (['(num_joints, 3)'], {'dtype': 'np.float32'}), '((num_joints, 3), dtype=np.float32)\n', (23993, 24028), True, 'import numpy as np\n'), 
((24781, 24823), 'numpy.ones', 'np.ones', (['(num_joints, 3)'], {'dtype': 'np.float32'}), '((num_joints, 3), dtype=np.float32)\n', (24788, 24823), True, 'import numpy as np\n'), ((24992, 25035), 'numpy.zeros', 'np.zeros', (['(num_joints, 3)'], {'dtype': 'np.float32'}), '((num_joints, 3), dtype=np.float32)\n', (25000, 25035), True, 'import numpy as np\n'), ((25503, 25545), 'numpy.ones', 'np.ones', (['(num_joints, 2)'], {'dtype': 'np.float32'}), '((num_joints, 2), dtype=np.float32)\n', (25510, 25545), True, 'import numpy as np\n'), ((25662, 25705), 'numpy.zeros', 'np.zeros', (['(num_joints, 2)'], {'dtype': 'np.float32'}), '((num_joints, 2), dtype=np.float32)\n', (25670, 25705), True, 'import numpy as np\n'), ((26651, 26683), 'numpy.array', 'np.array', (['bbox'], {'dtype': 'np.float32'}), '(bbox, dtype=np.float32)\n', (26659, 26683), True, 'import numpy as np\n'), ((3595, 3621), 'numpy.maximum', 'np.maximum', (['(xywh[2] - 1)', '(0)'], {}), '(xywh[2] - 1, 0)\n', (3605, 3621), True, 'import numpy as np\n'), ((3623, 3649), 'numpy.maximum', 'np.maximum', (['(xywh[3] - 1)', '(0)'], {}), '(xywh[3] - 1, 0)\n', (3633, 3649), True, 'import numpy as np\n'), ((4966, 4988), 'numpy.maximum', 'np.maximum', (['(0)', 'xyxy[0]'], {}), '(0, xyxy[0])\n', (4976, 4988), True, 'import numpy as np\n'), ((5026, 5048), 'numpy.maximum', 'np.maximum', (['(0)', 'xyxy[1]'], {}), '(0, xyxy[1])\n', (5036, 5048), True, 'import numpy as np\n'), ((5085, 5107), 'numpy.maximum', 'np.maximum', (['(0)', 'xyxy[2]'], {}), '(0, xyxy[2])\n', (5095, 5107), True, 'import numpy as np\n'), ((5145, 5167), 'numpy.maximum', 'np.maximum', (['(0)', 'xyxy[3]'], {}), '(0, xyxy[3])\n', (5155, 5167), True, 'import numpy as np\n'), ((5679, 5706), 'numpy.hstack', 'np.hstack', (['(x1, y1, x2, y2)'], {}), '((x1, y1, x2, y2))\n', (5688, 5706), True, 'import numpy as np\n'), ((6822, 6838), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (6830, 6838), True, 'import numpy as np\n'), ((8172, 8191), 'numpy.cos', 'np.cos', (['half_angles'], {}), '(half_angles)\n', (8178, 8191), True, 'import numpy as np\n'), ((12516, 12532), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12530, 12532), True, 'import numpy as np\n'), ((15285, 15301), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (15299, 15301), True, 'import numpy as np\n'), ((15356, 15369), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (15364, 15369), True, 'import numpy as np\n'), ((15548, 15597), 'mmcv.imflip', 'mmcv.imflip', (['results[key]'], {'direction': '"""horizontal"""'}), "(results[key], direction='horizontal')\n", (15559, 15597), False, 'import mmcv\n'), ((16204, 16226), 'mmhuman3d.core.conventions.keypoints_mapping.get_flip_pairs', 'get_flip_pairs', (['"""smpl"""'], {}), "('smpl')\n", (16218, 16226), False, 'from mmhuman3d.core.conventions.keypoints_mapping import get_flip_pairs\n'), ((20353, 20369), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (20367, 20369), True, 'import numpy as np\n'), ((26912, 26928), 'numpy.ones', 'np.ones', (['(24, 4)'], {}), '((24, 4))\n', (26919, 26928), True, 'import numpy as np\n'), ((27426, 27464), 'numpy.zeros', 'np.zeros', (['(29, 3, 2)'], {'dtype': 'np.float32'}), '((29, 3, 2), dtype=np.float32)\n', (27434, 27464), True, 'import numpy as np\n'), ((28837, 28854), 'numpy.zeros', 'np.zeros', (['(29, 3)'], {}), '((29, 3))\n', (28845, 28854), True, 'import numpy as np\n'), ((28882, 28899), 'numpy.zeros', 'np.zeros', (['(24, 3)'], {}), '((24, 3))\n', (28890, 28899), True, 'import numpy as np\n'), ((28932, 28949), 
'numpy.zeros', 'np.zeros', (['(29, 3)'], {}), '((29, 3))\n', (28940, 28949), True, 'import numpy as np\n'), ((28977, 28994), 'numpy.zeros', 'np.zeros', (['(17, 3)'], {}), '((17, 3))\n', (28985, 28994), True, 'import numpy as np\n'), ((29027, 29044), 'numpy.zeros', 'np.zeros', (['(17, 3)'], {}), '((17, 3))\n', (29035, 29044), True, 'import numpy as np\n'), ((29070, 29116), 'numpy.zeros', 'np.zeros', (['(num_joints, 3, 2)'], {'dtype': 'np.float32'}), '((num_joints, 3, 2), dtype=np.float32)\n', (29078, 29116), True, 'import numpy as np\n'), ((5449, 5474), 'numpy.maximum', 'np.maximum', (['(0)', 'xyxy[:, 0]'], {}), '(0, xyxy[:, 0])\n', (5459, 5474), True, 'import numpy as np\n'), ((5512, 5537), 'numpy.maximum', 'np.maximum', (['(0)', 'xyxy[:, 1]'], {}), '(0, xyxy[:, 1])\n', (5522, 5537), True, 'import numpy as np\n'), ((5574, 5599), 'numpy.maximum', 'np.maximum', (['(0)', 'xyxy[:, 2]'], {}), '(0, xyxy[:, 2])\n', (5584, 5599), True, 'import numpy as np\n'), ((5637, 5662), 'numpy.maximum', 'np.maximum', (['(0)', 'xyxy[:, 3]'], {}), '(0, xyxy[:, 3])\n', (5647, 5662), True, 'import numpy as np\n'), ((7065, 7096), 'numpy.linalg.inv', 'np.linalg.inv', (['intrinsic_metrix'], {}), '(intrinsic_metrix)\n', (7078, 7096), True, 'import numpy as np\n'), ((20875, 20890), 'random.random', 'random.random', ([], {}), '()\n', (20888, 20890), False, 'import random\n'), ((21060, 21075), 'random.random', 'random.random', ([], {}), '()\n', (21073, 21075), False, 'import random\n'), ((21136, 21151), 'random.random', 'random.random', ([], {}), '()\n', (21149, 21151), False, 'import random\n'), ((21600, 21635), 'numpy.random.rand', 'np.random.rand', (['synth_h', 'synth_w', '(3)'], {}), '(synth_h, synth_w, 3)\n', (21614, 21635), True, 'import numpy as np\n'), ((13134, 13154), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (13148, 13154), False, 'import random\n'), ((13207, 13227), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (13221, 13227), False, 'import random\n'), ((27160, 27170), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (27167, 27170), True, 'import numpy as np\n'), ((31940, 31956), 'numpy.zeros', 'np.zeros', (['(24 * 4)'], {}), '(24 * 4)\n', (31948, 31956), True, 'import numpy as np\n'), ((32013, 32025), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (32021, 32025), True, 'import numpy as np\n'), ((32089, 32100), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (32097, 32100), True, 'import numpy as np\n'), ((32165, 32181), 'numpy.zeros', 'np.zeros', (['(24 * 4)'], {}), '(24 * 4)\n', (32173, 32181), True, 'import numpy as np\n'), ((3989, 4020), 'numpy.maximum', 'np.maximum', (['(0)', '(xywh[:, 2:4] - 1)'], {}), '(0, xywh[:, 2:4] - 1)\n', (3999, 4020), True, 'import numpy as np\n'), ((20666, 20681), 'random.random', 'random.random', ([], {}), '()\n', (20679, 20681), False, 'import random\n'), ((30000, 30017), 'numpy.zeros', 'np.zeros', (['[29, 1]'], {}), '([29, 1])\n', (30008, 30017), True, 'import numpy as np\n'), ((30132, 30149), 'numpy.zeros', 'np.zeros', (['[29, 1]'], {}), '([29, 1])\n', (30140, 30149), True, 'import numpy as np\n'), ((13411, 13444), 'numpy.random.normal', 'np.random.normal', (['(-0.0142)', '(0.1158)'], {}), '(-0.0142, 0.1158)\n', (13427, 13444), True, 'import numpy as np\n'), ((13562, 13593), 'numpy.random.normal', 'np.random.normal', (['(0.0043)', '(0.068)'], {}), '(0.0043, 0.068)\n', (13578, 13593), True, 'import numpy as np\n'), ((13720, 13752), 'numpy.random.normal', 'np.random.normal', (['(0.0154)', '(0.1337)'], {}), 
'(0.0154, 0.1337)\n', (13736, 13752), True, 'import numpy as np\n'), ((13877, 13910), 'numpy.random.normal', 'np.random.normal', (['(-0.0013)', '(0.0711)'], {}), '(-0.0013, 0.0711)\n', (13893, 13910), True, 'import numpy as np\n')]
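Editor's note: a hedged sketch (not part of the original dataset row) exercising a few of the standalone helpers defined in the code above on toy values. The numbers are made up for illustration; it assumes bbox_xywh_to_xyxy, bbox_clip_xyxy, and aa_to_quat_numpy from this row are importable.

import numpy as np

xywh = np.array([[10.0, 20.0, 50.0, 40.0]])           # one box as (x, y, w, h)
xyxy = bbox_xywh_to_xyxy(xywh)                         # -> [[10., 20., 59., 59.]]
clipped = bbox_clip_xyxy(xyxy, width=48, height=48)    # clamp to a 48x48 image
quat = aa_to_quat_numpy(np.array([[np.pi, 0.0, 0.0]]))  # 180 deg about x -> approx [0, 1, 0, 0]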
# -*- coding: utf-8 -*- import sys import os sys.path.insert(1, os.path.abspath(os.path.curdir)) import numpy as np from models import Connect4ActionMaskModel from config.connect4_config import Connect3Config from utils.learning_behaviour_utils import LSTM_model,split_train_val,\ minimax_vs_minimax_connect3_single_game,return_one_hot,\ minimax_vs_minimax_connect3_single_game_plus_outcome,count_elem_in_dataset from config.learning_behaviour_config import Config from config.custom_config import Config as C from env.connect4_multiagent_env import Connect4Env from tensorflow import keras from numpy import random from ray.rllib.agents.ppo import PPOTrainer from config.trainer_config import TrainerConfig if __name__ == "__main__": _ = Connect4ActionMaskModel data_dir = C.DATA_DIR lstm_hidden = Config.LSTM_HIDDEN[-2] # best weights batch_size = Config.BATCH_SIZE lstm_timesteps = Config.LSTM_TIMESTEPS outcome_as_feature = Config.OUTCOME_AS_FEATURE if outcome_as_feature: features_len = Config.FEATURES_LEN_2 else: features_len = Config.FEATURES_LEN output_len = Config.OUTPUT_LEN best_weights_npy = os.path.join(data_dir,"lstm_best_weights.npy") lstm_weights = np.load(best_weights_npy,allow_pickle=True) number_of_evaluation_games = Config.NUMBER_OF_EVALUATION_GAMES # 100 number_of_games_to_test = Config.NUMBER_OF_GAMES_TO_TEST #[1,2,3,4,5] depth_list = Config.DEPTH_LIST # [1,4,6] number_of_stochastic_moves = 6 sequence_len = lstm_timesteps npy_weights_file = os.path.join(data_dir,"weights.npy") weights = np.load(npy_weights_file,allow_pickle=True)[()] play = True trainer_obj = PPOTrainer( config=TrainerConfig.PPO_TRAINER_CONNECT3, ) model = trainer_obj.get_policy("player1").model # ============================================================================= # TEST THE MODEL # ============================================================================= import tensorflow as tf lstm_model = LSTM_model(batch_size,(lstm_timesteps,features_len),output_len,lstm_hidden,False) # generate a fake input to define the model stucture and then load the weights # [batch,timestep,features] # random_input = np.random.rand(1,lstm_timesteps,features_len) random_input = np.random.rand(1,lstm_timesteps,features_len) random_input = random_input.astype('float32') lstm_model(random_input) lstm_model.set_weights(lstm_weights[()]) # ============================================================================= # SETTINGS # ============================================================================= randomize = True player1_ID = Connect3Config.PLAYER1_ID player2_ID = Connect3Config.PLAYER2_ID player1 = Connect3Config.PLAYER1 player2 = Connect3Config.PLAYER2 game = Connect4Env(None,width=Connect3Config.WIDTH, height=Connect3Config.HEIGHT, n_actions=Connect3Config.N_ACTIONS, connect=Connect3Config.CONNECT, ) possible_answ = ["y","n"] def indx_to_lvl(indx): if indx < 4: return 1 elif indx < 8: return 4 elif indx <12: return 6 def lvl_to_indx(lvl): if lvl == 0: return random.choice(range(4)) elif lvl == 1: return random.choice(range(4,8)) elif lvl == 2: return random.choice(range(8,12)) number_of_games = 0 while play: if number_of_games == 0: w2_indx = np.random.choice(range(len(weights))) w2_key = list(weights.keys())[w2_indx] w2 = weights[w2_key] lvl = indx_to_lvl(w2_indx) # lvl = weights.keys().index(w2) else: w2_indx = lvl_to_indx(int(predicted_indx)) w2_key = list(weights.keys())[w2_indx] w2 = weights[w2_key] lvl = indx_to_lvl(w2_indx) print("You are facing the opponent of level " + str(lvl)) full_game = [] timestep = 0 
game_over = False actions = {} if randomize: starting_player = random.choice([player1_ID, player2_ID]) else: starting_player = player1_ID game.reset(starting_player=starting_player,randomize=False) board_plus_action_total = [] while not game_over: if timestep == 0: print(game) timestep += 1 actual_player = game.current_player board = game.board board_p2 = game.board_p2 #game.render() if actual_player == player1_ID: action_mask = game.get_moves(False) choosing_action = True act = None action_mask_1 = [x+1 for x in action_mask] while choosing_action: act = input("select an action:") act = int(act) if act in action_mask_1: choosing_action = False act = act-1 flattened_board = np.ndarray.flatten(board) board_plus_actions = np.append(flattened_board,float(act)) board_plus_action_total.append([board_plus_actions]) actions[player1] = act _, rew, done, _ = game.step(actions) print(game) # game.render() elif actual_player == player2_ID: input_dict = {"obs": {}} action_mask = game.get_moves(True) input_dict["obs"]["state"] = board_p2 #reshaped_board input_dict["obs"]["action_mask"] = action_mask action_logits, _ = model.forward(input_dict, None, None) if timestep > number_of_stochastic_moves: act = np.argmax(action_logits[0]) elif timestep <= number_of_stochastic_moves: action_prob = [np.exp(single_log)/sum(np.exp(action_logits[0])) for single_log in action_logits[0]] act = np.random.choice([0,1,2,3,4],1,p=action_prob)[0] actions[player2] = act _, rew, done, _ = game.step(actions) print(game) #game.render() else: raise ValueError("Player index is not valid, should be 0 or 1") if done["__all__"]: # ADD ENCODED GAME TO THE LISt if rew[player1] == 1: print("Player 1 won!!!") elif rew[player1] == -1: print("Player 2 won!!!") elif rew[player1] == 0: print("Draw") if len(board_plus_action_total) < sequence_len: print("Game finished too early, restarting...") timestep = 0 game.reset(randomize=True) game_over = False continue game_over = True board_plus_action_and_outcome = board_plus_action_total for j in range(len(board_plus_action_and_outcome)-(sequence_len-1)): full_game.append([]) full_game[-1].append(board_plus_action_and_outcome[j:j+sequence_len]) full_game = np.asarray(full_game) full_game = np.squeeze(full_game) if len(full_game.shape) == 2: full_game = np.expand_dims(full_game, axis=0) full_game = full_game.astype("float32") y = lstm_model(full_game,training=False) predicted_values = tf.math.reduce_mean(y,axis=0) predicted_indx = tf.math.argmax(predicted_values) answ = None waiting_answ = True while waiting_answ: answ = input("Do you want to play another game?\n") if answ in possible_answ: waiting_answ = False else: print("command unknown please only print y or n") if answ == "y": play = True number_of_games += 1 elif answ == "n": play = False print("Thanks you for playing")
[ "os.path.abspath", "numpy.load", "tensorflow.math.reduce_mean", "tensorflow.math.argmax", "env.connect4_multiagent_env.Connect4Env", "numpy.argmax", "utils.learning_behaviour_utils.LSTM_model", "numpy.asarray", "ray.rllib.agents.ppo.PPOTrainer", "numpy.expand_dims", "numpy.exp", "numpy.random.choice", "numpy.random.rand", "numpy.squeeze", "os.path.join", "numpy.ndarray.flatten" ]
[((64, 95), 'os.path.abspath', 'os.path.abspath', (['os.path.curdir'], {}), '(os.path.curdir)\n', (79, 95), False, 'import os\n'), ((1175, 1222), 'os.path.join', 'os.path.join', (['data_dir', '"""lstm_best_weights.npy"""'], {}), "(data_dir, 'lstm_best_weights.npy')\n", (1187, 1222), False, 'import os\n'), ((1241, 1285), 'numpy.load', 'np.load', (['best_weights_npy'], {'allow_pickle': '(True)'}), '(best_weights_npy, allow_pickle=True)\n', (1248, 1285), True, 'import numpy as np\n'), ((1576, 1613), 'os.path.join', 'os.path.join', (['data_dir', '"""weights.npy"""'], {}), "(data_dir, 'weights.npy')\n", (1588, 1613), False, 'import os\n'), ((1721, 1774), 'ray.rllib.agents.ppo.PPOTrainer', 'PPOTrainer', ([], {'config': 'TrainerConfig.PPO_TRAINER_CONNECT3'}), '(config=TrainerConfig.PPO_TRAINER_CONNECT3)\n', (1731, 1774), False, 'from ray.rllib.agents.ppo import PPOTrainer\n'), ((2079, 2169), 'utils.learning_behaviour_utils.LSTM_model', 'LSTM_model', (['batch_size', '(lstm_timesteps, features_len)', 'output_len', 'lstm_hidden', '(False)'], {}), '(batch_size, (lstm_timesteps, features_len), output_len,\n lstm_hidden, False)\n', (2089, 2169), False, 'from utils.learning_behaviour_utils import LSTM_model, split_train_val, minimax_vs_minimax_connect3_single_game, return_one_hot, minimax_vs_minimax_connect3_single_game_plus_outcome, count_elem_in_dataset\n'), ((2370, 2417), 'numpy.random.rand', 'np.random.rand', (['(1)', 'lstm_timesteps', 'features_len'], {}), '(1, lstm_timesteps, features_len)\n', (2384, 2417), True, 'import numpy as np\n'), ((2927, 3074), 'env.connect4_multiagent_env.Connect4Env', 'Connect4Env', (['None'], {'width': 'Connect3Config.WIDTH', 'height': 'Connect3Config.HEIGHT', 'n_actions': 'Connect3Config.N_ACTIONS', 'connect': 'Connect3Config.CONNECT'}), '(None, width=Connect3Config.WIDTH, height=Connect3Config.HEIGHT,\n n_actions=Connect3Config.N_ACTIONS, connect=Connect3Config.CONNECT)\n', (2938, 3074), False, 'from env.connect4_multiagent_env import Connect4Env\n'), ((1627, 1671), 'numpy.load', 'np.load', (['npy_weights_file'], {'allow_pickle': '(True)'}), '(npy_weights_file, allow_pickle=True)\n', (1634, 1671), True, 'import numpy as np\n'), ((7712, 7733), 'numpy.asarray', 'np.asarray', (['full_game'], {}), '(full_game)\n', (7722, 7733), True, 'import numpy as np\n'), ((7754, 7775), 'numpy.squeeze', 'np.squeeze', (['full_game'], {}), '(full_game)\n', (7764, 7775), True, 'import numpy as np\n'), ((8021, 8051), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (8040, 8051), True, 'import tensorflow as tf\n'), ((8076, 8108), 'tensorflow.math.argmax', 'tf.math.argmax', (['predicted_values'], {}), '(predicted_values)\n', (8090, 8108), True, 'import tensorflow as tf\n'), ((4268, 4307), 'numpy.random.choice', 'random.choice', (['[player1_ID, player2_ID]'], {}), '([player1_ID, player2_ID])\n', (4281, 4307), False, 'from numpy import random\n'), ((7838, 7871), 'numpy.expand_dims', 'np.expand_dims', (['full_game'], {'axis': '(0)'}), '(full_game, axis=0)\n', (7852, 7871), True, 'import numpy as np\n'), ((5274, 5299), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['board'], {}), '(board)\n', (5292, 5299), True, 'import numpy as np\n'), ((6121, 6148), 'numpy.argmax', 'np.argmax', (['action_logits[0]'], {}), '(action_logits[0])\n', (6130, 6148), True, 'import numpy as np\n'), ((6356, 6407), 'numpy.random.choice', 'np.random.choice', (['[0, 1, 2, 3, 4]', '(1)'], {'p': 'action_prob'}), '([0, 1, 2, 3, 4], 1, p=action_prob)\n', (6372, 6407), True, 
'import numpy as np\n'), ((6245, 6263), 'numpy.exp', 'np.exp', (['single_log'], {}), '(single_log)\n', (6251, 6263), True, 'import numpy as np\n'), ((6268, 6292), 'numpy.exp', 'np.exp', (['action_logits[0]'], {}), '(action_logits[0])\n', (6274, 6292), True, 'import numpy as np\n')]
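Editor's note: a hedged, standalone restatement (not part of the original dataset row) of the stochastic opening-move selection the script above performs inline, i.e. a softmax over the policy logits followed by sampling. The example logits are made up; subtracting the maximum is only a numerical-stability tweak, not something the original does.

import numpy as np

def sample_action(action_logits):
    logits = np.asarray(action_logits, dtype=np.float64)
    probs = np.exp(logits - logits.max())   # softmax numerator
    probs /= probs.sum()
    return np.random.choice(len(logits), p=probs)

act = sample_action([0.2, 1.5, -0.3, 0.0, 0.7])   # illustrative logits for 5 columns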
import pandas as pd
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
from numpy import genfromtxt
import os


def gaus(feature):
    data = genfromtxt('./dataset/modified_weather/' + feature + '.csv', delimiter=',')
    data = data.ravel()
    # clear nan values
    data = data[~np.isnan(data)]
    data = data.tolist()
    data = sorted(data)
    df = pd.DataFrame(data)
    a = df.describe()
    max_bound = (int(a.loc["max"]))
    upper_mid = (int(a.loc["75%"]))
    middle = (int(a.loc["50%"]))
    lower_mid = (int(a.loc["25%"]))
    min_bound = (int(a.loc["min"]))

    df = pd.read_csv('./dataset/modified_weather/' + feature + '.csv')
    # Change csv file inputs with labels, excluding datetime (first column)
    for i in range(1, len(df.columns)):
        city = df.columns[i]
        # print(city)
        boo1 = (df[city] >= min_bound) & (df[city] < lower_mid)
        boo2 = (df[city] >= lower_mid) & (df[city] < middle)
        boo3 = (df[city] >= middle) & (df[city] < upper_mid)
        boo4 = (df[city] >= upper_mid) & (df[city] <= max_bound)
        df.loc[boo1, [df.columns[i]]] = 1
        df.loc[boo2, [df.columns[i]]] = 2
        df.loc[boo3, [df.columns[i]]] = 3
        df.loc[boo4, [df.columns[i]]] = 4

    if not os.path.exists("./dataset/modified_weather/labelled_weather"):
        os.makedirs("./dataset/modified_weather/labelled_weather")
    df.to_csv('./dataset/modified_weather/labelled_weather/' + feature + '.csv', index=False, encoding="utf-8")
    return


def restore_desc():
    df = pd.read_csv("./dataset/modified_weather/weather_description.csv", delimiter=',')
    columns = df.columns[1:]
    s = set()
    for index, row in df.iterrows():
        for i in columns:
            s.add(row[i])
    dic = {}
    count = 1
    for i in s:
        dic[i] = count
        count += 1
    for col in columns:
        df[col] = df[col].map(dic)
    if not os.path.exists("./dataset/modified_weather/labelled_weather"):
        os.makedirs("./dataset/modified_weather/labelled_weather")
    df.to_csv('./dataset/modified_weather/labelled_weather/weather_description.csv', index=False, encoding="utf-8")


def generate_labelled():
    gaus("humidity")
    gaus("pressure")
    gaus("temperature")
    gaus("wind_speed")
    restore_desc()
[ "pandas.DataFrame", "os.makedirs", "pandas.read_csv", "os.path.exists", "numpy.genfromtxt", "numpy.isnan" ]
[((182, 257), 'numpy.genfromtxt', 'genfromtxt', (["('./dataset/modified_weather/' + feature + '.csv')"], {'delimiter': '""","""'}), "('./dataset/modified_weather/' + feature + '.csv', delimiter=',')\n", (192, 257), False, 'from numpy import genfromtxt\n'), ((400, 418), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (412, 418), True, 'import pandas as pd\n'), ((640, 701), 'pandas.read_csv', 'pd.read_csv', (["('./dataset/modified_weather/' + feature + '.csv')"], {}), "('./dataset/modified_weather/' + feature + '.csv')\n", (651, 701), True, 'import pandas as pd\n'), ((1625, 1710), 'pandas.read_csv', 'pd.read_csv', (['"""./dataset/modified_weather/weather_description.csv"""'], {'delimiter': '""","""'}), "('./dataset/modified_weather/weather_description.csv', delimiter=','\n )\n", (1636, 1710), True, 'import pandas as pd\n'), ((1332, 1393), 'os.path.exists', 'os.path.exists', (['"""./dataset/modified_weather/labelled_weather"""'], {}), "('./dataset/modified_weather/labelled_weather')\n", (1346, 1393), False, 'import os\n'), ((1404, 1462), 'os.makedirs', 'os.makedirs', (['"""./dataset/modified_weather/labelled_weather"""'], {}), "('./dataset/modified_weather/labelled_weather')\n", (1415, 1462), False, 'import os\n'), ((2020, 2081), 'os.path.exists', 'os.path.exists', (['"""./dataset/modified_weather/labelled_weather"""'], {}), "('./dataset/modified_weather/labelled_weather')\n", (2034, 2081), False, 'import os\n'), ((2092, 2150), 'os.makedirs', 'os.makedirs', (['"""./dataset/modified_weather/labelled_weather"""'], {}), "('./dataset/modified_weather/labelled_weather')\n", (2103, 2150), False, 'import os\n'), ((323, 337), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (331, 337), True, 'import numpy as np\n')]
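Editor's note: a hedged sketch (not part of the original dataset row) showing roughly the same quartile-based 1-4 labelling that gaus() above builds with boolean masks, here expressed with pandas' built-in qcut on a toy Series. The data are made up, and the result is only approximately equivalent because gaus() truncates the describe() quartile bounds to integers.

import pandas as pd

temps = pd.Series([270, 275, 281, 283, 290, 295, 301, 303])
labels = pd.qcut(temps, q=4, labels=[1, 2, 3, 4])
print(labels.tolist())   # quartile label per value: 1 = lowest quarter, 4 = highest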
# -*- coding: utf-8 -*- from __future__ import annotations import warnings import collections from typing import Dict, Optional from dataclasses import dataclass, field import numpy as np import pandas as pd # type: ignore import xarray as xr from shapely.geometry import LineString # type: ignore from shapely.geometry.base import BaseGeometry # type: ignore with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) import geopandas as gpd # type: ignore from .base import _TimeStepResolver from ..types import StrOrPath @dataclass class Edges(_TimeStepResolver): """Class for extracting results on the edges of the simulation grid for flexible mesh (``'fm'``) models. Use in conjunction with the :class:`.Result` class. >>> from snl_d3d_cec_verify import Result >>> data_dir = getfixture('data_dir') >>> result = Result(data_dir) >>> result.edges.extract_sigma(-1, 0.5) #doctest: +ELLIPSIS geometry u1 ... n0 n1 0 LINESTRING (0.00000 1.00000, 0.00000 2.00000) 9.753143e-01 ... 1.0 -0.0 ... :param nc_path: path to the ``.nc`` file containing results :param n_steps: number of time steps in the simulation """ _t_steps: Dict[int, pd.Timestamp] = field(default_factory=dict, init=False, repr=False) _frame: Optional[gpd.GeoDataFrame] = field(default=None, init=False, repr=False) def extract_sigma(self, t_step: int, value: float, goem: Optional[BaseGeometry] = None ) -> gpd.GeoDataFrame: """Extract data from the grid edges for a given time step and sigma level (:code:`sigma`). Available data is: * :code:`u1`: velocity, in metres per second * :code:`k`: turbulent kinetic energy, in metres squared per second squared * :code:`n0`: edge normal x-coordinate * :code:`n1`: edge normal y-coordinate Results are returned as a :class:`geopandas.GeoDataFrame`, either for all of the edges or for the result of the intersection with :code:`geom` if set. For example: >>> from shapely.geometry import LineString >>> from snl_d3d_cec_verify import Result >>> data_dir = getfixture('data_dir') >>> result = Result(data_dir) >>> line = LineString([(6, 2), (10, 2)]) >>> result.edges.extract_sigma(-1, 0.5, line) geometry u1 $k$ 0 POINT (10.00000 2.00000) 0.991826 -0.004130 1 POINT (6.00000 2.00000) 0.991709 -0.004194 2 POINT (7.00000 2.00000) 0.974911 -0.004177 3 POINT (8.00000 2.00000) 0.992091 -0.004168 4 POINT (9.00000 2.00000) 0.976797 -0.004141 :param t_step: Time step index :param sigma: sigma level :param goem: Optional shapely geometry, where data is extracted on the intersection with the grid edges using the :meth:`object.intersection` method. :raises IndexError: if the time-step index (``t_step``) is out of range :return: Returns a :class:`geopandas.GeoDataFrame` with :class:`LineString` geometries for each edge or the result of the intersection with :code:`geom` if set. 
""" t_step = self._resolve_t_step(t_step) if t_step not in self._t_steps: self._load_t_step(t_step) assert self._frame is not None gdf = self._frame.copy() gdf['wkt'] = gdf['geometry'].apply(lambda geom: geom.wkt) gdf = gdf.set_index(['wkt', 'time']) gdf = gdf.xs(self._t_steps[t_step], level=1) data = collections.defaultdict(list) for _, group in gdf.groupby(by="wkt"): geometry = group["geometry"].values[0] n0 = group["n0"].values[0] n1 = group["n1"].values[0] gframe = group.set_index("sigma") gframe = gframe.drop("geometry", axis=1) df = pd.DataFrame(gframe) svalues = df.reindex(df.index.union([value]) ).interpolate('slinear', fill_value="extrapolate", limit_direction="both").loc[value] data["geometry"].append(geometry) data["u1"].append(svalues["u1"]) data["$k$"].append(svalues["turkin1"]) data["n0"].append(n0) data["n1"].append(n1) gframe = gpd.GeoDataFrame(data) if goem is None: return gframe pdata = {} pfilter = gframe.intersection(goem).geom_type == "Point" pdata["geometry"] = gframe.intersection(goem)[pfilter] pdata["u1"] = gframe[pfilter]["u1"] pdata["$k$"] = gframe[pfilter]["$k$"] gframe = gpd.GeoDataFrame(pdata) gframe["wkt"] = gframe["geometry"].apply(lambda geom: geom.wkt) gframe = gframe.drop_duplicates(["wkt"]) gframe = gframe.drop("wkt", axis=1) return gframe.reset_index(drop=True) def _load_t_step(self, t_step: int): t_step = self._resolve_t_step(t_step) if t_step in self._t_steps: return frame = _map_to_edges_geoframe(self.nc_path, t_step) if self._frame is None: self._frame = frame else: self._frame = pd.concat([self._frame, frame], ignore_index=True) self._t_steps[t_step] = pd.Timestamp(frame["time"].unique().take(0)) def _map_to_edges_geoframe(map_path: StrOrPath, t_step: int = None) -> gpd.GeoDataFrame: data = collections.defaultdict(list) with xr.open_dataset(map_path) as ds: if t_step is None: t_steps = tuple(range(len(ds.time))) else: t_steps = (t_step,) for istep in t_steps: time = ds.time[istep].values.take(0) edge_node_values = ds.mesh2d_edge_nodes.values edge_face_values = ds.mesh2d_edge_faces.values node_x_values = ds.mesh2d_node_x.values node_y_values = ds.mesh2d_node_y.values face_x_values = ds.mesh2d_face_x.values face_y_values = ds.mesh2d_face_y.values layer_sigma_values = ds.mesh2d_layer_sigma.values interface_sigma_values = ds.mesh2d_interface_sigma.values u1_values = ds.mesh2d_u1.values tke_values = ds.mesh2d_turkin1.values for iedge in ds.mesh2d_nEdges.values: points = [] two = (0, 1) for inode in two: index = edge_node_values[iedge, inode] - 1 x = node_x_values[index] y = node_y_values[index] p = np.array((x, y)) points.append(p) line = LineString(points) linevec = points[1] - points[0] normvec = np.array((-linevec[1], linevec[0])) points = [] faces = [] for iface in two: index = int(edge_face_values[iedge, iface]) - 1 if index < 0: p = np.array(line.centroid.coords) else: x = face_x_values[index] y = face_y_values[index] p = np.array((x, y)) points.append(p) faces.append(index) facevec = points[1] - points[0] normvec *= np.dot(facevec, normvec) normvec /= np.linalg.norm(normvec) tke = np.nan for ilayer in ds.mesh2d_nLayers.values: sigma = layer_sigma_values[ilayer] u1 = u1_values[istep, iedge, ilayer] data["geometry"].append(line) data["sigma"].append(sigma) data["time"].append(time) data["u1"].append(u1) data["turkin1"].append(tke) data["n0"].append(normvec[0]) data["n1"].append(normvec[1]) data["f0"].append(faces[0]) data["f1"].append(faces[1]) u1 = np.nan for iinterface in ds.mesh2d_nInterfaces.values: sigma = interface_sigma_values[iinterface] tke 
= tke_values[istep, iedge, iinterface] data["geometry"].append(line) data["sigma"].append(sigma) data["time"].append(time) data["u1"].append(u1) data["turkin1"].append(tke) data["n0"].append(normvec[0]) data["n1"].append(normvec[1]) data["f0"].append(faces[0]) data["f1"].append(faces[1]) gdf = gpd.GeoDataFrame(data) return gdf[["geometry", "sigma", "time", "u1", "turkin1", "n0", "n1", "f0", "f1"]]
[ "pandas.DataFrame", "warnings.filterwarnings", "xarray.open_dataset", "dataclasses.field", "collections.defaultdict", "geopandas.GeoDataFrame", "shapely.geometry.LineString", "warnings.catch_warnings", "numpy.array", "numpy.linalg.norm", "numpy.dot", "pandas.concat" ]
[((369, 394), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (392, 394), False, 'import warnings\n'), ((400, 462), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (423, 462), False, 'import warnings\n'), ((1344, 1395), 'dataclasses.field', 'field', ([], {'default_factory': 'dict', 'init': '(False)', 'repr': '(False)'}), '(default_factory=dict, init=False, repr=False)\n', (1349, 1395), False, 'from dataclasses import dataclass, field\n'), ((1529, 1572), 'dataclasses.field', 'field', ([], {'default': 'None', 'init': '(False)', 'repr': '(False)'}), '(default=None, init=False, repr=False)\n', (1534, 1572), False, 'from dataclasses import dataclass, field\n'), ((6208, 6237), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (6231, 6237), False, 'import collections\n'), ((9843, 9865), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['data'], {}), '(data)\n', (9859, 9865), True, 'import geopandas as gpd\n'), ((4098, 4127), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (4121, 4127), False, 'import collections\n'), ((4980, 5002), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['data'], {}), '(data)\n', (4996, 5002), True, 'import geopandas as gpd\n'), ((5332, 5355), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['pdata'], {}), '(pdata)\n', (5348, 5355), True, 'import geopandas as gpd\n'), ((6252, 6277), 'xarray.open_dataset', 'xr.open_dataset', (['map_path'], {}), '(map_path)\n', (6267, 6277), True, 'import xarray as xr\n'), ((4442, 4462), 'pandas.DataFrame', 'pd.DataFrame', (['gframe'], {}), '(gframe)\n', (4454, 4462), True, 'import pandas as pd\n'), ((5902, 5952), 'pandas.concat', 'pd.concat', (['[self._frame, frame]'], {'ignore_index': '(True)'}), '([self._frame, frame], ignore_index=True)\n', (5911, 5952), True, 'import pandas as pd\n'), ((7528, 7546), 'shapely.geometry.LineString', 'LineString', (['points'], {}), '(points)\n', (7538, 7546), False, 'from shapely.geometry import LineString\n'), ((7621, 7656), 'numpy.array', 'np.array', (['(-linevec[1], linevec[0])'], {}), '((-linevec[1], linevec[0]))\n', (7629, 7656), True, 'import numpy as np\n'), ((8342, 8366), 'numpy.dot', 'np.dot', (['facevec', 'normvec'], {}), '(facevec, normvec)\n', (8348, 8366), True, 'import numpy as np\n'), ((8394, 8417), 'numpy.linalg.norm', 'np.linalg.norm', (['normvec'], {}), '(normvec)\n', (8408, 8417), True, 'import numpy as np\n'), ((7434, 7450), 'numpy.array', 'np.array', (['(x, y)'], {}), '((x, y))\n', (7442, 7450), True, 'import numpy as np\n'), ((7952, 7982), 'numpy.array', 'np.array', (['line.centroid.coords'], {}), '(line.centroid.coords)\n', (7960, 7982), True, 'import numpy as np\n'), ((8135, 8151), 'numpy.array', 'np.array', (['(x, y)'], {}), '((x, y))\n', (8143, 8151), True, 'import numpy as np\n')]
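The sigma-level extraction in the Edges record above rests on one pandas idiom: reindex the per-edge profile so the requested sigma becomes an index label, run an index-aware 'slinear' interpolation, then read that row back out. A minimal, self-contained sketch of the same pattern follows; the sigma levels and velocities are made up for illustration, and only the reindex/interpolate call mirrors the record.

import pandas as pd

# One edge's u1 profile sampled at three sigma levels.
profile = pd.DataFrame({"u1": [0.2, 0.5, 0.9]}, index=[-0.9, -0.5, -0.1])
profile.index.name = "sigma"

value = -0.3  # requested sigma level, not on the original grid
row = (profile.reindex(profile.index.union([value]))
              .interpolate("slinear",
                           fill_value="extrapolate",
                           limit_direction="both")
              .loc[value])
print(row["u1"])  # 0.7, halfway between the samples at sigma=-0.5 and sigma=-0.1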
# -*- coding: utf-8 -*- import requests from bs4 import BeautifulSoup import time import os import numpy as np from nltk.tokenize import word_tokenize import unicodedata import re import pickle GREEK_STOP = ['αδιακοπα', 'αι', 'ακομα', 'ακομη', 'ακριβως', 'αληθεια', 'αληθινα', 'αλλα', 'αλλαχου', 'αλλες', 'αλλη', 'αλλην', 'αλλης', 'αλλιως', 'αλλιωτικα', 'αλλο', 'αλλοι', 'αλλοιως', 'αλλοιωτικα', 'αλλον', 'αλλος', 'αλλοτε', 'αλλου', 'αλλους', 'αλλων', 'αμα', 'αμεσα', 'αμεσως', 'αν', 'ανα', 'αναμεσα', 'αναμεταξυ', 'ανευ', 'αντι', 'αντιπερα', 'αντις', 'ανω', 'ανωτερω', 'αξαφνα', 'απ’', 'απεναντι', 'απο', 'αποψε', 'αρα', 'αραγε', 'αργα', 'αργοτερο', 'αρκετα', 'αρχικα', 'ας', 'αυριο', 'αυτα', 'αυτες', 'αυτη', 'αυτην', 'αυτης', 'αυτο', 'αυτοι', 'αυτον', 'αυτος', 'αυτου', 'αυτους', 'αυτων', 'αφοτου', 'αφου', 'βεβαια', 'βεβαιως', 'βεβαιοτατα', 'γι’', 'για', 'γρηγορα', 'γυρω', '∆α', '∆ε', '∆εινα', '∆εν', '∆εξια', '∆ηθεν', '∆ηλα∆η', 'δεν', 'δες', 'δεσ', 'δι’', 'δια', 'διαρκως', 'δικα', 'δικο', 'δικοι', 'δικοι', 'δικου', 'δικους', 'διολου', 'διπλα', 'διχως', 'εαν', 'εαυτο', 'εαυτον', 'εαυτου', 'εαυτους', 'εαυτων', 'εγκαιρα', 'εγκαιρως', 'εγω', 'εδω', 'ειδεμη', 'ειθε', 'ειμαι', 'ειμαστε', 'ειναι', 'εις', 'εισαι', 'εισαστε', 'ειστε', 'ειτε', 'ειχα', 'ειχαμε', 'ειχαν', 'ειχατε', 'ειχε', 'ειχες', 'εκαστα', 'εκαστες', 'εκαστη', 'εκαστην', 'εκαστης', 'εκαστο', 'εκαστοι', 'εκαστον', 'εκαστος', 'εκαστου', 'εκαστους', 'εκαστων', 'εκει', 'εκεινα', 'εκεινες', 'εκεινη', 'εκεινην', 'εκεινης', 'εκεινο', 'εκεινοι', 'εκεινον', 'εκεινος', 'εκεινου', 'εκεινους', 'εκεινων', 'εκτος', 'εμας', 'εμεις', 'εμενα', 'εμπρος', 'εν', 'ενα', 'εναν', 'ενας', 'ενος', 'εντελως', 'εντος', 'εντωμεταξυ', 'ενω', 'εξ', 'εξαφνα', 'εξης', 'εξισου', 'εξω', 'εο', 'επανω', 'επειδη', 'επειτα', 'επι', 'επισης', 'επομενως', 'εσας', 'εσεις', 'εσενα', 'εστω', 'εσυ', 'ετερα', 'ετεραι', 'ετερας', 'ετερες', 'ετερη', 'ετερης', 'ετερο', 'ετεροι', 'ετερον', 'ετερος', 'ετερου', 'ετερους', 'ετερων', 'ετουτα', 'ετουτες', 'ετουτη', 'ετουτην', 'ετουτης', 'ετουτο', 'ετουτοι', 'ετουτον', 'ετουτος', 'ετουτου', 'ετουτους', 'ετουτων', 'ετσι', 'ευγε', 'ευθυς', 'ευτυχως', 'εφεξης', 'εχει', 'εχεις', 'εχετε', 'εχθες', 'εχομε', 'εχουμε', 'εχουν', 'εχτες', 'εχω', 'εως', 'Η', 'η', 'η', 'ηδη', 'ημασταν', 'ημαστε', 'ημουν', 'ησασταν', 'ησαστε', 'ησουν', 'ηταν', 'ητανε', 'ητοι', 'ηττον', 'θα', 'ι', 'ιδια', 'ιδιαν', 'ιδιας', 'ιδιες', 'ιδιο', 'ιδιοι', 'ιδιον', 'ιδιος', 'ιδιου', 'ιδιους', 'ιδιων', 'ιδιως', 'ισαμε', 'ισια', 'ισως', 'καθε', 'καθεμια', 'καθεμιας', 'καθενα', 'καθενας', 'καθενος', 'καθετι', 'καθολου', 'καθως', 'και', 'κακα', 'κακως', 'καλα', 'καλως', 'καμια', 'καμιαν', 'καμιας', 'καμποσα', 'καμποσες', 'καμποση', 'καμποσην', 'καμποσης', 'καμποσο', 'καμποσοι', 'καμποσον', 'καμποσος', 'καμποσου', 'καμποσους', 'καμποσων', 'κανεις', 'κανει', 'καντε', 'καναμε', 'κανενα', 'κανεναν', 'κανενας', 'κανενος', 'καποια', 'καποιαν', 'καποιας', 'καποιες', 'καποιο', 'καποιοι', 'καποιον', 'καποιος', 'καποιου', 'καποιους', 'καποιων', 'καποτε', 'καπου', 'καπως', 'κατ’', 'κατα', 'κατι', 'κατιτι', 'κατοπιν', 'κατω', 'κιολας', 'κλπ.', 'κοντα', 'κτλ.', 'κυριως', 'λιγακι', 'λιγο', 'λιγοτερο', 'λογω', 'λοιπα', 'λοιπον', 'μα', 'μαζι', 'μακαρι', 'μακρια', 'μαλιστα', 'μαλλον', 'μας', 'με', 'μεθαυριο', 'μειον', 'μελει', 'μελλεται', 'μεμιας', 'μεν', 'μερικα', 'μερικες', 'μερικοι', 'μερικους', 'μερικων', 'μεσα', 'μετ', 'μετα', 'μεταξυ', 'μεχρι', 'μη', 'μηδε', 'μην', 'μηπως', 'μητε', 'μια', 'μιαν', 'μιας', 'μολις', 'μολονοτι', 'μοναχα', 'μονες', 'μονη', 'μονη', 'μονης', 'μονο', 'μονοι', 'μονομιας', 
'μονος', 'μονου', 'μονους', 'μονων', 'μου', 'μπορει', 'μπορουν', 'μπραβο', 'μπρος', 'να', 'ναι', 'νωρις', 'ξανα', 'ξαφνικα', 'ο', 'οε', 'οι', 'ολα', 'ολες', 'ολη', 'ολην', 'ολης', 'ολο', 'ολογυρα', 'ολοι', 'ολον', 'ολος', 'ολοτελα', 'ολου', 'ολους', 'ολων', 'ολως', 'ολωσδιολου', 'ομως', 'οποια', 'οποιαδηποτε', 'οποιαν', 'οποιανδηποτε', 'οποιας', 'οποιοδηποτε', 'οποιες', 'οποιεσδηποτε', 'οποιο', 'οποιοδηποτε', 'οποιοι', 'οποιον', 'οποιονδηποτε', 'οποιος', 'οποιοσδηποτε', 'οποιου', 'οποιουδηποτε', 'οποιους', 'οποιων', 'οποιωνδηποτε', 'οποτε', 'οποτεδηποτε', 'οπου', 'οπουδηποτε', 'οπως', 'ορισμενα', 'ορισμενες', 'ορισμενων', 'ορισμενως', 'οσα', 'οσαδηποτε', 'οσες', 'οσεσδηποτε', 'οση', 'οσηδηποτε', 'οσην', 'οσηνδηποτε', 'οσης', 'οσησδηποτε', 'οσο', 'οσοδηποτε', 'οσοι', 'οσοιδηποτε', 'οσον', 'οσονδηποτε', 'οσος', 'οσοσδηποτε', 'οσου', 'οσουδηποτε', 'οσους', 'οσουσδηποτε', 'οσων', 'οσωνδηποτε', 'οταν', 'οτι', 'οτιδηποτε', 'οτου', 'ου', 'ουδε', 'ουτε', 'παλι', 'παντοτε', 'παντου', 'παντως', 'παρα', 'περα', 'περι', 'περιπου', 'περισσοτερο', 'περσι', 'περυσι', 'πια', 'πιθανον', 'πιο', 'πισω', 'πλαι', 'πλεον', 'πλην', 'ποια', 'ποιαν', 'ποιας', 'ποιες', 'ποιο', 'ποιοι', 'ποιον', 'ποιος', 'ποιου', 'ποιους', 'ποιων', 'πολυ', 'ποσες', 'ποση', 'ποσην', 'ποσης', 'ποσοι', 'ποσος', 'ποσους', 'ποτε', 'που', 'πουθε', 'πουθενα', 'πρεπει', 'πριν', 'προ', 'προκειμενου', 'προκειται', 'προπερσι', 'προς', 'προτου', 'προχθες', 'προχτες', 'πρωτυτερα', 'πως', 'σαν', 'σας', 'σε', 'σεις', 'σημερα', 'σιγα', 'σου', 'στα', 'στη', 'στην', 'στης', 'στις', 'στο', 'στον', 'στου', 'στους', 'στων', 'συγχρονως', 'συν', 'συναμα', 'συνεπως', 'συνηθως', 'συχνα', 'συχνες', 'συχνη', 'συχνην', 'συχνης', 'συχνο', 'συχνοι', 'συχνον', 'συχνος', 'συχνου', 'συχνους', 'συχνων', 'σχεδον', 'σωστα', 'τα', 'ταδε', 'ταυτα', 'ταυτες', 'ταυτη', 'ταυτην', 'ταυτης', 'ταυτο', 'ταυτον', 'ταυτος', 'ταυτου', 'ταυτων', 'ταχα', 'ταχατε', 'τελικα', 'τελικως', 'τες', 'τετοια', 'τετοιαν', 'τετοιας', 'τετοιες', 'τετοιο', 'τετοιοι', 'τετοιον', 'τετοιος', 'τετοιου', 'τετοιους', 'τετοιων', 'τη', 'την', 'της', 'τι', 'τιποτα', 'τιποτε', 'τις', 'το', 'τοι', 'τον', 'τος', 'τοσα', 'τοσες', 'τοση', 'τοσην', 'τοσης', 'τοσο', 'τοσοι', 'τοσον', 'τοσος', 'τοσου', 'τοσους', 'τοσων', 'τοτε', 'του', 'τουλαχιστο', 'τουλαχιστον', 'τους', 'τουτα', 'τουτες', 'τουτη', 'τουτην', 'τουτης', 'τουτο', 'τουτοι', 'τουτοις', 'τουτον', 'τουτος', 'τουτου', 'τουτους', 'τουτων', 'τυχον', 'των', 'τωρα', 'υπ’', 'υπερ', 'υπο', 'υποψη', 'υποψιν', 'υστερα', 'φετος', 'χαμηλα', 'χθες', 'χτες', 'χωρις', 'χωριστα', 'ψηλα', 'ω', 'ωραια', 'ως', 'ωσαν', 'ωσοτου', 'ωσπου', 'ωστε', 'ωστοσο', 'ωχ', '-', '+', '=', '«', '»', '(', ')', '[', ']', ';', '?', '<', '> ', 'βιντεο', 'video', 'vid', 'RT', 'retweet'] tags = ['!', '"', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '[', ']', '^', '_', '`', '{', '|', '}', '~', '«', '»', '€'] Tfidf_vect = pickle.load(open("vectorizer.pickle", "rb")) def word_spliter(page_text): global GREEK_STOP, tags page_text = page_text.encode('utf-8') page_text = page_text.lower() for tag in tags: page_text=page_text.replace(tag, '') page_text = ', '.join(word_tokenize(page_text)) # Afairesh tonwn def strip_accents(s): return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn') page_text = strip_accents(page_text.decode('utf-8')) page_text = page_text.split(",") final_words = [] for word in page_text: if word.encode('utf-8') not in GREEK_STOP: final_words.append(word) return page_text def 
prediction_NB(page): global Tfidf_vect Naive = pickle.load(open('NB_model.sav', 'rb')) TestX=Tfidf_vect.transform(page) predictions_NB = Naive.predict(TestX) predictions_NB_proba = Naive.predict_proba(TestX) proba = predictions_NB_proba[:, 0] proba = np.mean(proba) return proba def prediction_SVM(page): global Tfidf_vect SVM = pickle.load(open('SVM_model.sav', 'rb')) TestX = Tfidf_vect.transform(page) predictions_SVM = SVM.predict(TestX) predictions_SVM_proba = SVM.predict_proba(TestX) proba = predictions_SVM_proba[:, 0] proba = np.mean(proba) return proba def focused_crawler(seed_url): URL_ranking = dict() checked=[] crawled_count = 0 frontier_urls = [seed_url] frontier_url = seed_url URL_ranking.update( {seed_url : 0.0} ) seen_urls = [] newpath = r'Logs' if not os.path.exists(newpath): os.makedirs(newpath) # if not os.path.exists(newpath): # os.makedirs(newpath) crawler_log = open("Logs/crawler_log.txt","w") crawler_log.write("Seed : "+seed_url+"\n\n") crawled_count+=1 crawler_log.write(str(crawled_count)+") "+seed_url+"\n\n") # flag is True if the limit of 1000 URLS has not been reached flag = True print(str(crawled_count)+") "+seed_url) source_code = requests.get(seed_url) plain_text = source_code.text soup = BeautifulSoup(plain_text,"html.parser") # if we want to download the raw HTML, it is done from here # name = seed_url[seed_url.rfind('/')+1:] # name = open("Raw_HTML_Downloads/"+(str(crawled_count))+") "+name+".txt","w") # name.write(str(soup.prettify(encoding='utf-8'))) # name.close() while(flag): extracted_urls = [] # Traversing through all the URLs to be crawled as pointed by the Frontier while (frontier_url is not None): # Enter only if limit of 1000 URLs not reached if flag: time.sleep(1) # get the soup source_code = requests.get(frontier_url) if source_code == None: continue plain_text = source_code.text soup = BeautifulSoup(plain_text,"html.parser") url = frontier_url extracted_urls.append(url) # add it to the downloaded URLs crawled_count += 1 crawler_log.write(str(crawled_count) + ") " + url + "\n") try: URL_ranking[url] except Exception: checked.append(frontierbyValue[0]) frontierbyValue.pop(0) frontier_url=frontierbyValue[0][0] continue print(str(crawled_count) + ") " + url+' ' + str(URL_ranking[url])) URL_ranking.pop(url) # remove the url from the list page = soup.find_all('p') content = ' '.join(item.text for item in page) new_page=content#str(content.encode('utf-8')) page = word_spliter(new_page) # #choose Naive Bayes or SVM to rate rate = prediction_NB(page) #rate = prediction_SVM(page) for link in soup.find_all('a', href=re.compile('')): if not link['href'].startswith(seed_url): continue if not link['href'].endswith('.html'): continue if crawled_count < 1000 and flag: href_url = link['href'] if ':' in href_url: url = href_url # URL should not be in either of Frontier, Extracted or Seen lists and should not be Wiki Main Page too if url not in frontier_urls and url not in extracted_urls and url not in seen_urls: URL_ranking.update({url:rate}) frontierbyValue = sorted(URL_ranking.items(), reverse=True, key=lambda x: x[1]) frontier_url=frontierbyValue[0][0] if len(frontierbyValue)>200: frontierbyValue = frontierbyValue[:len(frontierbyValue) // 2] # keep the first half URL_ranking=dict(frontierbyValue) # halve the dictionary else: frontier_url = frontierbyValue[1][0] seen_urls.append(extracted_urls) if len(extracted_urls) == 0: flag = False break frontier_urls = extracted_urls crawler_log.write("\n") if flag: print("Searched") crawler_log.write("--------\n") crawler_log.write("Logistics 
:\n\n") crawler_log.write("Number of matching searches : "+str(crawled_count)+"\n") seed_url = 'https://www.sport24.gr' focused_crawler(seed_url)
[ "unicodedata.normalize", "os.makedirs", "unicodedata.category", "os.path.exists", "time.sleep", "numpy.mean", "requests.get", "bs4.BeautifulSoup", "nltk.tokenize.word_tokenize", "re.compile" ]
[((7697, 7711), 'numpy.mean', 'np.mean', (['proba'], {}), '(proba)\n', (7704, 7711), True, 'import numpy as np\n'), ((7993, 8007), 'numpy.mean', 'np.mean', (['proba'], {}), '(proba)\n', (8000, 8007), True, 'import numpy as np\n'), ((8664, 8686), 'requests.get', 'requests.get', (['seed_url'], {}), '(seed_url)\n', (8676, 8686), False, 'import requests\n'), ((8726, 8766), 'bs4.BeautifulSoup', 'BeautifulSoup', (['plain_text', '"""html.parser"""'], {}), "(plain_text, 'html.parser')\n", (8739, 8766), False, 'from bs4 import BeautifulSoup\n'), ((7037, 7061), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['page_text'], {}), '(page_text)\n', (7050, 7061), False, 'from nltk.tokenize import word_tokenize\n'), ((8248, 8271), 'os.path.exists', 'os.path.exists', (['newpath'], {}), '(newpath)\n', (8262, 8271), False, 'import os\n'), ((8275, 8295), 'os.makedirs', 'os.makedirs', (['newpath'], {}), '(newpath)\n', (8286, 8295), False, 'import os\n'), ((9226, 9239), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (9236, 9239), False, 'import time\n'), ((9278, 9304), 'requests.get', 'requests.get', (['frontier_url'], {}), '(frontier_url)\n', (9290, 9304), False, 'import requests\n'), ((9387, 9427), 'bs4.BeautifulSoup', 'BeautifulSoup', (['plain_text', '"""html.parser"""'], {}), "(plain_text, 'html.parser')\n", (9400, 9427), False, 'from bs4 import BeautifulSoup\n'), ((7133, 7164), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 's'], {}), "('NFD', s)\n", (7154, 7164), False, 'import unicodedata\n'), ((7176, 7199), 'unicodedata.category', 'unicodedata.category', (['c'], {}), '(c)\n', (7196, 7199), False, 'import unicodedata\n'), ((10220, 10234), 're.compile', 're.compile', (['""""""'], {}), "('')\n", (10230, 10234), False, 'import re\n')]
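The crawler in the record above is a best-first (focused) crawler: each fetched page is scored by the text classifier, its outgoing links inherit that score, and the frontier is re-sorted so the highest-scoring URL is fetched next, with the ranking dictionary halved once it exceeds 200 entries. A stripped-down sketch of that priority-frontier idea, with a stand-in scoring function and an in-memory link graph instead of TF-IDF, Naive Bayes and live HTTP requests:

import heapq

def best_first_crawl(seed, get_links, score, limit=100):
    frontier = [(-1.0, seed)]              # max-heap via negated scores
    seen = {seed}
    visited = []
    while frontier and len(visited) < limit:
        _, url = heapq.heappop(frontier)   # most promising URL first
        visited.append(url)
        page_score = score(url)            # stand-in for prediction_NB(page)
        for link in get_links(url):        # stand-in for the BeautifulSoup link loop
            if link not in seen:
                seen.add(link)
                heapq.heappush(frontier, (-page_score, link))
    return visited

graph = {"a": ["b", "c"], "b": ["d"], "c": [], "d": []}
scores = {"a": 0.9, "b": 0.2, "c": 0.8, "d": 0.1}
print(best_first_crawl("a", graph.get, scores.get))   # ['a', 'b', 'c', 'd']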
"""One dimensional dataset, Gaussian with sinus wave mean, variance increasing in x Taken from paper: "https://arxiv.org/abs/1906.01620" """ import logging from pathlib import Path import csv import numpy as np import torch.utils.data import matplotlib.pyplot as plt class GaussianSinus(torch.utils.data.Dataset): """1D sinusoidal data with noise, increasing with x""" def __init__(self, store_file, train=True, range_=(-3, 3), reuse_data=False, n_samples=1000): super(GaussianSinus).__init__() self._log = logging.getLogger(self.__class__.__name__) self.n_samples = n_samples self.range = range_ self.train = train self.file = Path(store_file) if self.file.exists() and reuse_data: self.validate_dataset() else: self._log.info("Sampling new data") self.sample_new_data() def __len__(self): return self.n_samples def __getitem__(self, index): sample = None with self.file.open(newline="") as csv_file: csv_reader = csv.reader(csv_file, delimiter=",", quotechar="|") for count, row in enumerate(csv_reader): if count == index: sample = row break inputs = sample[:-1] targets = sample[-1] return (np.array(inputs, dtype=np.float32), np.array([targets], dtype=np.float32)) @staticmethod def x_to_y_mapping(x): """Noisy mapping defining the dataset""" mu = np.sin(x) sigma = 0.15 * 1 / (1 + np.exp(-x)) y = np.random.multivariate_normal(mean=mu, cov=(np.diag(sigma))) return y, mu, sigma def sample_new_data(self): self.file.parent.mkdir(parents=True, exist_ok=True) lower, upper = self.range if self.train: x = np.random.uniform(low=lower, high=upper, size=self.n_samples) else: x = np.random.uniform(low=lower, high=upper, size=self.n_samples) y, _, _ = self.x_to_y_mapping(x) combined_data = np.column_stack((x, y)) np.random.shuffle(combined_data) np.savetxt(self.file, combined_data, delimiter=",") def validate_dataset(self): with self.file.open(newline="") as csv_file: csv_reader = csv.reader(csv_file, delimiter=",", quotechar="|") assert self.n_samples - 1 == sum(1 for row in csv_reader) def get_full_data(self, sorted_=False): """Get full dataset as numpy array""" tmp_raw_data = list() with self.file.open(newline="") as csv_file: csv_reader = csv.reader(csv_file, delimiter=",", quotechar="|") tmp_raw_data = [data for data in csv_reader] full_data = np.array(tmp_raw_data, dtype=float) if sorted_: full_data = full_data[full_data[:, 0].argsort()] return full_data def plot_reg_data(data, ax): inputs = data[:, :-1] targets = data[:, -1] ax.scatter(inputs, targets) plt.show() def plot_uncert(ax, data, x): inputs = data[:, :-1] targets = data[:, -1] mu = np.sin(x) sigma = 0.15 * 1 / (1 + np.exp(-x)) ax.scatter(inputs, targets) ax.plot(x, mu, "r-", label="$\mu(x)$") ax.fill_between(x, mu + sigma, mu - sigma, facecolor="blue", alpha=0.5, label="$\mu(x) \pm \sigma(x)$") plt.legend(prop={'size': 20}) plt.show() def main(): _, ax = plt.subplots() dataset = GaussianSinus(store_file=Path("data/1d_gauss_sinus_1000")) start = -3 end = 3 step = 0.25 x_length = int((end - start) / step) x = np.arange(start=start, stop=end, step=step, dtype=np.float) plot_uncert(ax, dataset.get_full_data(), x) if __name__ == "__main__": main()
[ "numpy.random.uniform", "matplotlib.pyplot.show", "numpy.random.shuffle", "csv.reader", "matplotlib.pyplot.legend", "numpy.savetxt", "pathlib.Path", "numpy.sin", "numpy.arange", "numpy.array", "numpy.exp", "numpy.column_stack", "numpy.diag", "matplotlib.pyplot.subplots", "logging.getLogger" ]
[((3117, 3127), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3125, 3127), True, 'import matplotlib.pyplot as plt\n'), ((3221, 3230), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (3227, 3230), True, 'import numpy as np\n'), ((3559, 3588), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 20}"}), "(prop={'size': 20})\n", (3569, 3588), True, 'import matplotlib.pyplot as plt\n'), ((3593, 3603), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3601, 3603), True, 'import matplotlib.pyplot as plt\n'), ((3630, 3644), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3642, 3644), True, 'import matplotlib.pyplot as plt\n'), ((3810, 3869), 'numpy.arange', 'np.arange', ([], {'start': 'start', 'stop': 'end', 'step': 'step', 'dtype': 'np.float'}), '(start=start, stop=end, step=step, dtype=np.float)\n', (3819, 3869), True, 'import numpy as np\n'), ((620, 662), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (637, 662), False, 'import logging\n'), ((774, 790), 'pathlib.Path', 'Path', (['store_file'], {}), '(store_file)\n', (778, 790), False, 'from pathlib import Path\n'), ((1631, 1640), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1637, 1640), True, 'import numpy as np\n'), ((2172, 2195), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (2187, 2195), True, 'import numpy as np\n'), ((2204, 2236), 'numpy.random.shuffle', 'np.random.shuffle', (['combined_data'], {}), '(combined_data)\n', (2221, 2236), True, 'import numpy as np\n'), ((2245, 2296), 'numpy.savetxt', 'np.savetxt', (['self.file', 'combined_data'], {'delimiter': '""","""'}), "(self.file, combined_data, delimiter=',')\n", (2255, 2296), True, 'import numpy as np\n'), ((2856, 2891), 'numpy.array', 'np.array', (['tmp_raw_data'], {'dtype': 'float'}), '(tmp_raw_data, dtype=float)\n', (2864, 2891), True, 'import numpy as np\n'), ((1159, 1209), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(csv_file, delimiter=',', quotechar='|')\n", (1169, 1209), False, 'import csv\n'), ((1432, 1466), 'numpy.array', 'np.array', (['inputs'], {'dtype': 'np.float32'}), '(inputs, dtype=np.float32)\n', (1440, 1466), True, 'import numpy as np\n'), ((1484, 1521), 'numpy.array', 'np.array', (['[targets]'], {'dtype': 'np.float32'}), '([targets], dtype=np.float32)\n', (1492, 1521), True, 'import numpy as np\n'), ((1951, 2012), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'lower', 'high': 'upper', 'size': 'self.n_samples'}), '(low=lower, high=upper, size=self.n_samples)\n', (1968, 2012), True, 'import numpy as np\n'), ((2044, 2105), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'lower', 'high': 'upper', 'size': 'self.n_samples'}), '(low=lower, high=upper, size=self.n_samples)\n', (2061, 2105), True, 'import numpy as np\n'), ((2408, 2458), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(csv_file, delimiter=',', quotechar='|')\n", (2418, 2458), False, 'import csv\n'), ((2728, 2778), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(csv_file, delimiter=',', quotechar='|')\n", (2738, 2778), False, 'import csv\n'), ((3259, 3269), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3265, 3269), True, 'import numpy as np\n'), ((3684, 3716), 'pathlib.Path', 'Path', (['"""data/1d_gauss_sinus_1000"""'], {}), "('data/1d_gauss_sinus_1000')\n", (3688, 3716), False, 'from pathlib import Path\n'), 
((1673, 1683), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1679, 1683), True, 'import numpy as np\n'), ((1741, 1755), 'numpy.diag', 'np.diag', (['sigma'], {}), '(sigma)\n', (1748, 1755), True, 'import numpy as np\n')]
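The toy problem this dataset implements is a heteroscedastic 1-D regression: y is drawn around mu(x) = sin(x) with a noise level sigma(x) = 0.15 / (1 + exp(-x)) that grows with x. An equivalent element-wise sampler is sketched below; note it treats sigma(x) as a standard deviation, whereas the file above feeds np.diag(sigma) in as the covariance of a multivariate normal, so the two conventions differ by a square.

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(-3.0, 3.0, 1000)
mu = np.sin(x)
sigma = 0.15 / (1.0 + np.exp(-x))   # noise level increases with x
y = rng.normal(mu, sigma)             # sigma used as a standard deviation here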
import numpy as np from pytest import approx, raises import context # noqa from src import line_search from src.least_squares import least_squares class TestGoldenSection: def test_correct_1d(self): """ Check if one dimensional problems which are well specified are solved correctly. """ solution = line_search.goldensection( func=lambda x: (x - 4)**2, # minimum at x = 4 x=3, # start dx=2, # search direction: -f'(3) ) assert solution['x'] == approx(4) solution = line_search.goldensection( func=lambda x: (x - 4)**2, # minimum at x = 4 x=5, # start dx=-2, # search direction: -f'(5) ) assert solution['x'] == approx(4) def test_correct_nd(self): """ Check if n-dimensional problems which are well specified are solved correctly. """ for n in range(2, 10): A = np.random.uniform( low=-1, high=1, size=(n, n) ) b = np.random.uniform( low=-1, high=1, size=(n, 1) ) LS = least_squares(A, b) # Least squares instance ||Ax-b||^2 x0 = np.random.uniform( low=-1, high=1, size=(n, 1) ) # pick random starting point dx = 2*A.T@A@x0 - 2*A.T@b # derivative of x^TA^TAx-2x^TA^Tb+b^Tb solution = line_search.goldensection( func=lambda x: LS(x), x=x0, dx=-dx, ) eps = 1e-3 # small deviation # now check that we have found the minimum in this search direction # since function is convex we just have to check small deviations # from the solution assert LS(solution['x']) < LS(solution['x'] + eps*dx) assert LS(solution['x']) < LS(solution['x'] - eps*dx) def test_undefined_start(self): """ Check behaviour when an undefined start point is given. Search should not fail silently, an explicit error is expected. """ with raises(ZeroDivisionError): line_search.goldensection( func=lambda x: 1/x, x=0, # division by zero dx=1, # dummy value, no sensible search dir. at start point ) with raises((ValueError, TypeError)): line_search.goldensection( func=lambda x: x**2 if abs(x) < 1 else None, x=2, # function undefined at this value dx=-4, # good search direction ) def test_bad_search_direction(self): """ Check that the search fails explicitly when a bad search direction is given and a minimum cannot be bracketed. """ with raises(ValueError): line_search.goldensection( func=lambda x: (x - 4)**2, # minimum at x = 4 x=3, # start dx=-1, # bad search direction; away from minimum )
[ "numpy.random.uniform", "src.least_squares.least_squares", "src.line_search.goldensection", "pytest.raises", "pytest.approx" ]
[((339, 404), 'src.line_search.goldensection', 'line_search.goldensection', ([], {'func': '(lambda x: (x - 4) ** 2)', 'x': '(3)', 'dx': '(2)'}), '(func=lambda x: (x - 4) ** 2, x=3, dx=2)\n', (364, 404), False, 'from src import line_search\n'), ((569, 635), 'src.line_search.goldensection', 'line_search.goldensection', ([], {'func': '(lambda x: (x - 4) ** 2)', 'x': '(5)', 'dx': '(-2)'}), '(func=lambda x: (x - 4) ** 2, x=5, dx=-2)\n', (594, 635), False, 'from src import line_search\n'), ((539, 548), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (545, 548), False, 'from pytest import approx, raises\n'), ((770, 779), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (776, 779), False, 'from pytest import approx, raises\n'), ((970, 1016), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(n, n)'}), '(low=-1, high=1, size=(n, n))\n', (987, 1016), True, 'import numpy as np\n'), ((1095, 1141), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(n, 1)'}), '(low=-1, high=1, size=(n, 1))\n', (1112, 1141), True, 'import numpy as np\n'), ((1221, 1240), 'src.least_squares.least_squares', 'least_squares', (['A', 'b'], {}), '(A, b)\n', (1234, 1240), False, 'from src.least_squares import least_squares\n'), ((1296, 1342), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(n, 1)'}), '(low=-1, high=1, size=(n, 1))\n', (1313, 1342), True, 'import numpy as np\n'), ((2230, 2255), 'pytest.raises', 'raises', (['ZeroDivisionError'], {}), '(ZeroDivisionError)\n', (2236, 2255), False, 'from pytest import approx, raises\n'), ((2269, 2327), 'src.line_search.goldensection', 'line_search.goldensection', ([], {'func': '(lambda x: 1 / x)', 'x': '(0)', 'dx': '(1)'}), '(func=lambda x: 1 / x, x=0, dx=1)\n', (2294, 2327), False, 'from src import line_search\n'), ((2477, 2508), 'pytest.raises', 'raises', (['(ValueError, TypeError)'], {}), '((ValueError, TypeError))\n', (2483, 2508), False, 'from pytest import approx, raises\n'), ((2922, 2940), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2928, 2940), False, 'from pytest import approx, raises\n'), ((2954, 3020), 'src.line_search.goldensection', 'line_search.goldensection', ([], {'func': '(lambda x: (x - 4) ** 2)', 'x': '(3)', 'dx': '(-1)'}), '(func=lambda x: (x - 4) ** 2, x=3, dx=-1)\n', (2979, 3020), False, 'from src import line_search\n')]
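The tests above exercise src.line_search.goldensection only through its interface (a starting point x and a search direction dx); the implementation itself is not part of this record. For reference, a generic textbook golden-section reduction on an interval that already brackets the minimum — not necessarily what the package does internally — looks like this:

import math

def golden_section_min(f, a, b, tol=1e-6):
    # Shrink [a, b] around the minimum of a unimodal f using the golden ratio.
    invphi = (math.sqrt(5) - 1) / 2           # 1/phi ~ 0.618
    c = b - invphi * (b - a)
    d = a + invphi * (b - a)
    while abs(b - a) > tol:
        if f(c) < f(d):
            b = d
        else:
            a = c
        c = b - invphi * (b - a)
        d = a + invphi * (b - a)
    return (a + b) / 2

print(golden_section_min(lambda x: (x - 4) ** 2, 3, 5))   # ~4.0, matching approx(4)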
import numpy as np # from numpy import linalg as LA import matplotlib.pyplot as plt # import networkx as nx # from scipy import integrate from dynamic_systems import Dynamics import reservoir NODES = 400 TIME_STEP = 0.1 TRAINING_TIME = 260 TRANSIENT_TIME = 200 TEST_TIME = 100 INPUTS = [0] OUTPUTS = [1, 2] INITIAL_CONDITION = (0.0, 0.0, 0.0) # PARAMETER CONSTANTS D = 20 #the average degree of a reservoir node SPECTRAL_RADIUS = 1.0 SIGMA = 1.0 LEAKAGE_RATE = 1.0 BIAS_CONSTANT = 1.0 BETA = 0.00005 total_time = TRANSIENT_TIME + TRAINING_TIME + TEST_TIME # options for dynamic systems: rossler, lorenz, modified_lorenz. dynamic_system = Dynamics.rossler DATA = reservoir.gen_data(dynamic_system, INITIAL_CONDITION, total_time, TIME_STEP) input_train, output_train, input_test, output_test = reservoir.split_data(DATA, INPUTS, OUTPUTS, TRAINING_TIME, TRANSIENT_TIME, TIME_STEP) A = reservoir.gen_A(NODES, D, SPECTRAL_RADIUS) W_in = reservoir.gen_Win(NODES, len(INPUTS), SIGMA) W_out, R, rbar, sbar = reservoir.train(input_train, output_train, A, W_in, NODES, len(INPUTS), len(OUTPUTS), TRAINING_TIME, LEAKAGE_RATE, BIAS_CONSTANT, BETA, TIME_STEP) s_t = reservoir.test(input_test, W_out, A, W_in, NODES, len(INPUTS), len(OUTPUTS), LEAKAGE_RATE, BIAS_CONSTANT, R, rbar, sbar) print("RMS error in calculation of y: ", np.sqrt(np.mean((s_t[0,:]-output_test[:1000, 0])**2))) print("RMS error in calculation of z: ", np.sqrt(np.mean((s_t[1,:]-output_test[:1000, 1])**2))) # This block of code is used to test the variability of the ridge regression parameter. # betas = [0, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1] # RMSy = np.zeros(12) # RMSz = np.zeros(12) # for idx, BETA in enumerate(betas): # W_out, R, rbar, sbar = reservoir.train(input_train, output_train, A, W_in, NODES, len(INPUTS), len(OUTPUTS), TRAINING_TIME, LEAKAGE_RATE, BIAS_CONSTANT, BETA, TIME_STEP) # s_t = reservoir.test(input_test, W_out, A, W_in, NODES, len(INPUTS), len(OUTPUTS), LEAKAGE_RATE, BIAS_CONSTANT, R, rbar, sbar) # RMSy[idx] = np.sqrt(np.mean((s_t[0,:]-output_test[:1000, 0])**2)) # RMSz[idx] = np.sqrt(np.mean((s_t[1,:]-output_test[:1000, 1])**2)) # # print(RMSy) # print(RMSz) # # fig = plt.figure() # plt.subplot(2,1,1) # plt.loglog(betas, RMSy) # plt.title("RMSE in the y-component") # plt.ylabel('RMS') # # plt.subplot(2,1,2) # plt.loglog(betas, RMSz) # plt.title("RMSE in the z-component") # plt.ylabel('RMS') # plt.xlabel('beta') # # plt.show() # Visualize t = np.arange(0, TEST_TIME, TIME_STEP) fig = plt.figure(figsize=(30,20)) plt.subplot(3, 1, 1) plt.plot(t, input_test[:1000]) plt.ylabel('Input x') plt.subplot(3, 1, 2) plt.plot(t, s_t[0,:], 'r', label='Reservoir output') plt.plot(t, output_test[:1000, 0], 'b--', label='Actual y') plt.ylabel('output and actual y') plt.legend(loc='upper right') plt.subplot(3, 1, 3) plt.plot(t, s_t[1,:], 'r', label='Reservoir output') plt.plot(t, output_test[:1000, 1], 'b--', label='Actual z') plt.xlabel('time (s)') plt.ylabel('output and actual z') plt.legend(loc='upper right') plt.show()
[ "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "reservoir.split_data", "reservoir.gen_data", "matplotlib.pyplot.figure", "numpy.mean", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "reservoir.gen_A" ]
[((667, 743), 'reservoir.gen_data', 'reservoir.gen_data', (['dynamic_system', 'INITIAL_CONDITION', 'total_time', 'TIME_STEP'], {}), '(dynamic_system, INITIAL_CONDITION, total_time, TIME_STEP)\n', (685, 743), False, 'import reservoir\n'), ((797, 886), 'reservoir.split_data', 'reservoir.split_data', (['DATA', 'INPUTS', 'OUTPUTS', 'TRAINING_TIME', 'TRANSIENT_TIME', 'TIME_STEP'], {}), '(DATA, INPUTS, OUTPUTS, TRAINING_TIME, TRANSIENT_TIME,\n TIME_STEP)\n', (817, 886), False, 'import reservoir\n'), ((887, 929), 'reservoir.gen_A', 'reservoir.gen_A', (['NODES', 'D', 'SPECTRAL_RADIUS'], {}), '(NODES, D, SPECTRAL_RADIUS)\n', (902, 929), False, 'import reservoir\n'), ((2494, 2528), 'numpy.arange', 'np.arange', (['(0)', 'TEST_TIME', 'TIME_STEP'], {}), '(0, TEST_TIME, TIME_STEP)\n', (2503, 2528), True, 'import numpy as np\n'), ((2536, 2564), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(30, 20)'}), '(figsize=(30, 20))\n', (2546, 2564), True, 'import matplotlib.pyplot as plt\n'), ((2564, 2584), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (2575, 2584), True, 'import matplotlib.pyplot as plt\n'), ((2585, 2615), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'input_test[:1000]'], {}), '(t, input_test[:1000])\n', (2593, 2615), True, 'import matplotlib.pyplot as plt\n'), ((2616, 2637), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Input x"""'], {}), "('Input x')\n", (2626, 2637), True, 'import matplotlib.pyplot as plt\n'), ((2639, 2659), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (2650, 2659), True, 'import matplotlib.pyplot as plt\n'), ((2660, 2713), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 's_t[0, :]', '"""r"""'], {'label': '"""Reservoir output"""'}), "(t, s_t[0, :], 'r', label='Reservoir output')\n", (2668, 2713), True, 'import matplotlib.pyplot as plt\n'), ((2713, 2772), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'output_test[:1000, 0]', '"""b--"""'], {'label': '"""Actual y"""'}), "(t, output_test[:1000, 0], 'b--', label='Actual y')\n", (2721, 2772), True, 'import matplotlib.pyplot as plt\n'), ((2773, 2806), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""output and actual y"""'], {}), "('output and actual y')\n", (2783, 2806), True, 'import matplotlib.pyplot as plt\n'), ((2807, 2836), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (2817, 2836), True, 'import matplotlib.pyplot as plt\n'), ((2838, 2858), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (2849, 2858), True, 'import matplotlib.pyplot as plt\n'), ((2859, 2912), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 's_t[1, :]', '"""r"""'], {'label': '"""Reservoir output"""'}), "(t, s_t[1, :], 'r', label='Reservoir output')\n", (2867, 2912), True, 'import matplotlib.pyplot as plt\n'), ((2912, 2971), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'output_test[:1000, 1]', '"""b--"""'], {'label': '"""Actual z"""'}), "(t, output_test[:1000, 1], 'b--', label='Actual z')\n", (2920, 2971), True, 'import matplotlib.pyplot as plt\n'), ((2972, 2994), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (s)"""'], {}), "('time (s)')\n", (2982, 2994), True, 'import matplotlib.pyplot as plt\n'), ((2995, 3028), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""output and actual z"""'], {}), "('output and actual z')\n", (3005, 3028), True, 'import matplotlib.pyplot as plt\n'), ((3029, 3058), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), 
"(loc='upper right')\n", (3039, 3058), True, 'import matplotlib.pyplot as plt\n'), ((3060, 3070), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3068, 3070), True, 'import matplotlib.pyplot as plt\n'), ((1329, 1378), 'numpy.mean', 'np.mean', (['((s_t[0, :] - output_test[:1000, 0]) ** 2)'], {}), '((s_t[0, :] - output_test[:1000, 0]) ** 2)\n', (1336, 1378), True, 'import numpy as np\n'), ((1425, 1474), 'numpy.mean', 'np.mean', (['((s_t[1, :] - output_test[:1000, 1]) ** 2)'], {}), '((s_t[1, :] - output_test[:1000, 1]) ** 2)\n', (1432, 1474), True, 'import numpy as np\n')]
# Copyright <NAME> 2017 """ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import numpy as np import pyopencl as cl import os import math import pytest from test import test_common from test.test_common import offset_type def test_memcpy(context, q, int_data, int_data_gpu): ll_code = """ declare void @_Z6memcpyPvPKvm(i8*, i8*, i64) define void @mykernel(i32* %data) { %1 = bitcast i32* %data to i8* %2 = getelementptr i32, i32* %data, i32 8 %3 = bitcast i32* %2 to i8* call void @_Z6memcpyPvPKvm(i8 *%3, i8 *%1, i64 32) ret void } """ cl_code = test_common.ll_to_cl(ll_code, 'mykernel', num_clmems=1) print('cl_code', cl_code) for i in range(8): int_data[i] = 3 + i cl.enqueue_copy(q, int_data_gpu, int_data) kernel = test_common.build_kernel(context, cl_code, 'mykernel') kernel(q, (32,), (32,), int_data_gpu, offset_type(0), offset_type(0), cl.LocalMemory(32)) from_gpu = np.copy(int_data) cl.enqueue_copy(q, from_gpu, int_data_gpu) q.finish() for i in range(8): print(i, from_gpu[8 + i]) assert from_gpu[8 + i] == 3 + i
[ "numpy.copy", "pyopencl.enqueue_copy", "test.test_common.build_kernel", "test.test_common.offset_type", "test.test_common.ll_to_cl", "pyopencl.LocalMemory" ]
[((1051, 1106), 'test.test_common.ll_to_cl', 'test_common.ll_to_cl', (['ll_code', '"""mykernel"""'], {'num_clmems': '(1)'}), "(ll_code, 'mykernel', num_clmems=1)\n", (1071, 1106), False, 'from test import test_common\n'), ((1192, 1234), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['q', 'int_data_gpu', 'int_data'], {}), '(q, int_data_gpu, int_data)\n', (1207, 1234), True, 'import pyopencl as cl\n'), ((1248, 1302), 'test.test_common.build_kernel', 'test_common.build_kernel', (['context', 'cl_code', '"""mykernel"""'], {}), "(context, cl_code, 'mykernel')\n", (1272, 1302), False, 'from test import test_common\n'), ((1412, 1429), 'numpy.copy', 'np.copy', (['int_data'], {}), '(int_data)\n', (1419, 1429), True, 'import numpy as np\n'), ((1434, 1476), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['q', 'from_gpu', 'int_data_gpu'], {}), '(q, from_gpu, int_data_gpu)\n', (1449, 1476), True, 'import pyopencl as cl\n'), ((1345, 1359), 'test.test_common.offset_type', 'offset_type', (['(0)'], {}), '(0)\n', (1356, 1359), False, 'from test.test_common import offset_type\n'), ((1361, 1375), 'test.test_common.offset_type', 'offset_type', (['(0)'], {}), '(0)\n', (1372, 1375), False, 'from test.test_common import offset_type\n'), ((1377, 1395), 'pyopencl.LocalMemory', 'cl.LocalMemory', (['(32)'], {}), '(32)\n', (1391, 1395), True, 'import pyopencl as cl\n')]
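The arithmetic behind the assertion in this test: getelementptr i32, i32* %data, i32 8 yields a pointer to data[8], and the memcpy moves 32 bytes, i.e. eight 4-byte int32 values, from data[0..7] to data[8..15]. Since the host filled data[0..7] with 3 + i, the copied-back buffer must satisfy from_gpu[8 + i] == 3 + i. In NumPy terms the kernel's intended effect is simply:

int_data[8:16] = int_data[0:8]   # 32 bytes == 8 int32 values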
#!/usr/bin/env python import matplotlib.pyplot as plt import numpy as np import argparse params = {'axes.labelsize': 14, 'axes.titlesize': 16, 'xtick.labelsize': 12, 'ytick.labelsize': 12, 'legend.fontsize': 14} plt.rcParams.update(params) if __name__ == '__main__': msg = "plot model" parser = argparse.ArgumentParser(description=msg) parser.add_argument('file_model', default=None, help='file of the model') parser.add_argument('--out', default=None, help=' output figure name') parser.add_argument('--xlim', nargs=2, type=float, default=[None, None]) parser.add_argument('--zmax', type=float, help='zmax for showing') args = parser.parse_args() file_model = args.file_model file_out = args.out zmax = args.zmax xlim = args.xlim model = np.loadtxt(file_model) z = model[:, 1] z = np.append(z, z[-1]*1.2) vs = model[:, 3] vs = np.append(vs, vs[-1]) _, ax = plt.subplots() ax.step(vs, z, 'k-') if zmax: ax.set_ylim([0.0, zmax]) else: ax.set_ylim([0.0, z[-1]]) if xlim[0] is not None: print(xlim) ax.set_xlim(xlim) ax.invert_yaxis() ax.set_xlabel('Vs (km/s)') ax.set_ylabel('Depth (km)') plt.tight_layout() if file_out: plt.savefig(file_out, dpi=300) plt.show()
[ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show", "argparse.ArgumentParser", "numpy.append", "matplotlib.pyplot.rcParams.update", "numpy.loadtxt", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig" ]
[((253, 280), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (272, 280), True, 'import matplotlib.pyplot as plt\n'), ((346, 386), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'msg'}), '(description=msg)\n', (369, 386), False, 'import argparse\n'), ((831, 853), 'numpy.loadtxt', 'np.loadtxt', (['file_model'], {}), '(file_model)\n', (841, 853), True, 'import numpy as np\n'), ((882, 907), 'numpy.append', 'np.append', (['z', '(z[-1] * 1.2)'], {}), '(z, z[-1] * 1.2)\n', (891, 907), True, 'import numpy as np\n'), ((936, 957), 'numpy.append', 'np.append', (['vs', 'vs[-1]'], {}), '(vs, vs[-1])\n', (945, 957), True, 'import numpy as np\n'), ((971, 985), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (983, 985), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1282), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1280, 1282), True, 'import matplotlib.pyplot as plt\n'), ((1344, 1354), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1352, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1309, 1339), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_out'], {'dpi': '(300)'}), '(file_out, dpi=300)\n', (1320, 1339), True, 'import matplotlib.pyplot as plt\n')]
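Run from a shell, the script above takes the model file as its positional argument plus the optional flags defined in its argparse block; the script and model file names below are placeholders, since the record does not give them:

python plot_model.py model.txt --out vs_profile.png --zmax 60 --xlim 2.5 5.0

The input is read with np.loadtxt, with depth taken from column 1 and Vs from column 3 of the model file.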
import numpy as np from PIL import Image import h5py from astropy.io import fits from astropy.table import Table from astropy.convolution import convolve from astropy.cosmology import WMAP7 as cosmo import astropy.units as u from astropy import wcs import glob import pickle import os import matplotlib.pyplot as plt #Set up data paths path = '/Volumes/Data_Disk/sky_maps/eagle/no-dust/' division = ['premerger', 'postmerger', 'nomerger'] #'premerger', 'postmerger', 'merge', 'nomerger' no_dim = len(division) extn = '.hdf5' path_psf = '/Volumes/Data_Disk/sky_maps/sdss/eagle-nodust2-' redshift_file = '/Volumes/Data_Disk/catalogues/sdss/darg_2010_mergers.fits' z_col = 'specz1' noise_path = '/Volumes/Data_Disk/sky_maps/sdss/noise/' do_redshift = True #Correct surface brightness for redshift do_reproject = True #Match physical resolution with angular resolution at redshift do_convolve = True #Convolve image with SDSS PSF do_noise = True #Add real SDSS noise high_z = False #Only use redshifts above ~0.03 so the simulated image is always made smaller when reprojected #Name output file to match what has been done if do_redshift: path_psf += 'z'#'-do_redshift' if do_reproject: path_psf += 'r'#'-do_reproject' if do_convolve: path_psf += 'c'#'-do_convolve' if do_noise: path_psf += 'n'#'-do_noise' path_psf += '/' #Channels and projections to use ch = np.array(['i', 'r', 'g']) #gri colours (but it's irg to fit rgb) projection = np.array(['_00', '_01', '_02', '_03', '_04', '_05']) scale = 1.0 d_eagle = 10e-6 #Distance, in Mpc, that the eagle objects are px = 1.0 #Not really needed. It's just a placeholder that's needed #Plot example images plot = False #Make output folders (if needed) if not os.path.exists(path_psf): os.makedirs(path_psf) for div in division: if not os.path.exists(path_psf+div): os.makedirs(path_psf+div) #Get dict of all EAGLE objects to make stamps objs = {} for div in division: objs[div] = glob.glob(path+div+'/*'+extn) noise = glob.glob(noise_path+'*.pkl') #Load PSF psf_scale = 1.0 psf_g = fits.getdata('/Volumes/Data_Disk/sky_maps/sdss/psf/sdss_psf_g.fits', 0) psf_g = np.array(psf_g, dtype='float') psf_g -= 1000 #Remove 0pt bias psf_g /= psf_scale print(np.max(psf_g), np.min(psf_g), np.sum(psf_g)) plt.imshow(psf_g) plt.show() psf_r = fits.getdata('/Volumes/Data_Disk/sky_maps/sdss/psf/sdss_psf_r.fits', 0) psf_r = np.array(psf_r, dtype='float') psf_r -= 1000 psf_r /= psf_scale print(np.max(psf_r), np.min(psf_r), np.sum(psf_r)) plt.imshow(psf_r) plt.show() psf_i = fits.getdata('/Volumes/Data_Disk/sky_maps/sdss/psf/sdss_psf_i.fits', 0) psf_i = np.array(psf_i, dtype='float') psf_i -= 1000 psf_i /= psf_scale print(np.max(psf_i), np.min(psf_i), np.sum(psf_i)) plt.imshow(psf_i) plt.show() #Load observation data (for redshifts) data = fits.getdata(redshift_file, 1) table = Table(data) from reproject import reproject_interp as repro def img_func(I, beta=1.0, up=None, lo=0.0): if up == None: up = np.max(i) fI = np.arcsinh(I/beta)/np.arcsinh((up-lo)/beta) bad = np.where(I < lo) fI[bad] = 0.0 bad = np.where(i > up) fI[bad] = 1.0 return fI #Scale to SDSS resolution blank = np.zeros((256,256)) #WCS for new projection wcs_new = wcs.WCS(naxis=2) wcs_new.wcs.crpix = [blank.shape[0]//2, blank.shape[1]//2] wcs_new.wcs.cdelt = np.array([0.00011, 0.00011]) wcs_new.wcs.crval = [0., 0.] 
wcs_new.wcs.ctype = ["RA---TAN", "DEC--TAN"] #WCS for EAGLE projection wcs_egl = wcs.WCS(naxis=2) wcs_egl.wcs.crpix = [blank.shape[0]//2, blank.shape[1]//2] wcs_egl.wcs.cdelt = np.array([px/3600, px/3600]) wcs_egl.wcs.crval = [0., 0.] wcs_egl.wcs.ctype = ["RA---TAN", "DEC--TAN"] #Loop over the divisions/classes for div in division: print(div) tenpct = len(objs[div])//10 #Store redshifts obj_name = [] obj_zsft = [] #Loop over objects in div for i in range(0, len(objs[div])): if plot: i = np.random.randint(len(objs[div])) print(objs[div][i]) if i % tenpct == 0: print(i, 'of', len(objs[div])) #Loop over the simulation projections for p in projection: #Load data, has to be reloaded for reasons (IDK) f = h5py.File(objs[div][i], 'r') px = 1.0 #get redshift and determine effective resolution of EAGLE if high_z: while px/3600 > 0.00011: rand_idx = np.random.randint(len(table)) kpc_as = cosmo.kpc_proper_per_arcmin(table[z_col][rand_idx]).to(u.kpc/u.arcsec) px = ( ((60*u.kpc) / kpc_as)/256 ).value #calculate pixel size at redshift else: rand_idx = np.random.randint(len(table)) kpc_as = cosmo.kpc_proper_per_arcmin(table[z_col][rand_idx]).to(u.kpc/u.arcsec) px = ( ((60*u.kpc) / kpc_as)/256 ).value #calculate pixel size at redshift wcs_egl.wcs.cdelt = np.array([px/3600, px/3600]) #update eagle WCS obj_name.append(objs[div][i][len(path)+len(div)+1:-len(extn)]+p) obj_zsft.append(table[z_col][rand_idx]) if do_redshift: r = np.array(f[ch[0]+p]) #Load data g = np.array(f[ch[1]+p]) b = np.array(f[ch[2]+p]) if plot: print('Origional') print(np.max(np.dstack((r, g, b)))) plt.imshow(np.dstack((r[96:160,96:160]/np.max(r[96:160,96:160]), g[96:160,96:160]/np.max(g[96:160,96:160]), b[96:160,96:160]/np.max(b[96:160,96:160])))) plt.show() print('Pixel size of', round(px,3), ' arcsec at z =', table[z_col][rand_idx]) #correct brightness for distance d_redsh = cosmo.luminosity_distance(table[z_col][rand_idx]).value r *= (d_eagle*d_eagle)/(d_redsh*d_redsh) g *= (d_eagle*d_eagle)/(d_redsh*d_redsh) b *= (d_eagle*d_eagle)/(d_redsh*d_redsh) #Convert from maggies to mJy (do now to prevent overflow) r *= 3.631e6 g *= 3.631e6 b *= 3.631e6 #Scale to match g-band observations r *= scale g *= scale b *= scale else: r = np.array(f[ch[0]+p]) #Load data g = np.array(f[ch[1]+p]) b = np.array(f[ch[2]+p]) r *= 3.631e6 #Convert from maggies to mJy g *= 3.631e6 b *= 3.631e6 r *= scale #Scale to match g-band observations g *= scale b *= scale if plot: print('Origional') print(np.max(np.dstack((r, g, b)))) plt.imshow(np.dstack((r[96:160,96:160]/np.max(r[96:160,96:160]), g[96:160,96:160]/np.max(g[96:160,96:160]), b[96:160,96:160]/np.max(b[96:160,96:160])))) plt.show() #Reproject if do_reproject: #reproject from EAGLE to new WCS r, _ = repro((r,wcs_egl), wcs_new, shape_out=(256,256)) r = r[96:160,96:160] #Crop to center 64 pixels bad = np.where(np.isnan(r)) #Find nans... 
r[bad] = 0.0 #...and replace with zeros g, _ = repro((g,wcs_egl), wcs_new, shape_out=(256,256)) g = g[96:160,96:160] bad = np.where(np.isnan(g)) g[bad] = 0.0 b, _ = repro((b,wcs_egl), wcs_new, shape_out=(256,256)) b = b[96:160,96:160] bad = np.where(np.isnan(b)) b[bad] = 0.0 if plot: print('Reprojected') print(np.max(np.dstack((r, g, b)))) plt.imshow(np.dstack((r/np.max(r), g/np.max(g), b/np.max(b)))) plt.show() else: r = r[96:160,96:160] #If not reprojecting, just crop to center 64 pixels g = g[96:160,96:160] b = b[96:160,96:160] #Convolve with PSF if do_convolve: #Convolve EAGLE image with SDSS PSF r = convolve(r, psf_i) g = convolve(g, psf_r) b = convolve(b, psf_g) if plot: print('Convolved') print(np.max(np.dstack((r, g, b)))) plt.imshow(np.dstack((r/np.max(r), g/np.max(g), b/np.max(b)))) plt.show() #Add noise if do_noise: noise_idx = np.random.randint(0, len(noise)) #Pick random noise with open(noise[noise_idx], "rb") as f: noise_map = pickle.load(f) f.close() rot = np.random.randint(4, size=1) #Pick random 90 degree rotation if plot: print('Noise:', noise[noise_idx]) print(np.max(noise_map[96:160,96:160]), np.mean(noise_map[96:160,96:160]), np.min(noise_map[96:160,96:160])) plt.imshow(noise_map[96:160,96:160]/np.max(noise_map[96:160,96:160])) plt.show() print('Histograms:') bins = np.arange(-5, -2, 0.1) plt.figure(figsize=(1.2*5.5, 3*1.25*(5.5*2)/3.0)) #print('\tr') plt.subplot(3, 1, 1) plt.title('r') plt.hist(np.log10(noise_map[96:160,96:160,2].flatten()), label='Noise', alpha=0.5, bins=bins) plt.hist(np.log10(r.flatten()), label='Sim', alpha=0.5, log=True, bins=bins) #print('\tg') plt.subplot(3, 1, 2) plt.title('g') plt.hist(np.log10(noise_map[96:160,96:160,1].flatten()), label='Noise', alpha=0.5, bins=bins) plt.hist(np.log10(g.flatten()), label='Sim', alpha=0.5, log=True, bins=bins) #print('\tb') plt.subplot(3, 1, 3) plt.title('b') plt.hist(np.log10(noise_map[96:160,96:160,0].flatten()), label='Noise', alpha=0.5, bins=bins) plt.hist(np.log10(b.flatten()), label='Sim', alpha=0.5, log=True, bins=bins) r += np.rot90(noise_map[96:160,96:160,2], rot) #Add noise, rotated by random 90 degree, to EAGLE g += np.rot90(noise_map[96:160,96:160,1], rot) b += np.rot90(noise_map[96:160,96:160,0], rot) if plot: plt.subplot(3, 1, 1) plt.hist(np.log10(r.flatten()), label='Combined', alpha=0.5, log=True, bins=bins) plt.legend(loc=2) plt.subplot(3, 1, 2) plt.hist(np.log10(g.flatten()), label='Combined', alpha=0.5, log=True, bins=bins) plt.legend(loc=2) plt.subplot(3, 1, 3) plt.hist(np.log10(b.flatten()), label='Combined', alpha=0.5, log=True, bins=bins) plt.legend(loc=2) plt.show() print('With Noise') #Lupton et al. 
2004, 2004PASP..116..133L I = (r+g+b)/3 #Define I up = np.min([np.max(r), np.max(g), np.max(b)]) #get upper limit #Zero correct colour channels lo = np.min([np.min(r), np.min(g), np.min(b)]) r -= np.min(r)#lo g -= np.min(g)#lo b -= np.min(b)#lo #Define big RGB R = r*img_func(I, beta=1., up=up)/I G = g*img_func(I, beta=1., up=up)/I B = b*img_func(I, beta=1., up=up)/I #zero out the bootm zro = np.where(I == 0) R[zro] = 0.0 G[zro] = 0.0 B[zro] = 0.0 #One out the Top RGB = np.dstack((R,G,B)) one = np.where(RGB > 1) RGB[one] = 1.0 #Convert to Image RGB = RGB * 255 #print(np.max(RGB), np.min(RGB)) IMG = Image.fromarray(RGB.astype(np.uint8), mode='RGB') #Linear scaling rgb = np.dstack((r,g,b)) #Stack to form RGB (with irg) rgb -= np.min(rgb) rgb = rgb / np.max(rgb) #Normalise linearly to 0.0-1.0 rgb = rgb * 255 #Scale to be 0-255 for saving img = Image.fromarray(rgb.astype(np.uint8), mode='RGB') #Make the simulation a PIL image if plot: print(np.max(np.dstack((r, g, b)))) print(np.max(r), np.max(g), np.max(b)) plt.imshow(img) plt.show() #plt.imshow(IMG) #plt.show() print(path_psf+objs[div][i][len(path):-len(extn)]+p+'.jpg') print(path_psf, objs[div][i][len(path)+len(div)+1:-len(extn)]+p) break #Save img.save(path_psf+objs[div][i][len(path):-len(extn)]+p+'.jpg') if plot: break if plot: break #Save galaxy name + projection and redshift used when reprojected cols = [] cols.append(fits.Column(name='Object', format='30A', array=obj_name)) cols.append(fits.Column(name='redshift', format='E', array=obj_zsft)) tbhdu = fits.BinTableHDU.from_columns(cols) prihdr = fits.Header() prihdr['TITLE'] = div+'_cat' prihdr['CREATOR'] = 'WJP' prihdu = fits.PrimaryHDU(header=prihdr) fits.HDUList([prihdu, tbhdu]).writeto(path_psf+div+'_catalogue.fits', overwrite=True) print('Written catalogue to', path_psf+div+'_catalogue.fits')
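The colour-composite step at the end of this script follows Lupton et al. (2004): the total intensity I = (r + g + b) / 3 is compressed with an arcsinh stretch, f(I) = arcsinh(I / beta) / arcsinh((up - lo) / beta), and each channel is rescaled as R = r * f(I) / I (likewise G and B) before clipping to [0, 1], so colour ratios survive while the dynamic range shrinks. A condensed sketch of that scaling (the per-channel zero-pointing and the PIL conversion above are omitted):

import numpy as np

def lupton_rgb(r, g, b, beta=1.0):
    I = (r + g + b) / 3.0
    up = min(r.max(), g.max(), b.max())
    f = np.arcsinh(I / beta) / np.arcsinh(up / beta)
    with np.errstate(divide="ignore", invalid="ignore"):
        rgb = np.dstack([c * f / I for c in (r, g, b)])
    rgb = np.nan_to_num(rgb)        # zero out pixels where I == 0
    return np.clip(rgb, 0.0, 1.0)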
[ "matplotlib.pyplot.title", "astropy.convolution.convolve", "numpy.sum", "astropy.io.fits.PrimaryHDU", "astropy.cosmology.WMAP7.luminosity_distance", "numpy.isnan", "matplotlib.pyplot.figure", "astropy.io.fits.Header", "numpy.random.randint", "numpy.rot90", "pickle.load", "glob.glob", "numpy.arange", "numpy.mean", "astropy.io.fits.HDUList", "astropy.io.fits.getdata", "matplotlib.pyplot.imshow", "os.path.exists", "numpy.max", "numpy.arcsinh", "astropy.io.fits.Column", "numpy.dstack", "h5py.File", "matplotlib.pyplot.show", "matplotlib.pyplot.legend", "numpy.min", "reproject.reproject_interp", "astropy.cosmology.WMAP7.kpc_proper_per_arcmin", "matplotlib.pyplot.subplot", "astropy.table.Table", "os.makedirs", "astropy.io.fits.BinTableHDU.from_columns", "numpy.zeros", "astropy.wcs.WCS", "numpy.where", "numpy.array" ]
[((1392, 1417), 'numpy.array', 'np.array', (["['i', 'r', 'g']"], {}), "(['i', 'r', 'g'])\n", (1400, 1417), True, 'import numpy as np\n'), ((1470, 1522), 'numpy.array', 'np.array', (["['_00', '_01', '_02', '_03', '_04', '_05']"], {}), "(['_00', '_01', '_02', '_03', '_04', '_05'])\n", (1478, 1522), True, 'import numpy as np\n'), ((2025, 2056), 'glob.glob', 'glob.glob', (["(noise_path + '*.pkl')"], {}), "(noise_path + '*.pkl')\n", (2034, 2056), False, 'import glob\n'), ((2092, 2163), 'astropy.io.fits.getdata', 'fits.getdata', (['"""/Volumes/Data_Disk/sky_maps/sdss/psf/sdss_psf_g.fits"""', '(0)'], {}), "('/Volumes/Data_Disk/sky_maps/sdss/psf/sdss_psf_g.fits', 0)\n", (2104, 2163), False, 'from astropy.io import fits\n'), ((2172, 2202), 'numpy.array', 'np.array', (['psf_g'], {'dtype': '"""float"""'}), "(psf_g, dtype='float')\n", (2180, 2202), True, 'import numpy as np\n'), ((2304, 2321), 'matplotlib.pyplot.imshow', 'plt.imshow', (['psf_g'], {}), '(psf_g)\n', (2314, 2321), True, 'import matplotlib.pyplot as plt\n'), ((2322, 2332), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2330, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2342, 2413), 'astropy.io.fits.getdata', 'fits.getdata', (['"""/Volumes/Data_Disk/sky_maps/sdss/psf/sdss_psf_r.fits"""', '(0)'], {}), "('/Volumes/Data_Disk/sky_maps/sdss/psf/sdss_psf_r.fits', 0)\n", (2354, 2413), False, 'from astropy.io import fits\n'), ((2422, 2452), 'numpy.array', 'np.array', (['psf_r'], {'dtype': '"""float"""'}), "(psf_r, dtype='float')\n", (2430, 2452), True, 'import numpy as np\n'), ((2537, 2554), 'matplotlib.pyplot.imshow', 'plt.imshow', (['psf_r'], {}), '(psf_r)\n', (2547, 2554), True, 'import matplotlib.pyplot as plt\n'), ((2555, 2565), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2563, 2565), True, 'import matplotlib.pyplot as plt\n'), ((2575, 2646), 'astropy.io.fits.getdata', 'fits.getdata', (['"""/Volumes/Data_Disk/sky_maps/sdss/psf/sdss_psf_i.fits"""', '(0)'], {}), "('/Volumes/Data_Disk/sky_maps/sdss/psf/sdss_psf_i.fits', 0)\n", (2587, 2646), False, 'from astropy.io import fits\n'), ((2655, 2685), 'numpy.array', 'np.array', (['psf_i'], {'dtype': '"""float"""'}), "(psf_i, dtype='float')\n", (2663, 2685), True, 'import numpy as np\n'), ((2770, 2787), 'matplotlib.pyplot.imshow', 'plt.imshow', (['psf_i'], {}), '(psf_i)\n', (2780, 2787), True, 'import matplotlib.pyplot as plt\n'), ((2788, 2798), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2796, 2798), True, 'import matplotlib.pyplot as plt\n'), ((2846, 2876), 'astropy.io.fits.getdata', 'fits.getdata', (['redshift_file', '(1)'], {}), '(redshift_file, 1)\n', (2858, 2876), False, 'from astropy.io import fits\n'), ((2885, 2896), 'astropy.table.Table', 'Table', (['data'], {}), '(data)\n', (2890, 2896), False, 'from astropy.table import Table\n'), ((3227, 3247), 'numpy.zeros', 'np.zeros', (['(256, 256)'], {}), '((256, 256))\n', (3235, 3247), True, 'import numpy as np\n'), ((3282, 3298), 'astropy.wcs.WCS', 'wcs.WCS', ([], {'naxis': '(2)'}), '(naxis=2)\n', (3289, 3298), False, 'from astropy import wcs\n'), ((3378, 3406), 'numpy.array', 'np.array', (['[0.00011, 0.00011]'], {}), '([0.00011, 0.00011])\n', (3386, 3406), True, 'import numpy as np\n'), ((3517, 3533), 'astropy.wcs.WCS', 'wcs.WCS', ([], {'naxis': '(2)'}), '(naxis=2)\n', (3524, 3533), False, 'from astropy import wcs\n'), ((3613, 3645), 'numpy.array', 'np.array', (['[px / 3600, px / 3600]'], {}), '([px / 3600, px / 3600])\n', (3621, 3645), True, 'import numpy as np\n'), ((1743, 1767), 'os.path.exists', 
'os.path.exists', (['path_psf'], {}), '(path_psf)\n', (1757, 1767), False, 'import os\n'), ((1773, 1794), 'os.makedirs', 'os.makedirs', (['path_psf'], {}), '(path_psf)\n', (1784, 1794), False, 'import os\n'), ((1986, 2021), 'glob.glob', 'glob.glob', (["(path + div + '/*' + extn)"], {}), "(path + div + '/*' + extn)\n", (1995, 2021), False, 'import glob\n'), ((2259, 2272), 'numpy.max', 'np.max', (['psf_g'], {}), '(psf_g)\n', (2265, 2272), True, 'import numpy as np\n'), ((2274, 2287), 'numpy.min', 'np.min', (['psf_g'], {}), '(psf_g)\n', (2280, 2287), True, 'import numpy as np\n'), ((2289, 2302), 'numpy.sum', 'np.sum', (['psf_g'], {}), '(psf_g)\n', (2295, 2302), True, 'import numpy as np\n'), ((2492, 2505), 'numpy.max', 'np.max', (['psf_r'], {}), '(psf_r)\n', (2498, 2505), True, 'import numpy as np\n'), ((2507, 2520), 'numpy.min', 'np.min', (['psf_r'], {}), '(psf_r)\n', (2513, 2520), True, 'import numpy as np\n'), ((2522, 2535), 'numpy.sum', 'np.sum', (['psf_r'], {}), '(psf_r)\n', (2528, 2535), True, 'import numpy as np\n'), ((2725, 2738), 'numpy.max', 'np.max', (['psf_i'], {}), '(psf_i)\n', (2731, 2738), True, 'import numpy as np\n'), ((2740, 2753), 'numpy.min', 'np.min', (['psf_i'], {}), '(psf_i)\n', (2746, 2753), True, 'import numpy as np\n'), ((2755, 2768), 'numpy.sum', 'np.sum', (['psf_i'], {}), '(psf_i)\n', (2761, 2768), True, 'import numpy as np\n'), ((3097, 3113), 'numpy.where', 'np.where', (['(I < lo)'], {}), '(I < lo)\n', (3105, 3113), True, 'import numpy as np\n'), ((3142, 3158), 'numpy.where', 'np.where', (['(i > up)'], {}), '(i > up)\n', (3150, 3158), True, 'import numpy as np\n'), ((13996, 14031), 'astropy.io.fits.BinTableHDU.from_columns', 'fits.BinTableHDU.from_columns', (['cols'], {}), '(cols)\n', (14025, 14031), False, 'from astropy.io import fits\n'), ((14045, 14058), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (14056, 14058), False, 'from astropy.io import fits\n'), ((14135, 14165), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'header': 'prihdr'}), '(header=prihdr)\n', (14150, 14165), False, 'from astropy.io import fits\n'), ((1827, 1857), 'os.path.exists', 'os.path.exists', (['(path_psf + div)'], {}), '(path_psf + div)\n', (1841, 1857), False, 'import os\n'), ((1865, 1892), 'os.makedirs', 'os.makedirs', (['(path_psf + div)'], {}), '(path_psf + div)\n', (1876, 1892), False, 'import os\n'), ((3024, 3033), 'numpy.max', 'np.max', (['i'], {}), '(i)\n', (3030, 3033), True, 'import numpy as np\n'), ((3043, 3063), 'numpy.arcsinh', 'np.arcsinh', (['(I / beta)'], {}), '(I / beta)\n', (3053, 3063), True, 'import numpy as np\n'), ((3062, 3090), 'numpy.arcsinh', 'np.arcsinh', (['((up - lo) / beta)'], {}), '((up - lo) / beta)\n', (3072, 3090), True, 'import numpy as np\n'), ((13852, 13908), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""Object"""', 'format': '"""30A"""', 'array': 'obj_name'}), "(name='Object', format='30A', array=obj_name)\n", (13863, 13908), False, 'from astropy.io import fits\n'), ((13926, 13982), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""redshift"""', 'format': '"""E"""', 'array': 'obj_zsft'}), "(name='redshift', format='E', array=obj_zsft)\n", (13937, 13982), False, 'from astropy.io import fits\n'), ((4285, 4313), 'h5py.File', 'h5py.File', (['objs[div][i]', '"""r"""'], {}), "(objs[div][i], 'r')\n", (4294, 4313), False, 'import h5py\n'), ((5032, 5064), 'numpy.array', 'np.array', (['[px / 3600, px / 3600]'], {}), '([px / 3600, px / 3600])\n', (5040, 5064), True, 'import numpy as np\n'), ((12081, 12090), 
'numpy.min', 'np.min', (['r'], {}), '(r)\n', (12087, 12090), True, 'import numpy as np\n'), ((12111, 12120), 'numpy.min', 'np.min', (['g'], {}), '(g)\n', (12117, 12120), True, 'import numpy as np\n'), ((12141, 12150), 'numpy.min', 'np.min', (['b'], {}), '(b)\n', (12147, 12150), True, 'import numpy as np\n'), ((12376, 12392), 'numpy.where', 'np.where', (['(I == 0)'], {}), '(I == 0)\n', (12384, 12392), True, 'import numpy as np\n'), ((12515, 12535), 'numpy.dstack', 'np.dstack', (['(R, G, B)'], {}), '((R, G, B))\n', (12524, 12535), True, 'import numpy as np\n'), ((12552, 12569), 'numpy.where', 'np.where', (['(RGB > 1)'], {}), '(RGB > 1)\n', (12560, 12569), True, 'import numpy as np\n'), ((12827, 12847), 'numpy.dstack', 'np.dstack', (['(r, g, b)'], {}), '((r, g, b))\n', (12836, 12847), True, 'import numpy as np\n'), ((12895, 12906), 'numpy.min', 'np.min', (['rgb'], {}), '(rgb)\n', (12901, 12906), True, 'import numpy as np\n'), ((14170, 14199), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[prihdu, tbhdu]'], {}), '([prihdu, tbhdu])\n', (14182, 14199), False, 'from astropy.io import fits\n'), ((5282, 5304), 'numpy.array', 'np.array', (['f[ch[0] + p]'], {}), '(f[ch[0] + p])\n', (5290, 5304), True, 'import numpy as np\n'), ((5335, 5357), 'numpy.array', 'np.array', (['f[ch[1] + p]'], {}), '(f[ch[1] + p])\n', (5343, 5357), True, 'import numpy as np\n'), ((5376, 5398), 'numpy.array', 'np.array', (['f[ch[2] + p]'], {}), '(f[ch[2] + p])\n', (5384, 5398), True, 'import numpy as np\n'), ((6550, 6572), 'numpy.array', 'np.array', (['f[ch[0] + p]'], {}), '(f[ch[0] + p])\n', (6558, 6572), True, 'import numpy as np\n'), ((6603, 6625), 'numpy.array', 'np.array', (['f[ch[1] + p]'], {}), '(f[ch[1] + p])\n', (6611, 6625), True, 'import numpy as np\n'), ((6644, 6666), 'numpy.array', 'np.array', (['f[ch[2] + p]'], {}), '(f[ch[2] + p])\n', (6652, 6666), True, 'import numpy as np\n'), ((7440, 7490), 'reproject.reproject_interp', 'repro', (['(r, wcs_egl)', 'wcs_new'], {'shape_out': '(256, 256)'}), '((r, wcs_egl), wcs_new, shape_out=(256, 256))\n', (7445, 7490), True, 'from reproject import reproject_interp as repro\n'), ((7712, 7762), 'reproject.reproject_interp', 'repro', (['(g, wcs_egl)', 'wcs_new'], {'shape_out': '(256, 256)'}), '((g, wcs_egl), wcs_new, shape_out=(256, 256))\n', (7717, 7762), True, 'from reproject import reproject_interp as repro\n'), ((7895, 7945), 'reproject.reproject_interp', 'repro', (['(b, wcs_egl)', 'wcs_new'], {'shape_out': '(256, 256)'}), '((b, wcs_egl), wcs_new, shape_out=(256, 256))\n', (7900, 7945), True, 'from reproject import reproject_interp as repro\n'), ((8688, 8706), 'astropy.convolution.convolve', 'convolve', (['r', 'psf_i'], {}), '(r, psf_i)\n', (8696, 8706), False, 'from astropy.convolution import convolve\n'), ((8727, 8745), 'astropy.convolution.convolve', 'convolve', (['g', 'psf_r'], {}), '(g, psf_r)\n', (8735, 8745), False, 'from astropy.convolution import convolve\n'), ((8766, 8784), 'astropy.convolution.convolve', 'convolve', (['b', 'psf_g'], {}), '(b, psf_g)\n', (8774, 8784), False, 'from astropy.convolution import convolve\n'), ((9388, 9416), 'numpy.random.randint', 'np.random.randint', (['(4)'], {'size': '(1)'}), '(4, size=1)\n', (9405, 9416), True, 'import numpy as np\n'), ((10927, 10970), 'numpy.rot90', 'np.rot90', (['noise_map[96:160, 96:160, 2]', 'rot'], {}), '(noise_map[96:160, 96:160, 2], rot)\n', (10935, 10970), True, 'import numpy as np\n'), ((11040, 11083), 'numpy.rot90', 'np.rot90', (['noise_map[96:160, 96:160, 1]', 'rot'], {}), '(noise_map[96:160, 96:160, 1], 
rot)\n', (11048, 11083), True, 'import numpy as np\n'), ((11103, 11146), 'numpy.rot90', 'np.rot90', (['noise_map[96:160, 96:160, 0]', 'rot'], {}), '(noise_map[96:160, 96:160, 0], rot)\n', (11111, 11146), True, 'import numpy as np\n'), ((12931, 12942), 'numpy.max', 'np.max', (['rgb'], {}), '(rgb)\n', (12937, 12942), True, 'import numpy as np\n'), ((13300, 13315), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (13310, 13315), True, 'import matplotlib.pyplot as plt\n'), ((13332, 13342), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13340, 13342), True, 'import matplotlib.pyplot as plt\n'), ((5807, 5817), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5815, 5817), True, 'import matplotlib.pyplot as plt\n'), ((5991, 6040), 'astropy.cosmology.WMAP7.luminosity_distance', 'cosmo.luminosity_distance', (['table[z_col][rand_idx]'], {}), '(table[z_col][rand_idx])\n', (6016, 6040), True, 'from astropy.cosmology import WMAP7 as cosmo\n'), ((7304, 7314), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7312, 7314), True, 'import matplotlib.pyplot as plt\n'), ((7590, 7601), 'numpy.isnan', 'np.isnan', (['r'], {}), '(r)\n', (7598, 7601), True, 'import numpy as np\n'), ((7829, 7840), 'numpy.isnan', 'np.isnan', (['g'], {}), '(g)\n', (7837, 7840), True, 'import numpy as np\n'), ((8012, 8023), 'numpy.isnan', 'np.isnan', (['b'], {}), '(b)\n', (8020, 8023), True, 'import numpy as np\n'), ((8364, 8374), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8372, 8374), True, 'import matplotlib.pyplot as plt\n'), ((9093, 9103), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9101, 9103), True, 'import matplotlib.pyplot as plt\n'), ((9321, 9335), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9332, 9335), False, 'import pickle\n'), ((9770, 9780), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9778, 9780), True, 'import matplotlib.pyplot as plt\n'), ((9849, 9871), 'numpy.arange', 'np.arange', (['(-5)', '(-2)', '(0.1)'], {}), '(-5, -2, 0.1)\n', (9858, 9871), True, 'import numpy as np\n'), ((9892, 9951), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.2 * 5.5, 3 * 1.25 * (5.5 * 2) / 3.0)'}), '(figsize=(1.2 * 5.5, 3 * 1.25 * (5.5 * 2) / 3.0))\n', (9902, 9951), True, 'import matplotlib.pyplot as plt\n'), ((9996, 10016), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (10007, 10016), True, 'import matplotlib.pyplot as plt\n'), ((10037, 10051), 'matplotlib.pyplot.title', 'plt.title', (['"""r"""'], {}), "('r')\n", (10046, 10051), True, 'import matplotlib.pyplot as plt\n'), ((10317, 10337), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (10328, 10337), True, 'import matplotlib.pyplot as plt\n'), ((10358, 10372), 'matplotlib.pyplot.title', 'plt.title', (['"""g"""'], {}), "('g')\n", (10367, 10372), True, 'import matplotlib.pyplot as plt\n'), ((10638, 10658), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (10649, 10658), True, 'import matplotlib.pyplot as plt\n'), ((10679, 10693), 'matplotlib.pyplot.title', 'plt.title', (['"""b"""'], {}), "('b')\n", (10688, 10693), True, 'import matplotlib.pyplot as plt\n'), ((11191, 11211), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (11202, 11211), True, 'import matplotlib.pyplot as plt\n'), ((11334, 11351), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (11344, 11351), True, 'import matplotlib.pyplot as 
plt\n'), ((11372, 11392), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (11383, 11392), True, 'import matplotlib.pyplot as plt\n'), ((11515, 11532), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (11525, 11532), True, 'import matplotlib.pyplot as plt\n'), ((11553, 11573), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (11564, 11573), True, 'import matplotlib.pyplot as plt\n'), ((11696, 11713), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (11706, 11713), True, 'import matplotlib.pyplot as plt\n'), ((11734, 11744), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11742, 11744), True, 'import matplotlib.pyplot as plt\n'), ((11912, 11921), 'numpy.max', 'np.max', (['r'], {}), '(r)\n', (11918, 11921), True, 'import numpy as np\n'), ((11923, 11932), 'numpy.max', 'np.max', (['g'], {}), '(g)\n', (11929, 11932), True, 'import numpy as np\n'), ((11934, 11943), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (11940, 11943), True, 'import numpy as np\n'), ((12030, 12039), 'numpy.min', 'np.min', (['r'], {}), '(r)\n', (12036, 12039), True, 'import numpy as np\n'), ((12041, 12050), 'numpy.min', 'np.min', (['g'], {}), '(g)\n', (12047, 12050), True, 'import numpy as np\n'), ((12052, 12061), 'numpy.min', 'np.min', (['b'], {}), '(b)\n', (12058, 12061), True, 'import numpy as np\n'), ((13251, 13260), 'numpy.max', 'np.max', (['r'], {}), '(r)\n', (13257, 13260), True, 'import numpy as np\n'), ((13262, 13271), 'numpy.max', 'np.max', (['g'], {}), '(g)\n', (13268, 13271), True, 'import numpy as np\n'), ((13273, 13282), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (13279, 13282), True, 'import numpy as np\n'), ((4838, 4889), 'astropy.cosmology.WMAP7.kpc_proper_per_arcmin', 'cosmo.kpc_proper_per_arcmin', (['table[z_col][rand_idx]'], {}), '(table[z_col][rand_idx])\n', (4865, 4889), True, 'from astropy.cosmology import WMAP7 as cosmo\n'), ((9557, 9590), 'numpy.max', 'np.max', (['noise_map[96:160, 96:160]'], {}), '(noise_map[96:160, 96:160])\n', (9563, 9590), True, 'import numpy as np\n'), ((9591, 9625), 'numpy.mean', 'np.mean', (['noise_map[96:160, 96:160]'], {}), '(noise_map[96:160, 96:160])\n', (9598, 9625), True, 'import numpy as np\n'), ((9626, 9659), 'numpy.min', 'np.min', (['noise_map[96:160, 96:160]'], {}), '(noise_map[96:160, 96:160])\n', (9632, 9659), True, 'import numpy as np\n'), ((13206, 13226), 'numpy.dstack', 'np.dstack', (['(r, g, b)'], {}), '((r, g, b))\n', (13215, 13226), True, 'import numpy as np\n'), ((4572, 4623), 'astropy.cosmology.WMAP7.kpc_proper_per_arcmin', 'cosmo.kpc_proper_per_arcmin', (['table[z_col][rand_idx]'], {}), '(table[z_col][rand_idx])\n', (4599, 4623), True, 'from astropy.cosmology import WMAP7 as cosmo\n'), ((5507, 5527), 'numpy.dstack', 'np.dstack', (['(r, g, b)'], {}), '((r, g, b))\n', (5516, 5527), True, 'import numpy as np\n'), ((7004, 7024), 'numpy.dstack', 'np.dstack', (['(r, g, b)'], {}), '((r, g, b))\n', (7013, 7024), True, 'import numpy as np\n'), ((8154, 8174), 'numpy.dstack', 'np.dstack', (['(r, g, b)'], {}), '((r, g, b))\n', (8163, 8174), True, 'import numpy as np\n'), ((8883, 8903), 'numpy.dstack', 'np.dstack', (['(r, g, b)'], {}), '((r, g, b))\n', (8892, 8903), True, 'import numpy as np\n'), ((9716, 9749), 'numpy.max', 'np.max', (['noise_map[96:160, 96:160]'], {}), '(noise_map[96:160, 96:160])\n', (9722, 9749), True, 'import numpy as np\n'), ((5589, 5614), 'numpy.max', 'np.max', (['r[96:160, 96:160]'], {}), '(r[96:160, 
96:160])\n', (5595, 5614), True, 'import numpy as np\n'), ((5674, 5699), 'numpy.max', 'np.max', (['g[96:160, 96:160]'], {}), '(g[96:160, 96:160])\n', (5680, 5699), True, 'import numpy as np\n'), ((5759, 5784), 'numpy.max', 'np.max', (['b[96:160, 96:160]'], {}), '(b[96:160, 96:160])\n', (5765, 5784), True, 'import numpy as np\n'), ((7086, 7111), 'numpy.max', 'np.max', (['r[96:160, 96:160]'], {}), '(r[96:160, 96:160])\n', (7092, 7111), True, 'import numpy as np\n'), ((7171, 7196), 'numpy.max', 'np.max', (['g[96:160, 96:160]'], {}), '(g[96:160, 96:160])\n', (7177, 7196), True, 'import numpy as np\n'), ((7256, 7281), 'numpy.max', 'np.max', (['b[96:160, 96:160]'], {}), '(b[96:160, 96:160])\n', (7262, 7281), True, 'import numpy as np\n'), ((8221, 8230), 'numpy.max', 'np.max', (['r'], {}), '(r)\n', (8227, 8230), True, 'import numpy as np\n'), ((8276, 8285), 'numpy.max', 'np.max', (['g'], {}), '(g)\n', (8282, 8285), True, 'import numpy as np\n'), ((8331, 8340), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (8337, 8340), True, 'import numpy as np\n'), ((8950, 8959), 'numpy.max', 'np.max', (['r'], {}), '(r)\n', (8956, 8959), True, 'import numpy as np\n'), ((9005, 9014), 'numpy.max', 'np.max', (['g'], {}), '(g)\n', (9011, 9014), True, 'import numpy as np\n'), ((9060, 9069), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (9066, 9069), True, 'import numpy as np\n')]
#! /usr/bin/python #-------------------------------------------------------------------- # PROGRAM : write_to_nc.py # CREATED BY : hjkim @IIS.2017-10-17 06:23:16.129216 # MODIFED BY : # # USAGE : $ ./write_to_nc.py # # DESCRIPTION: #------------------------------------------------------cf0.2@20120401 import os,sys from optparse import OptionParser from collections import OrderedDict from netCDF4 import Dataset import numpy as np class WriteNC( object ): def toncdf( self, outpath ): torigin = self.torigin ncfile = self.open_ncfile( outpath ) ncdims = self.create_dimensions( ncfile ) ncvars = self.create_variables( ncfile, ncdims ) self.set_attributes( ncvars ) dtime = [ (dtm-torigin).total_seconds() for dtm in self.dtime ] ncvars['time'][:] = dtime[:] ncvars['pixel'][:] = range( self.data.shape[1] ) ncvars['lat'][:] = self.lat[:] ncvars['lon'][:] = self.lon[:] ncvars['data'][:] = self.data[:] if self.griddata != []: ncvars[ 'gridlat' ][:] = self.grid.lat ncvars[ 'gridlon' ][:] = self.grid.lon ncvars[ 'griddata' ][:] = np.ma.masked_equal( self.griddata, self.missing_value ) ncfile.close() def open_ncfile( self, outpath ): ncfile = Dataset( outpath, 'w', format='NETCDF4' ) return ncfile def create_dimensions( self, ncfile ): dims = OrderedDict(( ('time', None), ('pixel', None), ('lat', None), ('lon', None) )) dims['time'] = ncfile.createDimension( "time", None) dims['pixel'] = ncfile.createDimension( "pixel", self.data.shape[1]) if self.griddata != []: dims['gridlat'] = ncfile.createDimension( "gridlat", self.grid.lat.size) dims['gridlon'] = ncfile.createDimension( "gridlon", self.grid.lon.size) return dims def create_variables( self, ncfile, ncdims ): varparams = dict(( ( 'time', ('time','f8',('time', )) ), ( 'pixel', ('pixel','i4',('pixel',)) ), ( 'lat', ('lat','f4',('time','pixel')) ), ( 'lon', ('lon','f4',('time','pixel')) ), ( 'data', ('data','f4',('time','pixel')) ), ( 'gridlat', ('gridlat','f4',('gridlat', )) ), ( 'gridlon', ('gridlon','f4',('gridlon', )) ), ( 'griddata',('griddata','f4',('time','gridlat','gridlon')) ), )) ncvars = OrderedDict() ncvars[ 'time' ] = ncfile.createVariable( *varparams['time'] ) ncvars[ 'pixel' ] = ncfile.createVariable( *varparams['pixel'] ) ncvars[ 'lat' ] = ncfile.createVariable( *varparams['lat' ] ) ncvars[ 'lon' ] = ncfile.createVariable( *varparams['lon' ] ) ncvars[ 'data' ] = ncfile.createVariable( *varparams['data' ] ) if self.griddata != []: ncvars[ 'gridlat' ] = ncfile.createVariable( *varparams['gridlat' ] ) ncvars[ 'gridlon' ] = ncfile.createVariable( *varparams['gridlon' ] ) ncvars[ 'griddata' ] = ncfile.createVariable( *varparams['griddata'], zlib=True, complevel=1 ) return ncvars def set_attributes( self, ncvars ): ncvars['time'].units = 'seconds since %s'%self.torigin.strftime("%Y-%m-%d %H:%M:%S")
[ "netCDF4.Dataset", "numpy.ma.masked_equal", "collections.OrderedDict" ]
[((1405, 1444), 'netCDF4.Dataset', 'Dataset', (['outpath', '"""w"""'], {'format': '"""NETCDF4"""'}), "(outpath, 'w', format='NETCDF4')\n", (1412, 1444), False, 'from netCDF4 import Dataset\n'), ((1534, 1610), 'collections.OrderedDict', 'OrderedDict', (["(('time', None), ('pixel', None), ('lat', None), ('lon', None))"], {}), "((('time', None), ('pixel', None), ('lat', None), ('lon', None)))\n", (1545, 1610), False, 'from collections import OrderedDict\n'), ((2837, 2850), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2848, 2850), False, 'from collections import OrderedDict\n'), ((1265, 1318), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['self.griddata', 'self.missing_value'], {}), '(self.griddata, self.missing_value)\n', (1283, 1318), True, 'import numpy as np\n')]
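A hedged sketch of the file layout `WriteNC.toncdf()` above produces with the netCDF4 API; the output path, dimension sizes and data values are made up for illustration:

# Illustrative only: path, sizes and values are assumed, not taken from the class above.
import numpy as np
from netCDF4 import Dataset

nc = Dataset('example_swath.nc', 'w', format='NETCDF4')
nc.createDimension('time', None)                  # unlimited, like dims['time']
nc.createDimension('pixel', 4)
time = nc.createVariable('time', 'f8', ('time',))
pixel = nc.createVariable('pixel', 'i4', ('pixel',))
data = nc.createVariable('data', 'f4', ('time', 'pixel'))
time.units = 'seconds since 2017-10-17 00:00:00'
time[:] = [0.0, 60.0]                             # writing extends the unlimited dim
pixel[:] = range(4)
data[:] = np.arange(8, dtype='f4').reshape(2, 4)
nc.close()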
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import random import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import FlavaFeatureExtractor, FlavaProcessor from transformers.models.flava.feature_extraction_flava import ( FLAVA_CODEBOOK_MEAN, FLAVA_CODEBOOK_STD, FLAVA_IMAGE_MEAN, FLAVA_IMAGE_STD, ) @require_vision class FlavaProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() # fmt: off vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: on self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write("".join([x + "\n" for x in vocab_tokens])) feature_extractor_map = { "image_mean": FLAVA_IMAGE_MEAN, "image_std": FLAVA_IMAGE_STD, "do_normalize": True, "do_resize": True, "size": 224, "do_center_crop": True, "crop_size": 224, "input_size_patches": 14, "total_mask_patches": 75, "mask_group_max_patches": None, "mask_group_min_patches": 16, "mask_group_min_aspect_ratio": 0.3, "mask_group_max_aspect_ratio": None, "codebook_do_resize": True, "codebook_size": 112, "codebook_resample": None, "codebook_do_center_crop": True, "codebook_crop_size": 112, "codebook_do_map_pixels": True, "codebook_do_normalize": True, "codebook_image_mean": FLAVA_CODEBOOK_MEAN, "codebook_image_std": FLAVA_CODEBOOK_STD, } self.feature_extractor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(self.feature_extractor_file, "w", encoding="utf-8") as fp: json.dump(feature_extractor_map, fp) def get_tokenizer(self, **kwargs): return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_feature_extractor(self, **kwargs): return FlavaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_default(self): tokenizer_slow = self.get_tokenizer() tokenizer_fast = self.get_rust_tokenizer() feature_extractor = self.get_feature_extractor() processor_slow = FlavaProcessor(tokenizer=tokenizer_slow, feature_extractor=feature_extractor) processor_slow.save_pretrained(self.tmpdirname) processor_slow = FlavaProcessor.from_pretrained(self.tmpdirname, use_fast=False) processor_fast = FlavaProcessor(tokenizer=tokenizer_fast, feature_extractor=feature_extractor) processor_fast.save_pretrained(self.tmpdirname) processor_fast = FlavaProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer, BertTokenizer) self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast) self.assertEqual(processor_slow.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertEqual(processor_fast.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor_slow.feature_extractor, FlavaFeatureExtractor) self.assertIsInstance(processor_fast.feature_extractor, FlavaFeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = FlavaProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = FlavaProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, BertTokenizerFast) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, FlavaFeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = FlavaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) image_input = self.prepare_image_inputs() input_feat_extract = feature_extractor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) # With rest of the args random.seed(1234) input_feat_extract = feature_extractor( image_input, return_image_mask=True, return_codebook_pixels=True, return_tensors="np" ) random.seed(1234) input_processor = processor( images=image_input, return_image_mask=True, return_codebook_pixels=True, return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = FlavaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "lower newer" 
encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = FlavaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]) # add extra args inputs = processor(text=input_str, images=image_input, return_codebook_pixels=True, return_image_mask=True) self.assertListEqual( list(inputs.keys()), [ "input_ids", "token_type_ids", "attention_mask", "pixel_values", "codebook_pixel_values", "bool_masked_pos", ], ) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = FlavaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
[ "json.dump", "numpy.moveaxis", "transformers.FlavaFeatureExtractor.from_pretrained", "transformers.utils.is_vision_available", "transformers.FlavaProcessor", "transformers.BertTokenizerFast.from_pretrained", "pytest.raises", "tempfile.mkdtemp", "transformers.BertTokenizer.from_pretrained", "random.seed", "numpy.random.randint", "transformers.FlavaProcessor.from_pretrained", "shutil.rmtree", "os.path.join" ]
[((1016, 1037), 'transformers.utils.is_vision_available', 'is_vision_available', ([], {}), '()\n', (1035, 1037), False, 'from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available\n'), ((1426, 1444), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1442, 1444), False, 'import tempfile\n'), ((1656, 1718), 'os.path.join', 'os.path.join', (['self.tmpdirname', "VOCAB_FILES_NAMES['vocab_file']"], {}), "(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])\n", (1668, 1718), False, 'import os\n'), ((2828, 2881), 'os.path.join', 'os.path.join', (['self.tmpdirname', 'FEATURE_EXTRACTOR_NAME'], {}), '(self.tmpdirname, FEATURE_EXTRACTOR_NAME)\n', (2840, 2881), False, 'import os\n'), ((3063, 3119), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['self.tmpdirname'], {}), '(self.tmpdirname, **kwargs)\n', (3092, 3119), False, 'from transformers import BertTokenizer, BertTokenizerFast\n'), ((3180, 3240), 'transformers.BertTokenizerFast.from_pretrained', 'BertTokenizerFast.from_pretrained', (['self.tmpdirname'], {}), '(self.tmpdirname, **kwargs)\n', (3213, 3240), False, 'from transformers import BertTokenizer, BertTokenizerFast\n'), ((3304, 3368), 'transformers.FlavaFeatureExtractor.from_pretrained', 'FlavaFeatureExtractor.from_pretrained', (['self.tmpdirname'], {}), '(self.tmpdirname, **kwargs)\n', (3341, 3368), False, 'from transformers import FlavaFeatureExtractor, FlavaProcessor\n'), ((3402, 3432), 'shutil.rmtree', 'shutil.rmtree', (['self.tmpdirname'], {}), '(self.tmpdirname)\n', (3415, 3432), False, 'import shutil\n'), ((4094, 4171), 'transformers.FlavaProcessor', 'FlavaProcessor', ([], {'tokenizer': 'tokenizer_slow', 'feature_extractor': 'feature_extractor'}), '(tokenizer=tokenizer_slow, feature_extractor=feature_extractor)\n', (4108, 4171), False, 'from transformers import FlavaFeatureExtractor, FlavaProcessor\n'), ((4253, 4316), 'transformers.FlavaProcessor.from_pretrained', 'FlavaProcessor.from_pretrained', (['self.tmpdirname'], {'use_fast': '(False)'}), '(self.tmpdirname, use_fast=False)\n', (4283, 4316), False, 'from transformers import FlavaFeatureExtractor, FlavaProcessor\n'), ((4343, 4420), 'transformers.FlavaProcessor', 'FlavaProcessor', ([], {'tokenizer': 'tokenizer_fast', 'feature_extractor': 'feature_extractor'}), '(tokenizer=tokenizer_fast, feature_extractor=feature_extractor)\n', (4357, 4420), False, 'from transformers import FlavaFeatureExtractor, FlavaProcessor\n'), ((4502, 4549), 'transformers.FlavaProcessor.from_pretrained', 'FlavaProcessor.from_pretrained', (['self.tmpdirname'], {}), '(self.tmpdirname)\n', (4532, 4549), False, 'from transformers import FlavaFeatureExtractor, FlavaProcessor\n'), ((5802, 5930), 'transformers.FlavaProcessor.from_pretrained', 'FlavaProcessor.from_pretrained', (['self.tmpdirname'], {'bos_token': '"""(BOS)"""', 'eos_token': '"""(EOS)"""', 'do_normalize': '(False)', 'padding_value': '(1.0)'}), "(self.tmpdirname, bos_token='(BOS)',\n eos_token='(EOS)', do_normalize=False, padding_value=1.0)\n", (5832, 5930), False, 'from transformers import FlavaFeatureExtractor, FlavaProcessor\n'), ((6471, 6543), 'transformers.FlavaProcessor', 'FlavaProcessor', ([], {'tokenizer': 'tokenizer', 'feature_extractor': 'feature_extractor'}), '(tokenizer=tokenizer, feature_extractor=feature_extractor)\n', (6485, 6543), False, 'from transformers import FlavaFeatureExtractor, FlavaProcessor\n'), ((6948, 6965), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (6959, 6965), False, 'import random\n'), ((7130, 7147), 
'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (7141, 7147), False, 'import random\n'), ((7603, 7675), 'transformers.FlavaProcessor', 'FlavaProcessor', ([], {'tokenizer': 'tokenizer', 'feature_extractor': 'feature_extractor'}), '(tokenizer=tokenizer, feature_extractor=feature_extractor)\n', (7617, 7675), False, 'from transformers import FlavaFeatureExtractor, FlavaProcessor\n'), ((8075, 8147), 'transformers.FlavaProcessor', 'FlavaProcessor', ([], {'tokenizer': 'tokenizer', 'feature_extractor': 'feature_extractor'}), '(tokenizer=tokenizer, feature_extractor=feature_extractor)\n', (8089, 8147), False, 'from transformers import FlavaFeatureExtractor, FlavaProcessor\n'), ((9139, 9211), 'transformers.FlavaProcessor', 'FlavaProcessor', ([], {'tokenizer': 'tokenizer', 'feature_extractor': 'feature_extractor'}), '(tokenizer=tokenizer, feature_extractor=feature_extractor)\n', (9153, 9211), False, 'from transformers import FlavaFeatureExtractor, FlavaProcessor\n'), ((2971, 3007), 'json.dump', 'json.dump', (['feature_extractor_map', 'fp'], {}), '(feature_extractor_map, fp)\n', (2980, 3007), False, 'import json\n'), ((3689, 3746), 'numpy.random.randint', 'np.random.randint', (['(255)'], {'size': '(3, 30, 400)', 'dtype': 'np.uint8'}), '(255, size=(3, 30, 400), dtype=np.uint8)\n', (3706, 3746), True, 'import numpy as np\n'), ((8931, 8956), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8944, 8956), False, 'import pytest\n'), ((3789, 3810), 'numpy.moveaxis', 'np.moveaxis', (['x', '(0)', '(-1)'], {}), '(x, 0, -1)\n', (3800, 3810), True, 'import numpy as np\n')]
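The test above builds `FlavaProcessor` from a locally saved tokenizer and feature extractor; outside the test the same call pattern is usually driven from a pretrained checkpoint. A hedged sketch, assuming network access and that the public "facebook/flava-full" checkpoint is the one you want:

# Assumes the "facebook/flava-full" hub checkpoint; swap in whichever checkpoint you actually use.
import numpy as np
from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
image = Image.fromarray(np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))   # attention_mask, input_ids, pixel_values, token_type_ids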
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from sklearn.metrics import ( accuracy_score, classification_report, confusion_matrix, f1_score, make_scorer, precision_score, recall_score, average_precision_score, auc ) def plot_confusion_mat(tar, pred, model_name): """ Create plot for confusion matrix for binary classification results from target and predicted labels. Parameters ---------- tar : ndarray numpy array of target label pred : ndarray numpy array of predicted label model_name : str Name of model for plot title Returns ---------- Plot of the confusion matrix along with the score matrics """ conf_mat = confusion_matrix(tar, pred) flags = ['True Neg','False Pos','False Neg','True Pos'] cnt = ['{0:0.0f}'.format(value) for value in conf_mat.flatten()] pct = ['{0:.2%}'.format(value) for value in conf_mat.flatten()/np.sum(conf_mat)] labels = [f'{v1}\n{v2}\n{v3}' for v1, v2, v3 in zip(flags, cnt, pct)] labels = np.asarray(labels).reshape(2,2) categories = ['Rejected subscription', 'Accepted subscription'] accuracy = np.trace(conf_mat) / float(np.sum(conf_mat)) precision = conf_mat[1,1] / sum(conf_mat[:,1]) recall = conf_mat[1,1] / sum(conf_mat[1,:]) f1 = 2*precision*recall / (precision + recall) fig = sns.heatmap(conf_mat, annot=labels, fmt = '', cmap='Blues', xticklabels=categories, yticklabels=categories, cbar=False) fig.set_ylabel('True label') fig.set_xlabel(f"Predicted label \n\n Accuracy={accuracy:0.3f} \n Precision={precision:0.3f} \n Recall={recall:0.3f} \n F1 Score={f1:0.3f}") fig.set_title(f'Confusion Matrix of {model_name}') return fig
[ "numpy.trace", "numpy.sum", "seaborn.heatmap", "numpy.asarray", "sklearn.metrics.confusion_matrix" ]
[((808, 835), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['tar', 'pred'], {}), '(tar, pred)\n', (824, 835), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, f1_score, make_scorer, precision_score, recall_score, average_precision_score, auc\n'), ((1466, 1588), 'seaborn.heatmap', 'sns.heatmap', (['conf_mat'], {'annot': 'labels', 'fmt': '""""""', 'cmap': '"""Blues"""', 'xticklabels': 'categories', 'yticklabels': 'categories', 'cbar': '(False)'}), "(conf_mat, annot=labels, fmt='', cmap='Blues', xticklabels=\n categories, yticklabels=categories, cbar=False)\n", (1477, 1588), True, 'import seaborn as sns\n'), ((1256, 1274), 'numpy.trace', 'np.trace', (['conf_mat'], {}), '(conf_mat)\n', (1264, 1274), True, 'import numpy as np\n'), ((1139, 1157), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (1149, 1157), True, 'import numpy as np\n'), ((1283, 1299), 'numpy.sum', 'np.sum', (['conf_mat'], {}), '(conf_mat)\n', (1289, 1299), True, 'import numpy as np\n'), ((1033, 1049), 'numpy.sum', 'np.sum', (['conf_mat'], {}), '(conf_mat)\n', (1039, 1049), True, 'import numpy as np\n')]
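A hedged usage sketch for `plot_confusion_mat()` above, assuming the function and its imports are in scope; the labels are toy data:

# Toy binary labels; assumes plot_confusion_mat() defined above is importable or in scope.
import numpy as np
import matplotlib.pyplot as plt

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_pred = np.array([0, 1, 1, 1, 0, 0, 1, 0])
ax = plot_confusion_mat(y_true, y_pred, model_name="toy classifier")   # seaborn returns the Axes
plt.tight_layout()
plt.show()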
""" Helper functions for image manipulation """ from __future__ import absolute_import, division import numpy as np from skimage.util import img_as_float __all__ = ['to_norm', 'un_norm'] def to_norm(arr): """ Helper function to normalise/scale an array. This is needed for example for scikit-image which uses floats between 0 and 1. Parameters ---------- arr : `~numpy.ndarray` Array to normalise. Returns ------- arr : `~numpy.ndarray` Array with values between 0 (min) and 1 (max) Examples -------- >>> import numpy as np >>> from sunpy.image.util import to_norm >>> out = to_norm(np.array([-1, 0, 1])) >>> out array([0. , 0.5, 1. ]) """ arr = np.array(arr, dtype='double') arr = img_as_float(arr, force_copy=True) if arr.min() < 0: arr += np.abs(arr.min()) arr /= arr.max() return arr def un_norm(arr, original): """ Helper function to un-normalise (or re-scale) an array based in the values of the original array. Parameters ---------- arr : `~numpy.ndarray` Array of floats to un-normalise with values in [0,1] original : `~numpy.ndarray` Original array with the min and max values Returns ------- arr : `~numpy.ndarray` Array with values between `original.min()` and `original.max()` . Note that the type of the original image is not guaranteed to be reproduced. Examples -------- >>> import numpy as np >>> from sunpy.image.util import un_norm >>> original = np.array([-1, 0, 1]) >>> normalised = np.array([0., 0.5, 1.]) >>> out = un_norm(normalised, original) >>> out array([-1., 0., 1.]) """ level = 0 if original.min() > 0 else np.abs(original.min()) arr *= original.max() + level arr -= level return arr
[ "skimage.util.img_as_float", "numpy.array" ]
[((742, 771), 'numpy.array', 'np.array', (['arr'], {'dtype': '"""double"""'}), "(arr, dtype='double')\n", (750, 771), True, 'import numpy as np\n'), ((782, 816), 'skimage.util.img_as_float', 'img_as_float', (['arr'], {'force_copy': '(True)'}), '(arr, force_copy=True)\n', (794, 816), False, 'from skimage.util import img_as_float\n')]
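A hedged sketch of the `to_norm` / `un_norm` round trip the docstrings above describe; the input array is arbitrary, kept within [-1, 1] so `img_as_float` accepts it:

import numpy as np
from sunpy.image.util import to_norm, un_norm   # import path taken from the docstrings above

original = np.array([-1.0, -0.25, 0.5, 1.0])
scaled = to_norm(original)              # shifted and rescaled into [0, 1]
restored = un_norm(scaled, original)    # back to the original range
assert np.allclose(restored, original)
print(scaled, restored)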
"""Fourier matrix.""" import numpy as np def fourier(dim: int) -> np.ndarray: r""" Generate the Fourier transform matrix [WikDFT]_. Generates the `dim`-by-`dim` unitary matrix that implements the quantum Fourier transform. The Fourier matrix is defined as: .. math:: W_N = \frac{1}{N} \begin{pmatrix} 1 & 1 & 1 & 1 & \ldots & 1 \\ 1 & \omega & \omega^2 & \omega^3 & \ldots & \omega^{N-1} \\ 1 & \omega^2 & \omega^4 & \omega^6 & \ldots & \omega^{2(N-1)} \\ 1 & \omega^3 & \omega^6 & \omega^9 & \ldots & \omega^{3(N-1)} \\ \vdots & \vdots & \vdots & \vdots & \ddots & \vdots \\ 1 & \omega^{N-1} & \omega^{2(N-1)} & \omega^{3(N-1)} & \ldots & \omega^{3(N-1)} \end{pmatrix} Examples ========== The Fourier matrix generated from :math:`d = 3` yields the following matrix: .. math:: W_3 = \frac{1}{3} \begin{pmatrix} 1 & 1 & 1 \\ 0 & \omega & \omega^2 \\ 1 & \omega^2 & \omega^4 \end{pmatrix} >>> from toqito.matrices import fourier >>> fourier(3) [[ 0.57735027+0.j , 0.57735027+0.j , 0.57735027+0.j ], [ 0.57735027+0.j , -0.28867513+0.5j, -0.28867513-0.5j], [ 0.57735027+0.j , -0.28867513-0.5j, -0.28867513+0.5j]] References ========== .. [WikDFT] Wikipedia: DFT matrix, https://en.wikipedia.org/wiki/DFT_matrix :param dim: The size of the Fourier matrix. :return: The Fourier matrix of dimension `dim`. """ # Primitive root of unity. root_unity = np.exp(2 * 1j * np.pi / dim) entry_1 = np.arange(0, dim)[:, None] entry_2 = np.arange(0, dim) return np.power(root_unity, entry_1 * entry_2) / np.sqrt(dim)
[ "numpy.power", "numpy.arange", "numpy.exp", "numpy.sqrt" ]
[((1630, 1660), 'numpy.exp', 'np.exp', (['(2 * 1.0j * np.pi / dim)'], {}), '(2 * 1.0j * np.pi / dim)\n', (1636, 1660), True, 'import numpy as np\n'), ((1714, 1731), 'numpy.arange', 'np.arange', (['(0)', 'dim'], {}), '(0, dim)\n', (1723, 1731), True, 'import numpy as np\n'), ((1673, 1690), 'numpy.arange', 'np.arange', (['(0)', 'dim'], {}), '(0, dim)\n', (1682, 1690), True, 'import numpy as np\n'), ((1743, 1782), 'numpy.power', 'np.power', (['root_unity', '(entry_1 * entry_2)'], {}), '(root_unity, entry_1 * entry_2)\n', (1751, 1782), True, 'import numpy as np\n'), ((1785, 1797), 'numpy.sqrt', 'np.sqrt', (['dim'], {}), '(dim)\n', (1792, 1797), True, 'import numpy as np\n')]
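A hedged sketch numerically checking the unitarity claim in the docstring above; it assumes `toqito` is installed, otherwise the `fourier` definition above can be pasted in directly:

import numpy as np
from toqito.matrices import fourier   # or use the definition above directly

W = fourier(4)
assert np.allclose(W @ W.conj().T, np.eye(4))          # W is unitary
assert np.allclose(W[:, 0], np.full(4, 1 / np.sqrt(4)))  # constant first column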
import librosa
import numpy as np
import pandas as pd
import os

# 512 samples per frame at 44.1kHz is around 86 fps
def build_features(y, sr, n_fft=4096, hop_length=512):
    params = {
        'n_fft': n_fft,
        'hop_length': hop_length
    }
    S, phase = librosa.magphase(librosa.stft(y, **params))
    features = {
        'centroid': librosa.feature.spectral_centroid,
#         'bandwidth': librosa.feature.spectral_bandwidth,
#         'contrast': librosa.feature.spectral_contrast,
#         'rolloff': librosa.feature.spectral_rolloff
    }
    features = {k: v(S=S, sr=sr, **params) for k,v in features.items()}
    features['flatness'] = librosa.feature.spectral_flatness(S=S, **params)
    features['rms'] = librosa.feature.rms(S=S, frame_length=params['n_fft'])
    return features

def build_fingerprint(y, sr, fmin=100, fmax=10000, n_bins=64, normalize=True, hop_length=512):
    octaves = librosa.hz_to_octs(fmax) - librosa.hz_to_octs(fmin)
    bins_per_octave = int(np.floor(n_bins / octaves))
    S = librosa.cqt(y, sr=sr, hop_length=hop_length, fmin=fmin, n_bins=n_bins, bins_per_octave=bins_per_octave)
    amp = librosa.amplitude_to_db(np.abs(S))
    amp = np.flipud(amp)
    if normalize:
        amp -= amp.min()
        amp /= amp.max()
    return amp.T

theme_to_units = {
    1: 'NO',
    2: 'PQRST',
    3: 'FGH',
    4: 'IJK',
    5: 'LM'
}
unit_to_theme = {}
for k,v in theme_to_units.items():
    for unit in v:
        unit_to_theme[unit] = k

def build_targets(annotation_fn, y, sr, amp, gloss=True):
    n = len(amp)
    units = np.empty(shape=n, dtype='<U8')
    unit_position = np.zeros((n), np.float32)
    if not os.path.exists(annotation_fn):
        print('No annotations available:', annotation_fn)
        singing = np.zeros((n), np.int8)
        themes = -np.ones((n), np.int8)
        return units, singing, themes, unit_position
    annotations = pd.read_csv(annotation_fn, sep='\t')
    # fill out target same size as frames
    seconds_to_frame = n / (len(y) / sr)
    for row in annotations.values:
        begin_sec, end_sec = row[3], row[4]
        begin_frame = int(seconds_to_frame * begin_sec)
        end_frame = int(seconds_to_frame * end_sec)
        unit = row[-1]
        if gloss:
            unit = unit[0]
        units[begin_frame:end_frame] = unit
        unit_position[begin_frame:end_frame] = np.linspace(0, 1, end_frame - begin_frame)
    themes = []
    for unit in units:
        if unit == '':
            themes.append(-1)
            continue
        themes.append(unit_to_theme[unit])
    themes = np.asarray(themes, dtype=np.int8)
    singing = (units != '').astype(np.int8)
    return units, singing, themes, unit_position

def to_indices(labels):
    unique_labels = sorted(list(set(labels)))
    label_to_index = {label:i for i,label in enumerate(unique_labels)}
    index_to_label = {i:label for i,label in enumerate(unique_labels)}
    indices = [label_to_index[label] for label in labels]
    return indices, label_to_index, index_to_label
[ "librosa.feature.rms", "numpy.abs", "pandas.read_csv", "numpy.empty", "numpy.asarray", "numpy.floor", "numpy.zeros", "numpy.flipud", "os.path.exists", "numpy.ones", "librosa.cqt", "librosa.feature.spectral_flatness", "numpy.linspace", "librosa.hz_to_octs", "librosa.stft" ]
[((656, 704), 'librosa.feature.spectral_flatness', 'librosa.feature.spectral_flatness', ([], {'S': 'S'}), '(S=S, **params)\n', (689, 704), False, 'import librosa\n'), ((727, 781), 'librosa.feature.rms', 'librosa.feature.rms', ([], {'S': 'S', 'frame_length': "params['n_fft']"}), "(S=S, frame_length=params['n_fft'])\n", (746, 781), False, 'import librosa\n'), ((1026, 1133), 'librosa.cqt', 'librosa.cqt', (['y'], {'sr': 'sr', 'hop_length': 'hop_length', 'fmin': 'fmin', 'n_bins': 'n_bins', 'bins_per_octave': 'bins_per_octave'}), '(y, sr=sr, hop_length=hop_length, fmin=fmin, n_bins=n_bins,\n bins_per_octave=bins_per_octave)\n', (1037, 1133), False, 'import librosa\n'), ((1185, 1199), 'numpy.flipud', 'np.flipud', (['amp'], {}), '(amp)\n', (1194, 1199), True, 'import numpy as np\n'), ((1578, 1608), 'numpy.empty', 'np.empty', ([], {'shape': 'n', 'dtype': '"""<U8"""'}), "(shape=n, dtype='<U8')\n", (1586, 1608), True, 'import numpy as np\n'), ((1629, 1652), 'numpy.zeros', 'np.zeros', (['n', 'np.float32'], {}), '(n, np.float32)\n', (1637, 1652), True, 'import numpy as np\n'), ((1943, 1979), 'pandas.read_csv', 'pd.read_csv', (['annotation_fn'], {'sep': '"""\t"""'}), "(annotation_fn, sep='\\t')\n", (1954, 1979), True, 'import pandas as pd\n'), ((2627, 2660), 'numpy.asarray', 'np.asarray', (['themes'], {'dtype': 'np.int8'}), '(themes, dtype=np.int8)\n', (2637, 2660), True, 'import numpy as np\n'), ((282, 307), 'librosa.stft', 'librosa.stft', (['y'], {}), '(y, **params)\n', (294, 307), False, 'import librosa\n'), ((912, 936), 'librosa.hz_to_octs', 'librosa.hz_to_octs', (['fmax'], {}), '(fmax)\n', (930, 936), False, 'import librosa\n'), ((939, 963), 'librosa.hz_to_octs', 'librosa.hz_to_octs', (['fmin'], {}), '(fmin)\n', (957, 963), False, 'import librosa\n'), ((990, 1016), 'numpy.floor', 'np.floor', (['(n_bins / octaves)'], {}), '(n_bins / octaves)\n', (998, 1016), True, 'import numpy as np\n'), ((1164, 1173), 'numpy.abs', 'np.abs', (['S'], {}), '(S)\n', (1170, 1173), True, 'import numpy as np\n'), ((1687, 1716), 'os.path.exists', 'os.path.exists', (['annotation_fn'], {}), '(annotation_fn)\n', (1701, 1716), False, 'import os\n'), ((1794, 1814), 'numpy.zeros', 'np.zeros', (['n', 'np.int8'], {}), '(n, np.int8)\n', (1802, 1814), True, 'import numpy as np\n'), ((2410, 2452), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(end_frame - begin_frame)'], {}), '(0, 1, end_frame - begin_frame)\n', (2421, 2452), True, 'import numpy as np\n'), ((1835, 1854), 'numpy.ones', 'np.ones', (['n', 'np.int8'], {}), '(n, np.int8)\n', (1842, 1854), True, 'import numpy as np\n')]
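A hedged sketch of driving `build_features` / `build_fingerprint` above with a synthetic tone, assuming those definitions are in scope; 44.1 kHz is used so the CQT's top bin stays below Nyquist with the default `fmin` and `n_bins`:

# Synthetic 440 Hz tone; 44.1 kHz keeps the CQT top bin (~13.8 kHz here) below Nyquist.
import numpy as np

sr = 44100
t = np.linspace(0, 2.0, int(2.0 * sr), endpoint=False)
y = 0.5 * np.sin(2 * np.pi * 440.0 * t)

feats = build_features(y, sr)            # dict of (1, n_frames) feature arrays
fp = build_fingerprint(y, sr)             # (n_frames, n_bins) array scaled to [0, 1]
print(feats['centroid'].shape, feats['rms'].shape, fp.shape)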
import numpy as np import tensorflow as tf from run_seg_partnet import tf_IoU_per_shape, result_callback, get_probabilities, ComputeGraphSeg def test_tf_iou_per_shape(): # following logit and label are for one 3D model that belongs in category C1 which has 3 parts logit = tf.Variable(initial_value=np.array([ # logit for Undefined, C1Part1, C1Part2, C2Part3 [0.9, 0., 0., 0.], # logit for point/patch 1 [0., 0.9, 0., 0.], # logit for point/patch 2 [0.9, 0., 0., 0.], # logit for point/patch 3 [0., 0., 0.9, 0.], # logit for point/patch 4 [0., 0., 0., 0.9] # logit for point/patch 5 ])) label = tf.Variable(initial_value=np.array([ -1, # label for point/patch 1 MUST BE IGNORED 1, # label for point/patch 2 is C1Part1 0, # label for point/patch 3 is Undefined 1, # label for point/patch 4 is C1Part1 3 # label for point/patch 5 is C1Part3 ])) class_num = 4 # max(label) + 1 ignore = 0 with tf.Session() as sess: sess.run(tf.global_variables_initializer()) intersections, unions = sess.run(tf_IoU_per_shape(logit, label, class_num, mask=-1, ignore=ignore)) assert intersections[ignore] == 0 and unions[ignore] == 0 assert len(intersections) == class_num assert np.array_equal(intersections, np.array([0, 1, 0, 1])) assert np.array_equal(unions, np.array([0, 2, 1, 1])) def test_result_callback(): # number of points intersected or unioned over all the 3D models for each class intersections_and_unions = { 'intsc_0': 0.0, 'union_0': 0.0, # iou = 0 'intsc_1': 20.0, 'union_1': 50.0, # iou = 2/5 'intsc_2': 0.0, 'union_2': 30.0, # iou = 0 'intsc_3': 0.0, 'union_3': 0.0, # iou = 0 'intsc_4': 40.0, 'union_4': 60.0 # iou = 2/3 } # sum of ious over the 4 labels = 16/15 expected_iou = (16 / 15) / 3 assert result_callback(intersections_and_unions, 5)['iou'] == expected_iou def test_get_probabilities(): logits = tf.Variable(initial_value=np.array([ # logit for Undefined, C1Part1, C1Part2, C2Part3 [1.9, 0., 0., 0.], # logit for point/patch 1 [0., 2.9, 0., 0.], # logit for point/patch 2 [0.9, 0.8, 0., 0.], # logit for point/patch 3 [0., 0.9, 3.9, 0.], # logit for point/patch 4 [0., 0., 0.5, 0.9] # logit for point/patch 5 ])) probs = get_probabilities(logits) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) probabilities = sess.run(probs) for point_probs in probabilities: assert 9 <= int(np.sum(point_probs) * 10) <= 10 def test_set_weights(): tf_w = ComputeGraphSeg.set_weights([1, 2, 3, 4.]) assert tf_w.get_shape() == (4,)
[ "numpy.sum", "run_seg_partnet.result_callback", "tensorflow.global_variables_initializer", "run_seg_partnet.tf_IoU_per_shape", "tensorflow.Session", "run_seg_partnet.get_probabilities", "numpy.array", "run_seg_partnet.ComputeGraphSeg.set_weights" ]
[((2454, 2479), 'run_seg_partnet.get_probabilities', 'get_probabilities', (['logits'], {}), '(logits)\n', (2471, 2479), False, 'from run_seg_partnet import tf_IoU_per_shape, result_callback, get_probabilities, ComputeGraphSeg\n'), ((2742, 2785), 'run_seg_partnet.ComputeGraphSeg.set_weights', 'ComputeGraphSeg.set_weights', (['[1, 2, 3, 4.0]'], {}), '([1, 2, 3, 4.0])\n', (2769, 2785), False, 'from run_seg_partnet import tf_IoU_per_shape, result_callback, get_probabilities, ComputeGraphSeg\n'), ((1025, 1037), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1035, 1037), True, 'import tensorflow as tf\n'), ((2489, 2501), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2499, 2501), True, 'import tensorflow as tf\n'), ((310, 434), 'numpy.array', 'np.array', (['[[0.9, 0.0, 0.0, 0.0], [0.0, 0.9, 0.0, 0.0], [0.9, 0.0, 0.0, 0.0], [0.0, \n 0.0, 0.9, 0.0], [0.0, 0.0, 0.0, 0.9]]'], {}), '([[0.9, 0.0, 0.0, 0.0], [0.0, 0.9, 0.0, 0.0], [0.9, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.9, 0.0], [0.0, 0.0, 0.0, 0.9]])\n', (318, 434), True, 'import numpy as np\n'), ((693, 719), 'numpy.array', 'np.array', (['[-1, 1, 0, 1, 3]'], {}), '([-1, 1, 0, 1, 3])\n', (701, 719), True, 'import numpy as np\n'), ((1064, 1097), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1095, 1097), True, 'import tensorflow as tf\n'), ((1140, 1205), 'run_seg_partnet.tf_IoU_per_shape', 'tf_IoU_per_shape', (['logit', 'label', 'class_num'], {'mask': '(-1)', 'ignore': 'ignore'}), '(logit, label, class_num, mask=-1, ignore=ignore)\n', (1156, 1205), False, 'from run_seg_partnet import tf_IoU_per_shape, result_callback, get_probabilities, ComputeGraphSeg\n'), ((1365, 1387), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (1373, 1387), True, 'import numpy as np\n'), ((1427, 1449), 'numpy.array', 'np.array', (['[0, 2, 1, 1]'], {}), '([0, 2, 1, 1])\n', (1435, 1449), True, 'import numpy as np\n'), ((1955, 1999), 'run_seg_partnet.result_callback', 'result_callback', (['intersections_and_unions', '(5)'], {}), '(intersections_and_unions, 5)\n', (1970, 1999), False, 'from run_seg_partnet import tf_IoU_per_shape, result_callback, get_probabilities, ComputeGraphSeg\n'), ((2094, 2218), 'numpy.array', 'np.array', (['[[1.9, 0.0, 0.0, 0.0], [0.0, 2.9, 0.0, 0.0], [0.9, 0.8, 0.0, 0.0], [0.0, \n 0.9, 3.9, 0.0], [0.0, 0.0, 0.5, 0.9]]'], {}), '([[1.9, 0.0, 0.0, 0.0], [0.0, 2.9, 0.0, 0.0], [0.9, 0.8, 0.0, 0.0],\n [0.0, 0.9, 3.9, 0.0], [0.0, 0.0, 0.5, 0.9]])\n', (2102, 2218), True, 'import numpy as np\n'), ((2528, 2561), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2559, 2561), True, 'import tensorflow as tf\n'), ((2673, 2692), 'numpy.sum', 'np.sum', (['point_probs'], {}), '(point_probs)\n', (2679, 2692), True, 'import numpy as np\n')]
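A hedged sketch of the aggregation that `test_result_callback` above encodes: per-class IoU, skipping label 0 and classes with zero union, then averaged. The selection rule is inferred from the expected value in the test, not from the internals of `run_seg_partnet.result_callback`:

# Toy numbers copied from the test above; treat the selection rule as an inference.
sums = {
    'intsc_0': 0.0,  'union_0': 0.0,
    'intsc_1': 20.0, 'union_1': 50.0,
    'intsc_2': 0.0,  'union_2': 30.0,
    'intsc_3': 0.0,  'union_3': 0.0,
    'intsc_4': 40.0, 'union_4': 60.0,
}
ious = [sums['intsc_%d' % c] / sums['union_%d' % c]
        for c in range(1, 5) if sums['union_%d' % c] > 0]
mean_iou = sum(ious) / len(ious)      # (2/5 + 0 + 2/3) / 3 = (16/15) / 3, about 0.356
print(mean_iou)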
""" @author : <NAME> @date : 1 - 23 - 2021 The loss functions are really simple. You just need to understand whether it is a classification or regression task. All losses will be set in the model.finalize() model. """ import numpy as np import warnings from scipy.special import softmax as sfmx_indiv warnings.filterwarnings("ignore", category=RuntimeWarning) class Loss: """Base loss class.""" def __init__(self): self.SGD = False def loss(self, y, y_pred): pass def grad(self, y, y_pred): pass class MSE(Loss): """ MSE stands for mean-squared error, and its the loss you'll want to use for regression. To set it in the model.finalize() method just do: >>> from sealion import neural_networks as nn >>> model = nn.models.NeuralNetwork(layers_list) >>> model.finalize(loss=nn.loss.MSE(), optimizer=...) and you're all set! """ def __init__(self): super().__init__() self.type_regression = True def loss(self, y, y_pred): error = np.sum(np.power(y_pred - y, 2)) / (2 * len(y)) return error def grad(self, y, y_pred): return (y_pred - y) / len(y) def softmax(x): softmax_output = np.apply_along_axis(sfmx_indiv, 1, x) return softmax_output class CrossEntropy(Loss): """ This loss function is for classification problems. I know there's a binary log loss and then a multi-category cross entropy loss function for classification, but they're essentially the same thing so I thought using one class would make it easier. Remember to use one-hot encoded data for this to work (check out utils). If you are using this loss function, make sure your last layer is Softmax and vice versa. Otherwise, annoying error messages will occur. To set this in the ``model.finalize()`` method do: >>> from sealion import neural_networks as nn >>> model = nn.models.NeuralNetwork() >>> # ... add the layers ... >>> model.add(nn.layers.Softmax()) # last layer has to be softmax >>> model.finalize(loss=nn.loss.CrossEntropy(), optimizer=...) and that's all there is to it. """ def __init__(self): super().__init__() self.type_regression = False def loss(self, y, y_pred): return np.sum(y * np.log(y_pred + 1e-20)) / len( y ) # now give the crossentropy loss def grad(self, y, y_pred): y_pred = softmax(y_pred) return (y_pred - y) / len(y) # give the sexy partial derivative
[ "numpy.apply_along_axis", "numpy.power", "numpy.log", "warnings.filterwarnings" ]
[((303, 361), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (326, 361), False, 'import warnings\n'), ((1222, 1259), 'numpy.apply_along_axis', 'np.apply_along_axis', (['sfmx_indiv', '(1)', 'x'], {}), '(sfmx_indiv, 1, x)\n', (1241, 1259), True, 'import numpy as np\n'), ((1053, 1076), 'numpy.power', 'np.power', (['(y_pred - y)', '(2)'], {}), '(y_pred - y, 2)\n', (1061, 1076), True, 'import numpy as np\n'), ((2311, 2333), 'numpy.log', 'np.log', (['(y_pred + 1e-20)'], {}), '(y_pred + 1e-20)\n', (2317, 2333), True, 'import numpy as np\n')]
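A hedged sketch exercising the `MSE` loss above on toy regression targets; it assumes the `sealion` package is installed (import path taken from the docstrings above), otherwise the classes can be used directly:

import numpy as np
from sealion import neural_networks as nn   # import path from the docstrings above

y = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.5, 2.0, 2.0])
mse = nn.loss.MSE()
print(mse.loss(y, y_pred))   # sum((y_pred - y)**2) / (2 * n) = 1.25 / 6, about 0.208
print(mse.grad(y, y_pred))   # (y_pred - y) / n = [ 0.1667  0.     -0.3333]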
import numpy as np
import keras.backend.tensorflow_backend as backend
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
from game import VanilaGame,Snake,Food,Board
from tqdm import tqdm
import os
import cv2

tf.executing_eagerly()

DISCOUNT = 0.99
REPLAY_MEMORY_SIZE = 50_000  # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 10_000  # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 32  # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 50  # Terminal states (end of episodes)
MODEL_NAME = 'BabyConnerPt2.0.1'
MIN_REWARD = -200  # For model save
MEMORY_FRACTION = 0.20

# Environment settings
EPISODES = 20_000

# Exploration settings
epsilon = 1  # not a constant, going to be decayed
EPSILON_DECAY = 0.99975
MIN_EPSILON = 0.001

# Stats settings
AGGREGATE_STATS_EVERY = 10  # episodes
SHOW_PREVIEW = True

env = VanilaGame(300,300,30)

# For stats
ep_rewards = [-200]

# For more repetitive results
random.seed(1)
np.random.seed(1)
tf.compat.v1.set_random_seed(1)  # tf 1.0
#tf.random.set_seed(1)

# Memory fraction, used mostly when training multiple agents
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
#backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))

# Create models folder
if not os.path.isdir('models'):
    os.makedirs('models')


# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):

    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.model = None
        self.TB_graph = tf.compat.v1.Graph()
        with self.TB_graph.as_default():
            self.writer = tf.summary.create_file_writer(self.log_dir, flush_millis=5000)
            self.writer.set_as_default()
            self.all_summary_ops = tf.compat.v1.summary.all_v2_summary_ops()
        self.TB_sess = tf.compat.v1.InteractiveSession(graph=self.TB_graph)
        self.TB_sess.run(self.writer.init())

    # Overriding this method to stop creating default log writer
    def set_model(self, model):
        self.model = model
        self._train_dir = self.log_dir + '\\train'

    # Overrided, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        self.update_stats(**logs)

    # Overrided
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass

    def on_train_begin(self, logs=None):
        pass

    # Overrided, so won't close writer
    def on_train_end(self, _):
        pass

    # added for performance?
    def on_train_batch_end(self, _, __):
        pass

    # Custom method for saving own metrics
    # Creates writer, writes custom metrics and closes writer
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)

    def _write_logs(self, logs, index):
        for name, value in logs.items():
            self.TB_sess.run(self.all_summary_ops)
            if self.model is not None:
                name = f'{name}_{self.model.name}'
            self.TB_sess.run(tf.summary.scalar(name, value, step=index))
        self.model = None


class DQNAgent:
    def __init__(self):
        # main model gets trained every step
        self.tensorboard = ModifiedTensorBoard(log_dir = f"logs/{MODEL_NAME}-{int(time.time())}")
        tf.executing_eagerly()
        self.model = self.create_model()

        # target model is what we predict with every step
        self.target_model = self.create_model()
        self.target_model.set_weights(self.model.get_weights())

        self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)

        self.target_update_counter = 0

    def create_model(self):
        model = Sequential()
        model.add(Conv2D(256,(3,3), input_shape=env.OBSERVATION_SPACE_VALUES))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(2,2))
        model.add(Dropout(0.2))

        model.add(Conv2D(256,(3,3)))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(2,2))
        model.add(Dropout(0.2))

        model.add(Flatten())
        model.add(Dense(64))

        model.add(Dense(env.ACTION_SPACE_SIZE,activation="linear"))
        model.compile(loss='mse', optimizer=Adam(lr=0.001),metrics=['accuracy'])
        return model

    def update_replay_memory(self, transition):
        self.replay_memory.append(transition)

    def get_qs(self,state):
        return self.model.predict(np.array(state).reshape(-1,*state.shape)/255)[0]

    def train(self,terminal_state,step):
        if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
            return

        minibatch = random.sample(self.replay_memory,MINIBATCH_SIZE)

        current_states = np.array([transition[0] for transition in minibatch])/255
        current_qs_list = self.model.predict(current_states)

        new_current_states = np.array([transition[3] for transition in minibatch ])/255
        future_qs_list = self.target_model.predict(new_current_states)

        X = []
        y = []

        for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
            if not done:
                max_future_q = np.max(future_qs_list[index])
                new_q = reward + DISCOUNT * max_future_q
            else:
                new_q = reward

            current_qs = current_qs_list[index]
            current_qs[action] = new_q

            X.append(current_state)
            y.append(current_qs)

        self.model.fit(np.array(X)/255,np.array(y), batch_size=MINIBATCH_SIZE, verbose=0,shuffle=False,callbacks=[self.tensorboard] if terminal_state else None)

        if terminal_state:
            self.target_update_counter += 1

        if self.target_update_counter > UPDATE_TARGET_EVERY:
            self.target_model.set_weights(self.model.get_weights())
            self.target_update_counter = 0


agent = DQNAgent()

for episode in tqdm(range(1,EPISODES+1),ascii=True, unit="episode"):
    agent.tensorboard.step = episode

    episode_reward = 0
    step = 1
    current_state = env.reset()

    done = False
    while not done:
        if np.random.random() > epsilon:
            action = np.argmax(agent.get_qs(current_state))
        else:
            action = np.random.randint(0,env.ACTION_SPACE_SIZE)

        new_state, reward, done = env.step(action)
        episode_reward += reward

        if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
            env.render()

        agent.update_replay_memory((current_state,action, reward, new_state, done))
        agent.train(done,step)

        current_state = new_state
        step += 1

    # Append episode reward to a list and log stats (every given number of episodes)
    ep_rewards.append(episode_reward)
    if not episode % AGGREGATE_STATS_EVERY or episode == 1:
        average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:])/len(ep_rewards[-AGGREGATE_STATS_EVERY:])
        min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:])
        max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:])
        mem_size = len(agent.replay_memory)
        agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon, replay_mem_size=mem_size)

        # Save model, but only when min reward is greater or equal a set value
        if min_reward >= MIN_REWARD:
            agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')

    # Decay epsilon
    if epsilon > MIN_EPSILON:
        epsilon *= EPSILON_DECAY
        epsilon = max(MIN_EPSILON, epsilon)
[ "numpy.random.seed", "game.VanilaGame", "tensorflow.compat.v1.InteractiveSession", "random.sample", "keras.models.Sequential", "tensorflow.executing_eagerly", "numpy.random.randint", "collections.deque", "keras.layers.Flatten", "numpy.max", "random.seed", "keras.layers.MaxPooling2D", "tensorflow.summary.scalar", "keras.layers.Dropout", "keras.optimizers.Adam", "tensorflow.compat.v1.summary.all_v2_summary_ops", "tensorflow.compat.v1.set_random_seed", "keras.layers.Conv2D", "os.makedirs", "keras.layers.Activation", "os.path.isdir", "time.time", "keras.layers.Dense", "numpy.array", "numpy.random.random", "tensorflow.summary.create_file_writer", "tensorflow.compat.v1.Graph" ]
[((432, 454), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (452, 454), True, 'import tensorflow as tf\n'), ((1106, 1130), 'game.VanilaGame', 'VanilaGame', (['(300)', '(300)', '(30)'], {}), '(300, 300, 30)\n', (1116, 1130), False, 'from game import VanilaGame, Snake, Food, Board\n'), ((1193, 1207), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (1204, 1207), False, 'import random\n'), ((1208, 1225), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1222, 1225), True, 'import numpy as np\n'), ((1226, 1257), 'tensorflow.compat.v1.set_random_seed', 'tf.compat.v1.set_random_seed', (['(1)'], {}), '(1)\n', (1254, 1257), True, 'import tensorflow as tf\n'), ((1542, 1565), 'os.path.isdir', 'os.path.isdir', (['"""models"""'], {}), "('models')\n", (1555, 1565), False, 'import os\n'), ((1571, 1592), 'os.makedirs', 'os.makedirs', (['"""models"""'], {}), "('models')\n", (1582, 1592), False, 'import os\n'), ((1896, 1916), 'tensorflow.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), '()\n', (1914, 1916), True, 'import tensorflow as tf\n'), ((2188, 2240), 'tensorflow.compat.v1.InteractiveSession', 'tf.compat.v1.InteractiveSession', ([], {'graph': 'self.TB_graph'}), '(graph=self.TB_graph)\n', (2219, 2240), True, 'import tensorflow as tf\n'), ((3741, 3763), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (3761, 3763), True, 'import tensorflow as tf\n'), ((4006, 4038), 'collections.deque', 'deque', ([], {'maxlen': 'REPLAY_MEMORY_SIZE'}), '(maxlen=REPLAY_MEMORY_SIZE)\n', (4011, 4038), False, 'from collections import deque\n'), ((4134, 4146), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4144, 4146), False, 'from keras.models import Sequential\n'), ((5057, 5106), 'random.sample', 'random.sample', (['self.replay_memory', 'MINIBATCH_SIZE'], {}), '(self.replay_memory, MINIBATCH_SIZE)\n', (5070, 5106), False, 'import random\n'), ((1984, 2046), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['self.log_dir'], {'flush_millis': '(5000)'}), '(self.log_dir, flush_millis=5000)\n', (2013, 2046), True, 'import tensorflow as tf\n'), ((2123, 2164), 'tensorflow.compat.v1.summary.all_v2_summary_ops', 'tf.compat.v1.summary.all_v2_summary_ops', ([], {}), '()\n', (2162, 2164), True, 'import tensorflow as tf\n'), ((4165, 4226), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'input_shape': 'env.OBSERVATION_SPACE_VALUES'}), '(256, (3, 3), input_shape=env.OBSERVATION_SPACE_VALUES)\n', (4171, 4226), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4244, 4262), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4254, 4262), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4282, 4300), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (4294, 4300), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4319, 4331), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4326, 4331), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4352, 4371), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {}), '(256, (3, 3))\n', (4358, 4371), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4389, 4407), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4399, 4407), False, 'from keras.layers 
import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4427, 4445), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (4439, 4445), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4464, 4476), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4471, 4476), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4497, 4506), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4504, 4506), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4526, 4535), 'keras.layers.Dense', 'Dense', (['(64)'], {}), '(64)\n', (4531, 4535), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4555, 4604), 'keras.layers.Dense', 'Dense', (['env.ACTION_SPACE_SIZE'], {'activation': '"""linear"""'}), "(env.ACTION_SPACE_SIZE, activation='linear')\n", (4560, 4604), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((5132, 5185), 'numpy.array', 'np.array', (['[transition[0] for transition in minibatch]'], {}), '([transition[0] for transition in minibatch])\n', (5140, 5185), True, 'import numpy as np\n'), ((5281, 5334), 'numpy.array', 'np.array', (['[transition[3] for transition in minibatch]'], {}), '([transition[3] for transition in minibatch])\n', (5289, 5334), True, 'import numpy as np\n'), ((5932, 5943), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5940, 5943), True, 'import numpy as np\n'), ((6551, 6569), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6567, 6569), True, 'import numpy as np\n'), ((6675, 6718), 'numpy.random.randint', 'np.random.randint', (['(0)', 'env.ACTION_SPACE_SIZE'], {}), '(0, env.ACTION_SPACE_SIZE)\n', (6692, 6718), True, 'import numpy as np\n'), ((3479, 3521), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['name', 'value'], {'step': 'index'}), '(name, value, step=index)\n', (3496, 3521), True, 'import tensorflow as tf\n'), ((4649, 4663), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (4653, 4663), False, 'from keras.optimizers import Adam\n'), ((5600, 5629), 'numpy.max', 'np.max', (['future_qs_list[index]'], {}), '(future_qs_list[index])\n', (5606, 5629), True, 'import numpy as np\n'), ((5916, 5927), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5924, 5927), True, 'import numpy as np\n'), ((3717, 3728), 'time.time', 'time.time', ([], {}), '()\n', (3726, 3728), False, 'import time\n'), ((4865, 4880), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (4873, 4880), True, 'import numpy as np\n'), ((7928, 7939), 'time.time', 'time.time', ([], {}), '()\n', (7937, 7939), False, 'import time\n')]
"""Testing for Linear model module.""" import numpy as np import pytest from sklearn.base import is_regressor from sklearn.datasets import load_diabetes from sklearn.model_selection import train_test_split from sklearn.exceptions import NotFittedError from pyrcn.linear_model import IncrementalRegression from sklearn.linear_model import Ridge X_diabetes, y_diabetes = load_diabetes(return_X_y=True) def test_normalize() -> None: print('\ntest_normalize():') rs = np.random.RandomState(42) X = np.hstack((np.linspace(0., 10., 1000).reshape(-1, 1), np.linspace(-1., 1., 1000).reshape(-1, 1), rs.random(1000).reshape(-1, 1))) transformation = rs.random(size=(3, 2)) y = np.matmul(X, transformation) reg = IncrementalRegression(normalize=True) reg.fit(X, y) def test_postpone_inverse() -> None: print('\ntest_postpone_inverse():') rs = np.random.RandomState(42) index = range(1000) X = np.hstack((np.linspace(0., 10., 1000).reshape(-1, 1), np.linspace(-1., 1., 1000).reshape(-1, 1), rs.random(1000).reshape(-1, 1))) transformation = rs.random(size=(3, 2)) y = np.matmul(X, transformation) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=10, random_state=42) reg = IncrementalRegression() assert is_regressor(reg) for prt in np.array_split(index, 3): reg.partial_fit(X[prt, :], y[prt, :], postpone_inverse=True) with pytest.raises(NotFittedError): y_reg = reg.predict(X_test) reg.partial_fit(X, y) y_reg = reg.predict(X_test) print("tests: {0}\nregr: {1}".format(y_test, y_reg)) np.testing.assert_allclose(y_reg, y_test, rtol=.01, atol=.15) def test_linear() -> None: print('\ntest_linear():') rs = np.random.RandomState(42) index = range(1000) X = np.hstack((np.linspace(0., 10., 1000).reshape(-1, 1), np.linspace(-1., 1., 1000).reshape(-1, 1), rs.random(1000).reshape(-1, 1))) transformation = rs.random(size=(3, 2)) y = np.matmul(X, transformation) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=10, random_state=42) reg = IncrementalRegression() assert is_regressor(reg) for prt in np.array_split(index, 3): reg.partial_fit(X[prt, :], y[prt, :]) y_reg = reg.predict(X_test) print("tests: {0}\nregr: {1}".format(y_test, y_reg)) np.testing.assert_allclose(y_reg, y_test, rtol=.01, atol=.15) def test_compare_ridge() -> None: X_train, X_test, y_train, y_test = train_test_split(X_diabetes, y_diabetes, test_size=10, random_state=42) i_reg = IncrementalRegression(alpha=.01).fit(X_train, y_train) ridge = Ridge(alpha=.01, solver='svd').fit(X_train, y_train) print("incremental: {0} ridge: {1}".format(i_reg.coef_, ridge.coef_)) np.testing.assert_allclose(i_reg.coef_, ridge.coef_, rtol=.0001)
[ "pyrcn.linear_model.IncrementalRegression", "sklearn.model_selection.train_test_split", "sklearn.datasets.load_diabetes", "numpy.random.RandomState", "pytest.raises", "numpy.matmul", "numpy.linspace", "numpy.array_split", "numpy.testing.assert_allclose", "sklearn.base.is_regressor", "sklearn.linear_model.Ridge" ]
[((374, 404), 'sklearn.datasets.load_diabetes', 'load_diabetes', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (387, 404), False, 'from sklearn.datasets import load_diabetes\n'), ((479, 504), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (500, 504), True, 'import numpy as np\n'), ((733, 761), 'numpy.matmul', 'np.matmul', (['X', 'transformation'], {}), '(X, transformation)\n', (742, 761), True, 'import numpy as np\n'), ((772, 809), 'pyrcn.linear_model.IncrementalRegression', 'IncrementalRegression', ([], {'normalize': '(True)'}), '(normalize=True)\n', (793, 809), False, 'from pyrcn.linear_model import IncrementalRegression\n'), ((916, 941), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (937, 941), True, 'import numpy as np\n'), ((1194, 1222), 'numpy.matmul', 'np.matmul', (['X', 'transformation'], {}), '(X, transformation)\n', (1203, 1222), True, 'import numpy as np\n'), ((1263, 1316), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(10)', 'random_state': '(42)'}), '(X, y, test_size=10, random_state=42)\n', (1279, 1316), False, 'from sklearn.model_selection import train_test_split\n'), ((1383, 1406), 'pyrcn.linear_model.IncrementalRegression', 'IncrementalRegression', ([], {}), '()\n', (1404, 1406), False, 'from pyrcn.linear_model import IncrementalRegression\n'), ((1418, 1435), 'sklearn.base.is_regressor', 'is_regressor', (['reg'], {}), '(reg)\n', (1430, 1435), False, 'from sklearn.base import is_regressor\n'), ((1452, 1476), 'numpy.array_split', 'np.array_split', (['index', '(3)'], {}), '(index, 3)\n', (1466, 1476), True, 'import numpy as np\n'), ((1744, 1807), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_reg', 'y_test'], {'rtol': '(0.01)', 'atol': '(0.15)'}), '(y_reg, y_test, rtol=0.01, atol=0.15)\n', (1770, 1807), True, 'import numpy as np\n'), ((1874, 1899), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (1895, 1899), True, 'import numpy as np\n'), ((2152, 2180), 'numpy.matmul', 'np.matmul', (['X', 'transformation'], {}), '(X, transformation)\n', (2161, 2180), True, 'import numpy as np\n'), ((2221, 2274), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(10)', 'random_state': '(42)'}), '(X, y, test_size=10, random_state=42)\n', (2237, 2274), False, 'from sklearn.model_selection import train_test_split\n'), ((2341, 2364), 'pyrcn.linear_model.IncrementalRegression', 'IncrementalRegression', ([], {}), '()\n', (2362, 2364), False, 'from pyrcn.linear_model import IncrementalRegression\n'), ((2376, 2393), 'sklearn.base.is_regressor', 'is_regressor', (['reg'], {}), '(reg)\n', (2388, 2393), False, 'from sklearn.base import is_regressor\n'), ((2410, 2434), 'numpy.array_split', 'np.array_split', (['index', '(3)'], {}), '(index, 3)\n', (2424, 2434), True, 'import numpy as np\n'), ((2576, 2639), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_reg', 'y_test'], {'rtol': '(0.01)', 'atol': '(0.15)'}), '(y_reg, y_test, rtol=0.01, atol=0.15)\n', (2602, 2639), True, 'import numpy as np\n'), ((2713, 2784), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_diabetes', 'y_diabetes'], {'test_size': '(10)', 'random_state': '(42)'}), '(X_diabetes, y_diabetes, test_size=10, random_state=42)\n', (2729, 2784), False, 'from sklearn.model_selection import train_test_split\n'), ((3053, 3118), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', 
(['i_reg.coef_', 'ridge.coef_'], {'rtol': '(0.0001)'}), '(i_reg.coef_, ridge.coef_, rtol=0.0001)\n', (3079, 3118), True, 'import numpy as np\n'), ((1557, 1586), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (1570, 1586), False, 'import pytest\n'), ((2854, 2887), 'pyrcn.linear_model.IncrementalRegression', 'IncrementalRegression', ([], {'alpha': '(0.01)'}), '(alpha=0.01)\n', (2875, 2887), False, 'from pyrcn.linear_model import IncrementalRegression\n'), ((2921, 2952), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': '(0.01)', 'solver': '"""svd"""'}), "(alpha=0.01, solver='svd')\n", (2926, 2952), False, 'from sklearn.linear_model import Ridge\n'), ((524, 552), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', '(1000)'], {}), '(0.0, 10.0, 1000)\n', (535, 552), True, 'import numpy as np\n'), ((586, 614), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(1000)'], {}), '(-1.0, 1.0, 1000)\n', (597, 614), True, 'import numpy as np\n'), ((985, 1013), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', '(1000)'], {}), '(0.0, 10.0, 1000)\n', (996, 1013), True, 'import numpy as np\n'), ((1047, 1075), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(1000)'], {}), '(-1.0, 1.0, 1000)\n', (1058, 1075), True, 'import numpy as np\n'), ((1943, 1971), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', '(1000)'], {}), '(0.0, 10.0, 1000)\n', (1954, 1971), True, 'import numpy as np\n'), ((2005, 2033), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(1000)'], {}), '(-1.0, 1.0, 1000)\n', (2016, 2033), True, 'import numpy as np\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Model for fitting an absorption profile to spectral data. """ from __future__ import (division, print_function, absolute_import, unicode_literals) __all__ = ["ProfileFittingModel"] import logging import numpy as np import scipy.optimize as op import astropy.table from astropy.table import Row from astropy.constants import c as speed_of_light from collections import OrderedDict from scipy.special import wofz from scipy import integrate from .base import BaseSpectralModel from ..specutils import Spectrum1D from ..linelists import LineList logger = logging.getLogger(__name__) def _gaussian(x, *parameters): """ Evaluate a Gaussian profile at x, given the profile parameters. y = amplitude * exp(-(x - position)**2 / (2.0 * sigma**2)) :param x: The x-values to evaluate the Gaussian profile at. :param parameters: The position, sigma, and amplitude of the Gaussian profile. """ position, sigma, amplitude = parameters return amplitude * np.exp(-(x - position)**2 / (2.0 * sigma**2)) def _lorentzian(x, *parameters): """ Evaluate a Lorentzian profile at x, given the profile parameters: y = (amplitude/PI) * (width/((x - positions)**2 + width**2)) :param x: The x-values to evaluate the Lorentzian profile at. :param parameters: The position, width, and amplitude of the Lorentzian profile. """ position, width, amplitude = parameters return (amplitude/np.pi) * (width/((x - position)**2 + width**2)) def _voigt(x, *parameters): """ Evaluate a Voigt profile at x, given the profile parameters. :param x: The x-values to evaluate the Voigt profile at. :param parameters: The position, fwhm, amplitude, and shape of the Voigt profile. """ try: n = len(x) except TypeError: n = 1 position, fwhm, amplitude, shape = parameters profile = 1 / wofz(np.zeros((n)) + 1j * np.sqrt(np.log(2.0)) * shape).real profile *= amplitude * wofz(2*np.sqrt(np.log(2.0)) * (x - position)/fwhm \ + 1j * np.sqrt(np.log(2.0))*shape).real return profile class ProfileFittingModel(BaseSpectralModel): _profiles = { "gaussian": (_gaussian, ("mean", "sigma", "amplitude")), "lorentzian": (_lorentzian, ("mean", "width", "amplitude")), "voigt": (_voigt, ("mean", "fwhm", "amplitude", "shape")) } def __init__(self, session, transitions, **kwargs): """ Initialize a class for modelling spectra with analytic profile. :param session: The session that this spectral model will be associated with. :param transitions: A linelist containing atomic data for this model. """ super(ProfileFittingModel, self).__init__(session, transitions, **kwargs) # Initialize metadata with default fitting values. self.metadata.update({ "profile": "gaussian", "central_weighting": True, "window": 5, "continuum_order": 1, "detection_sigma": 0.5, "detection_pixels": 3, "max_iterations": 5, "wavelength_tolerance": 0.5, "velocity_tolerance": None, "mask": [], "antimask_flag": False, "elements": [self._verify_elements()], "species": [self._verify_species()] }) # Set the model parameter names based on the current metadata. self._update_parameter_names() self._verify_transitions() self._verify_metadata() # Create a _repr_element for this. 
self._repr_element = self.transitions["element"][0] return None @property def abundance_uncertainties(self): try: result = self.metadata["fitted_result"][2] return result["abundance_uncertainties"][0] except KeyError: return None @property def expot(self): return self.transitions[0]["expot"] @property def loggf(self): return self.transitions[0]["loggf"] @property def equivalent_width(self): try: result = self.metadata["fitted_result"][2] equivalent_width = result["equivalent_width"][0] except KeyError: return None return 1000. * equivalent_width @property def equivalent_width_uncertainty(self): try: result = self.metadata["fitted_result"][2] err = 1000.*np.nanmax(np.abs(result["equivalent_width"][1:3])) return err except: return None @property def reduced_equivalent_width(self): eqw = self.equivalent_width if eqw is None: return None return np.log10(eqw/self.wavelength) - 3. @property def measurement_type(self): return "eqw" @property def fwhm(self): try: popt = self.metadata["fitted_result"][0] if self.metadata["profile"] == "gaussian": return popt["sigma"]*2.355 elif self.metadata["profile"] == "lorentzian": return popt["width"] # I may be wrong about this value elif self.metadata["profile"] == "voigt": return popt["fwhm"] # I may be wrong about this value else: return None except (KeyError, TypeError): return None _profiles = { "gaussian": (_gaussian, ("mean", "sigma", "amplitude")), "lorentzian": (_lorentzian, ("mean", "width", "amplitude")), "voigt": (_voigt, ("mean", "fwhm", "amplitude", "shape")) } def _verify_transitions(self): """ Verify that the atomic or molecular transitions associated with this class are valid. """ # Check format first. transitions = self.transitions super(ProfileFittingModel, self)._verify_transitions() if len(transitions) > 1 and not isinstance(transitions, Row): raise ValueError("only a single transition can be associated with " "a ProfileFittingModel") # Check that the transition does not have multiple element names. try: elem2 = transitions["elem2"][0] except IndexError: elem2 = transitions["elem2"] if elem2 != "": raise ValueError("only an atomic transition can be associated with " "a ProfileFittingModel") return True def _verify_elements(self): """ Return the element that will be measured by this model. """ return self.transitions["element"][0].split()[0] def _verify_species(self): """ Return the species that will be measured by this model. Ignore isotopes. """ return np.floor(self.transitions["species"][0]*10)/10 def _verify_metadata(self): """ Verify the metadata associated with this class. """ # TODO return True def _update_parameter_names(self): """ Update the model parameter names based on the current metadata. """ # Profile parameter names. func, parameter_names = self._profiles[self.metadata["profile"]] parameter_names = list(parameter_names) # Continuum coefficients. parameter_names += ["c{0}".format(i) \ for i in range(self.metadata["continuum_order"] + 1)] # Update the bounds. bounds = {} if self.metadata["profile"] == "gaussian": bounds.update({ "sigma": (-0.5, 0.5), "amplitude": (0, 1), }) elif self.metadata["profile"] == "voigt": bounds["fwhm"] = (-0.5, 0.5) if self.metadata["wavelength_tolerance"] is not None \ or self.metadata["velocity_tolerance"] is not None: # Convert velocity tolerance into wavelength. 
wavelength = self.transitions["wavelength"] try: wavelength = wavelength[0] except IndexError: None vt = abs(self.metadata.get("velocity_tolerance", None) or np.inf) wt = abs(self.metadata.get("wavelength_tolerance", None) or np.inf) bound = np.nanmin([ wt, wavelength * vt/speed_of_light.to("km/s").value]) bounds["mean"] = (wavelength - bound, wavelength + bound) else: # TODO: Allow the wavelength to be fixed. raise NotImplementedError("wavelength cannot be fixed yet; " "set a small tolerance on wavelength") self._parameter_names = parameter_names self._parameter_bounds = bounds return True def _initial_guess(self, spectrum, **kwargs): """ Generate an initial guess for the model parameters. :param spectrum: The observed spectrum. """ wavelength = self.transitions["wavelength"] try: wavelength = wavelength[0] except IndexError: None p0 = [ wavelength, kwargs.pop("p0_sigma", 0.1), ] if spectrum is None: p0.append(0.5) else: p0.append(1.0 - \ spectrum.flux[spectrum.dispersion.searchsorted(wavelength)]) if self.metadata["profile"] == "voigt": p0.append(kwargs.pop("p0_shape", 0)) # Continuum? if self.metadata["continuum_order"] > -1: p0.extend(([0] * self.metadata["continuum_order"]) + [1]) return np.array(p0) def fit(self, spectrum=None, **kwargs): """ Fit an absorption profile to the transition in the spectrum. :param spectrum: [optional] The observed spectrum to fit the profile transition model. If None is given, this will default to the normalized rest-frame spectrum in the parent session. """ spectrum = self._verify_spectrum(spectrum) failure = False # Update internal metadata with any input parameters. # Ignore additional parameters because other BaseSpectralModels will # have different input arguments. for key in set(self.metadata).intersection(kwargs): self.metadata[key] = kwargs[key] # What model parameters are in the fitting process? # In synth these would be abundances, etc. Here they are profile/cont # parameters. self._update_parameter_names() # Get a bad initial guess. p0 = self._initial_guess(spectrum, **kwargs) # Build a mask based on the window fitting range, and prepare the data. mask = self.mask(spectrum) x, y = spectrum.dispersion[mask], spectrum.flux[mask] yerr, absolute_sigma = ((1.0/spectrum.ivar[mask])**0.5, True) if not np.all(np.isfinite(yerr)): yerr, absolute_sigma = (np.ones_like(x), False) # Central weighting? if self.metadata["central_weighting"]: yerr /= (1 + np.exp(-(x - p0[0])**2 / (4.0 * p0[1]**2))) # How many iterations to do? nearby_lines = [] iterative_mask = np.isfinite(y * yerr) for iteration in range(self.metadata["max_iterations"]): if not any(iterative_mask): self.metadata["is_acceptable"] = False try: del self.metadata["fitted_result"] except KeyError: None return failure try: p_opt, p_cov = op.curve_fit(self.fitting_function, xdata=x[iterative_mask], ydata=y[iterative_mask], sigma=yerr[iterative_mask], p0=p0, absolute_sigma=absolute_sigma) except: logger.exception( "Exception raised in fitting atomic transition {0} "\ "on iteration {1}".format(self, iteration)) if iteration == 0: self.metadata["is_acceptable"] = False try: del self.metadata["fitted_result"] except KeyError: None return failure # Look for outliers peaks. # TODO: use continuum or model? 
O = self.metadata["continuum_order"] continuum = np.ones_like(x) \ if 0 > O else np.polyval(p_opt[-(O + 1):], x) model = self(x, *p_opt) sigmas = (y - model)/np.std(y[iterative_mask]) sigmas[~iterative_mask] = 0 # Ignore points that are already masked outliers = np.where(sigmas < -self.metadata["detection_sigma"])[0] # Look for groups of neighbouring outlier points. separators = np.repeat(1 + np.where(np.diff(outliers) > 1)[0], 2) separators = np.hstack([0, separators, None]).reshape(-1, 2) for start, end in separators: indices = outliers[start:end] # Require a minimum group size by number of pixels. if indices.size < self.metadata["detection_pixels"]: continue # Try and fit an absorption function to the centroid of the # region. lower_group_wl = x[indices[0]] upper_group_wl = x[indices[-1]] def model_nearby_line(x_, *p): # Strict requirements on these parameters, since we don't # need to know them precisely. if not (lower_group_wl <= p[0] <= upper_group_wl) \ or abs(p[1]) > p_opt[1] \ or not (1 >= p[2] > 0): return np.nan * np.ones_like(x_) return model[iterative_mask] * self(x_, *p) # Initial parameters for this line. p0_outlier = [ np.mean(x[indices]), 0.5 * p_opt[1], np.max(continuum[indices] - y[indices]) ] if self.metadata["profile"] == "voigt": p0_outlier.append(p0[3]) try: p_out, cov = op.curve_fit(model_nearby_line, xdata=x[iterative_mask], ydata=y[iterative_mask], sigma=yerr[iterative_mask], p0=p0_outlier, absolute_sigma=absolute_sigma, check_finite=True) except: # Just take a narrow range and exclude that? # TODO: The old SMH just did nothing in this scenario, but # we may want to revise that behaviour. None else: # Exclude +/- 3 sigma of the fitted line l, u = (p_out[0] - 3 * p_out[1], p_out[0] + 3 * p_out[1]) # Store this line and the region masked out by it. nearby_lines.append([p_out, (u, l)]) # Now update the iterative mask to exclude this line. iterative_mask *= ~((u > x) * (x > l)) # Update p0 with the best guess from this iteration. p0 = p_opt.copy() # Finished looking for neighbouring lines # `max_iterations` rounds of removing nearby lines. Now do final fit: p_opt, p_cov = op.curve_fit(self.fitting_function, xdata=x[iterative_mask], ydata=y[iterative_mask], sigma=yerr[iterative_mask], p0=p0, absolute_sigma=absolute_sigma) assert p_cov is not None # Make many draws from the covariance matrix. draws = kwargs.pop("covariance_draws", self.session.setting("covariance_draws",100)) percentiles = kwargs.pop("percentiles", \ self.session.setting("error_percentiles",(16, 84))) if np.all(np.isfinite(p_cov)): p_alt = np.random.multivariate_normal(p_opt, p_cov, size=draws) else: p_alt = np.nan * np.ones((draws, p_opt.size)) # Integrate the profile. profile, _ = self._profiles[self.metadata["profile"]] if profile == _gaussian: ew = abs(p_opt[1] * p_opt[2] * np.sqrt(2 * np.pi)) ew_alt = np.abs(p_alt[:, 1] * p_alt[:, 2] * np.sqrt(2 * np.pi)) ew_uncertainty = np.percentile(ew_alt, percentiles) - ew else: N, integrate_sigma = (len(_), kwargs.pop("integrate_sigma", 10)) l, u = ( p_opt[0] - integrate_sigma * p_opt[1], p_opt[0] + integrate_sigma * p_opt[1] ) ew = np.abs(integrate.quad(profile, l, u, args=tuple(p_opt[:N]))[0]) ew_alt = np.abs([integrate.quad(profile, l, u, args=tuple(_[:N]))[0] \ for _ in p_alt]) ew_uncertainty = np.percentile(ew_alt, percentiles) - ew # Calculate chi-square for the points that we modelled. 
ivar = spectrum.ivar[mask] if not np.any(np.isfinite(ivar)): ivar = 1 residuals = y - self(x, *p_opt) residuals[~iterative_mask] = np.nan chi_sq = residuals**2 * ivar dof = np.isfinite(chi_sq).sum() - len(p_opt) - 1 chi_sq = np.nansum(chi_sq) model_y = self(x, *p_opt) model_yerr = np.percentile( [self(x, *_) for _ in p_alt], percentiles, axis=0) - model_y model_yerr = np.max(np.abs(model_yerr), axis=0) ### DEBUG PLOT ##fig, ax = plt.subplots() ##ax.plot(x, y, c='k', drawstyle='steps-mid') ## ##O = self.metadata["continuum_order"] ##bg = np.ones_like(x) if 0 > O else np.polyval(p_opt[-(O + 1):], x) ##for p, (u, l) in nearby_lines: ## bg *= self(x, *p) ## ## m = (u >= x) * (x >= l) ## ax.scatter(x[m], y[m], facecolor="r") ## ##ax.plot(x, bg, c='r') ##ax.plot(x, model_y, c='b') ## ##model_err = np.percentile( ## [self(x, *_) for _ in p_alt], percentiles, axis=0) ## ##ax.fill_between(x, model_err[0] + model_y, model_err[1] + model_y, ## edgecolor="None", facecolor="b", alpha=0.5) # Convert x, model_y, etc back to real-spectrum indices. if self.session.setting("show_full_profiles", False): # HACK #152 indices = spectrum.dispersion.searchsorted(x) x = spectrum.dispersion[indices[0]:1 + indices[-1]] y = spectrum.flux[indices[0]:1 + indices[-1]] model_y = self(x, *p_opt) model_yerr = np.percentile( [self(x, *_) for _ in p_alt], percentiles, axis=0) - model_y model_yerr = np.max(np.abs(model_yerr), axis=0) residuals = y - model_y else: x, model_y, model_yerr, residuals = self._fill_masked_arrays( spectrum, x, model_y, model_yerr, residuals) # We ignore the uncertainty in wavelength position because it only # affects the uncertainty in REW at the ~10^-5 level. rew = np.log10(ew/p_opt[0]) rew_uncertainty = np.log10((ew + ew_uncertainty)/p_opt[0]) - rew fitting_metadata = { "equivalent_width": (ew, ew_uncertainty[0], ew_uncertainty[1]), "reduced_equivalent_width": np.hstack([rew, rew_uncertainty]), "data_indices": np.where(mask)[0][iterative_mask], "model_x": x, "model_y": model_y, "model_yerr": model_yerr, "residual": residuals, "nearby_lines": nearby_lines, "chi_sq": chi_sq, "dof": dof } # Update the equivalent width in the transition. # REMOVED: see Issue #38 #self.transitions["equivalent_width"] = ew # Convert p_opt to ordered dictionary named_p_opt = OrderedDict(zip(self.parameter_names, p_opt)) self.metadata["fitted_result"] = (named_p_opt, p_cov, fitting_metadata) # Only mark as acceptable if the model meets the quality constraints. 
self.is_acceptable = self.meets_quality_constraints_in_parent_session # Used normal fit self.metadata["used_monte_carlo_fit"] = False return self.metadata["fitted_result"] def montecarlo_fit(self, N=100, **kwargs): """ Run a fit N times using spectrum ivar and report the resulting median and scatter """ # Create base spectrum to add noise to spectrum = self._verify_spectrum(None) window = abs(self.metadata["window"]) mask = (spectrum.dispersion >= self.transitions["wavelength"][0] - window) & \ (spectrum.dispersion <= self.transitions["wavelength"][-1]+ window) spectrum = Spectrum1D(spectrum.dispersion[mask], spectrum.flux[mask], spectrum.ivar[mask]) # Get rid of some kwargs orig_covariance_draws = kwargs.pop("covariance_draws", self.session.setting("covariance_draws",100)) # Fit multiple times EQWs = np.zeros(N) + np.nan for i in range(N): spec_i = spectrum.add_noise() try: meta = self.fit(spec_i, covariance_draws=1, **kwargs) except Exception as e: print(e) EQWs[i] = np.nan else: if meta: EQWs[i] = meta[2]["equivalent_width"][0] else: EQWs[i] = np.nan if np.any(np.isnan(EQWs)): logger.debug("{:.1f} {:.1f}: {} EQWs are nan".format(self.species[0], self.wavelength, np.sum(np.isnan(EQWs)))) # Run a fit to reset the plotting part to the original spectrum while True: meta = self.fit(covariance_draws=orig_covariance_draws, **kwargs) if "fitted_result" in self.metadata: break logger.debug("ERROR: default fit did not work! Trying again...") # Update with the montecarlo errors self.metadata["used_monte_carlo_fit"] = True percentiles = kwargs.pop("percentiles", \ self.session.setting("error_percentiles",(16, 84))) ew = np.nanmedian(EQWs) ew_uncertainty = np.nanpercentile(EQWs, percentiles) - ew rew = np.log10(ew/self.wavelength) rew_uncertainty = np.log10((ew + ew_uncertainty)/self.wavelength) - rew self.metadata["fitted_result"][2]["equivalent_width"] = (ew, ew_uncertainty[0], ew_uncertainty[1]) self.metadata["fitted_result"][2]["reduced_equivalent_width"] = np.hstack([rew, rew_uncertainty]) return 1000.*ew, 1000.*np.max(np.abs(ew_uncertainty)) def _find_abund(self, ew_err=None): eqw = self.equivalent_width if eqw is None: return None if ew_err is not None: transitions = LineList.vstack([self.transitions[0], self.transitions[0]]) transitions["equivalent_width"] = [eqw, eqw + ew_err] failure_message = "Failed to compute abundances for {:.1f} {:.3f}, eqws={:.2f}, {:.2f}".format( self.species[0], self.wavelength, eqw, eqw+ew_err) else: transitions = self.transitions.copy() transitions["equivalent_width"] = [eqw] failure_message = "Failed to compute abundances for {:.1f} {:.3f}, eqw={:.2f}".format( self.species[0], self.wavelength, eqw) try: abundances = self.session.rt.abundance_cog( self.session.stellar_photosphere, transitions, twd=self.session.twd) except Exception as e: logger.warn(failure_message) return None else: if len(abundances)==2: return np.abs(abundances[1] - abundances[0]) else: return abundances[0] def find_error(self, sigma=1, ew_err_scale=1.0): """ Find the sigma-th abundance uncertainty corresponding to sigma-th deviation in EQW Scale the equivalent width error by ew_err_scale """ eqw = self.equivalent_width ew_err = self.equivalent_width_uncertainty if eqw is None or ew_err is None: return None ew_err *= ew_err_scale*sigma abund_err = self._find_abund(ew_err) self.metadata["{}_sigma_abundance_error".format(sigma)] = abund_err self.metadata["{}_sigma_eqw_error".format(sigma)] = ew_err return abund_err def propagate_stellar_parameter_error(self): e_Teff, e_logg, e_vt, e_MH = 
self.session.stellar_parameters_err Teff, logg, vt, MH = self.session.stellar_parameters alpha = self.session.metadata["stellar_parameters"]["alpha"] try: self.session.set_stellar_parameters( Teff, logg, vt, MH, alpha) abund0 = self._find_abund() self.session.set_stellar_parameters( Teff+e_Teff, logg, vt, MH, alpha) abund1 = self._find_abund() self.session.set_stellar_parameters( Teff, logg+e_logg, vt, MH, alpha) abund2 = self._find_abund() self.session.set_stellar_parameters( Teff, logg, vt+e_vt, MH, alpha) abund3 = self._find_abund() self.session.set_stellar_parameters( Teff, logg, vt, MH+e_MH, alpha) abund4 = self._find_abund() dTeff_error = abund1-abund0 dlogg_error = abund2-abund0 dvt_error = abund3-abund0 dMH_error = abund4-abund0 except: self.session.set_stellar_parameters( Teff, logg, vt, MH, alpha) else: self.session.set_stellar_parameters( Teff, logg, vt, MH, alpha) self.metadata["systematic_abundance_error"] = np.sqrt( dTeff_error**2 + dlogg_error**2 + dvt_error**2 + dMH_error**2) self.metadata["systematic_stellar_parameter_abundance_error"] = { "effective_temperature": dTeff_error, "surface_gravity": dlogg_error, "microturbulence": dvt_error, "metallicity": dMH_error } return self.metadata["systematic_abundance_error"] def __call__(self, dispersion, *parameters): """ Generate data at the dispersion points, given the parameters. :param dispersion: An array of dispersion points to calculate the data for. :param parameters: Keyword arguments of the model parameters and their values. """ function, profile_parameters = self._profiles[self.metadata["profile"]] N = len(profile_parameters) y = 1.0 - function(dispersion, *parameters[:N]) # Assume rest of the parameters are continuum coefficients. if parameters[N:]: y *= np.polyval(parameters[N:], dispersion) return y
[ "numpy.nanpercentile", "numpy.abs", "numpy.nanmedian", "numpy.floor", "numpy.ones", "numpy.isnan", "numpy.mean", "numpy.exp", "astropy.constants.c.to", "numpy.polyval", "numpy.std", "numpy.isfinite", "numpy.max", "numpy.log10", "numpy.nansum", "numpy.ones_like", "scipy.optimize.curve_fit", "numpy.hstack", "numpy.percentile", "numpy.log", "numpy.zeros", "numpy.where", "numpy.array", "numpy.random.multivariate_normal", "numpy.diff", "logging.getLogger", "numpy.sqrt" ]
[((632, 659), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (649, 659), False, 'import logging\n'), ((1077, 1126), 'numpy.exp', 'np.exp', (['(-(x - position) ** 2 / (2.0 * sigma ** 2))'], {}), '(-(x - position) ** 2 / (2.0 * sigma ** 2))\n', (1083, 1126), True, 'import numpy as np\n'), ((9834, 9846), 'numpy.array', 'np.array', (['p0'], {}), '(p0)\n', (9842, 9846), True, 'import numpy as np\n'), ((11469, 11490), 'numpy.isfinite', 'np.isfinite', (['(y * yerr)'], {}), '(y * yerr)\n', (11480, 11490), True, 'import numpy as np\n'), ((15913, 16074), 'scipy.optimize.curve_fit', 'op.curve_fit', (['self.fitting_function'], {'xdata': 'x[iterative_mask]', 'ydata': 'y[iterative_mask]', 'sigma': 'yerr[iterative_mask]', 'p0': 'p0', 'absolute_sigma': 'absolute_sigma'}), '(self.fitting_function, xdata=x[iterative_mask], ydata=y[\n iterative_mask], sigma=yerr[iterative_mask], p0=p0, absolute_sigma=\n absolute_sigma)\n', (15925, 16074), True, 'import scipy.optimize as op\n'), ((17805, 17822), 'numpy.nansum', 'np.nansum', (['chi_sq'], {}), '(chi_sq)\n', (17814, 17822), True, 'import numpy as np\n'), ((19697, 19720), 'numpy.log10', 'np.log10', (['(ew / p_opt[0])'], {}), '(ew / p_opt[0])\n', (19705, 19720), True, 'import numpy as np\n'), ((22864, 22882), 'numpy.nanmedian', 'np.nanmedian', (['EQWs'], {}), '(EQWs)\n', (22876, 22882), True, 'import numpy as np\n'), ((22963, 22993), 'numpy.log10', 'np.log10', (['(ew / self.wavelength)'], {}), '(ew / self.wavelength)\n', (22971, 22993), True, 'import numpy as np\n'), ((23251, 23284), 'numpy.hstack', 'np.hstack', (['[rew, rew_uncertainty]'], {}), '([rew, rew_uncertainty])\n', (23260, 23284), True, 'import numpy as np\n'), ((4880, 4911), 'numpy.log10', 'np.log10', (['(eqw / self.wavelength)'], {}), '(eqw / self.wavelength)\n', (4888, 4911), True, 'import numpy as np\n'), ((6994, 7039), 'numpy.floor', 'np.floor', (["(self.transitions['species'][0] * 10)"], {}), "(self.transitions['species'][0] * 10)\n", (7002, 7039), True, 'import numpy as np\n'), ((16441, 16459), 'numpy.isfinite', 'np.isfinite', (['p_cov'], {}), '(p_cov)\n', (16452, 16459), True, 'import numpy as np\n'), ((16482, 16537), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['p_opt', 'p_cov'], {'size': 'draws'}), '(p_opt, p_cov, size=draws)\n', (16511, 16537), True, 'import numpy as np\n'), ((18004, 18022), 'numpy.abs', 'np.abs', (['model_yerr'], {}), '(model_yerr)\n', (18010, 18022), True, 'import numpy as np\n'), ((19745, 19787), 'numpy.log10', 'np.log10', (['((ew + ew_uncertainty) / p_opt[0])'], {}), '((ew + ew_uncertainty) / p_opt[0])\n', (19753, 19787), True, 'import numpy as np\n'), ((19938, 19971), 'numpy.hstack', 'np.hstack', (['[rew, rew_uncertainty]'], {}), '([rew, rew_uncertainty])\n', (19947, 19971), True, 'import numpy as np\n'), ((21666, 21677), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (21674, 21677), True, 'import numpy as np\n'), ((22126, 22140), 'numpy.isnan', 'np.isnan', (['EQWs'], {}), '(EQWs)\n', (22134, 22140), True, 'import numpy as np\n'), ((22908, 22943), 'numpy.nanpercentile', 'np.nanpercentile', (['EQWs', 'percentiles'], {}), '(EQWs, percentiles)\n', (22924, 22943), True, 'import numpy as np\n'), ((23018, 23067), 'numpy.log10', 'np.log10', (['((ew + ew_uncertainty) / self.wavelength)'], {}), '((ew + ew_uncertainty) / self.wavelength)\n', (23026, 23067), True, 'import numpy as np\n'), ((26518, 26596), 'numpy.sqrt', 'np.sqrt', (['(dTeff_error ** 2 + dlogg_error ** 2 + dvt_error ** 2 + dMH_error ** 2)'], {}), '(dTeff_error 
** 2 + dlogg_error ** 2 + dvt_error ** 2 + dMH_error ** 2)\n', (26525, 26596), True, 'import numpy as np\n'), ((27583, 27621), 'numpy.polyval', 'np.polyval', (['parameters[N:]', 'dispersion'], {}), '(parameters[N:], dispersion)\n', (27593, 27621), True, 'import numpy as np\n'), ((11154, 11171), 'numpy.isfinite', 'np.isfinite', (['yerr'], {}), '(yerr)\n', (11165, 11171), True, 'import numpy as np\n'), ((11210, 11225), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (11222, 11225), True, 'import numpy as np\n'), ((11336, 11382), 'numpy.exp', 'np.exp', (['(-(x - p0[0]) ** 2 / (4.0 * p0[1] ** 2))'], {}), '(-(x - p0[0]) ** 2 / (4.0 * p0[1] ** 2))\n', (11342, 11382), True, 'import numpy as np\n'), ((11883, 12044), 'scipy.optimize.curve_fit', 'op.curve_fit', (['self.fitting_function'], {'xdata': 'x[iterative_mask]', 'ydata': 'y[iterative_mask]', 'sigma': 'yerr[iterative_mask]', 'p0': 'p0', 'absolute_sigma': 'absolute_sigma'}), '(self.fitting_function, xdata=x[iterative_mask], ydata=y[\n iterative_mask], sigma=yerr[iterative_mask], p0=p0, absolute_sigma=\n absolute_sigma)\n', (11895, 12044), True, 'import scipy.optimize as op\n'), ((12747, 12762), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (12759, 12762), True, 'import numpy as np\n'), ((12795, 12826), 'numpy.polyval', 'np.polyval', (['p_opt[-(O + 1):]', 'x'], {}), '(p_opt[-(O + 1):], x)\n', (12805, 12826), True, 'import numpy as np\n'), ((12914, 12939), 'numpy.std', 'np.std', (['y[iterative_mask]'], {}), '(y[iterative_mask])\n', (12920, 12939), True, 'import numpy as np\n'), ((13043, 13095), 'numpy.where', 'np.where', (["(sigmas < -self.metadata['detection_sigma'])"], {}), "(sigmas < -self.metadata['detection_sigma'])\n", (13051, 13095), True, 'import numpy as np\n'), ((16581, 16609), 'numpy.ones', 'np.ones', (['(draws, p_opt.size)'], {}), '((draws, p_opt.size))\n', (16588, 16609), True, 'import numpy as np\n'), ((16907, 16941), 'numpy.percentile', 'np.percentile', (['ew_alt', 'percentiles'], {}), '(ew_alt, percentiles)\n', (16920, 16941), True, 'import numpy as np\n'), ((17410, 17444), 'numpy.percentile', 'np.percentile', (['ew_alt', 'percentiles'], {}), '(ew_alt, percentiles)\n', (17423, 17444), True, 'import numpy as np\n'), ((17580, 17597), 'numpy.isfinite', 'np.isfinite', (['ivar'], {}), '(ivar)\n', (17591, 17597), True, 'import numpy as np\n'), ((19324, 19342), 'numpy.abs', 'np.abs', (['model_yerr'], {}), '(model_yerr)\n', (19330, 19342), True, 'import numpy as np\n'), ((24423, 24460), 'numpy.abs', 'np.abs', (['(abundances[1] - abundances[0])'], {}), '(abundances[1] - abundances[0])\n', (24429, 24460), True, 'import numpy as np\n'), ((2016, 2027), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2024, 2027), True, 'import numpy as np\n'), ((4630, 4669), 'numpy.abs', 'np.abs', (["result['equivalent_width'][1:3]"], {}), "(result['equivalent_width'][1:3])\n", (4636, 4669), True, 'import numpy as np\n'), ((13265, 13297), 'numpy.hstack', 'np.hstack', (['[0, separators, None]'], {}), '([0, separators, None])\n', (13274, 13297), True, 'import numpy as np\n'), ((14333, 14352), 'numpy.mean', 'np.mean', (['x[indices]'], {}), '(x[indices])\n', (14340, 14352), True, 'import numpy as np\n'), ((14410, 14449), 'numpy.max', 'np.max', (['(continuum[indices] - y[indices])'], {}), '(continuum[indices] - y[indices])\n', (14416, 14449), True, 'import numpy as np\n'), ((14624, 14807), 'scipy.optimize.curve_fit', 'op.curve_fit', (['model_nearby_line'], {'xdata': 'x[iterative_mask]', 'ydata': 'y[iterative_mask]', 'sigma': 
'yerr[iterative_mask]', 'p0': 'p0_outlier', 'absolute_sigma': 'absolute_sigma', 'check_finite': '(True)'}), '(model_nearby_line, xdata=x[iterative_mask], ydata=y[\n iterative_mask], sigma=yerr[iterative_mask], p0=p0_outlier,\n absolute_sigma=absolute_sigma, check_finite=True)\n', (14636, 14807), True, 'import scipy.optimize as op\n'), ((16782, 16800), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (16789, 16800), True, 'import numpy as np\n'), ((16858, 16876), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (16865, 16876), True, 'import numpy as np\n'), ((20001, 20015), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (20009, 20015), True, 'import numpy as np\n'), ((23323, 23345), 'numpy.abs', 'np.abs', (['ew_uncertainty'], {}), '(ew_uncertainty)\n', (23329, 23345), True, 'import numpy as np\n'), ((17745, 17764), 'numpy.isfinite', 'np.isfinite', (['chi_sq'], {}), '(chi_sq)\n', (17756, 17764), True, 'import numpy as np\n'), ((22310, 22324), 'numpy.isnan', 'np.isnan', (['EQWs'], {}), '(EQWs)\n', (22318, 22324), True, 'import numpy as np\n'), ((8529, 8554), 'astropy.constants.c.to', 'speed_of_light.to', (['"""km/s"""'], {}), "('km/s')\n", (8546, 8554), True, 'from astropy.constants import c as speed_of_light\n'), ((14127, 14143), 'numpy.ones_like', 'np.ones_like', (['x_'], {}), '(x_)\n', (14139, 14143), True, 'import numpy as np\n'), ((2045, 2056), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (2051, 2056), True, 'import numpy as np\n'), ((2174, 2185), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (2180, 2185), True, 'import numpy as np\n'), ((13210, 13227), 'numpy.diff', 'np.diff', (['outliers'], {}), '(outliers)\n', (13217, 13227), True, 'import numpy as np\n'), ((2114, 2125), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (2120, 2125), True, 'import numpy as np\n')]
import glob
import os
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
#import importlib
#import logisRegresANA

def main():
    np.random.seed(1)  # shuffle random seed generator

    # Ising model parameters
    L=40  # linear system size
    J=-1.0  # Ising interaction
    T=np.linspace(0.25,4.0,16)  # set of temperatures
    T_c=2.26  # Onsager critical temperature in the TD limit

    ##### prepare training and test data sets
    ###### define ML parameters
    num_classes=2
    train_to_test_ratio=0.5  # training samples

    # path to data directory
    path_to_data=os.path.expanduser('.')+'/data/'

    # load data
    file_name = "Ising2DFM_reSample_L40_T=All.pkl"  # this file contains 16*10000 samples taken in T=np.arange(0.25,4.0001,0.25)
    file = open(path_to_data+file_name,'rb')
    data = pickle.load(file)  # pickle reads the file and returns the Python object (1D array, compressed bits)
    data = np.unpackbits(data).reshape(-1, 1600)  # Decompress array and reshape for convenience
    #type(data)
    #data[np.where(data==0)]=-1  # map 0 state to -1 (Ising variable can take values +/-1)

    file_name = "Ising2DFM_reSample_L40_T=All_labels.pkl"  # this file contains 16*10000 samples taken in T=np.arange(0.25,4.0001,0.25)
    labels = pickle.load(open(path_to_data+file_name,'rb'))  # pickle reads the file and returns the Python object (here just a 1D array with the binary labels)

    # divide data into ordered, critical and disordered
    X_ordered=data[:70000,:]
    Y_ordered=labels[:70000]

    X_critical=data[70000:100000,:]
    Y_critical=labels[70000:100000]

    X_disordered=data[100000:,:]
    Y_disordered=labels[100000:]

    X_ordered[np.where(X_ordered==0)]=-1  # map 0 state to -1 (Ising variable can take values +/-1)
    X_critical[np.where(X_critical==0)]=-1  # map 0 state to -1 (Ising variable can take values +/-1)
    X_disordered[np.where(X_disordered==0)]=-1  # map 0 state to -1 (Ising variable can take values +/-1)

    del data,labels

    # define training and test data sets
    X=np.concatenate((X_ordered,X_disordered))
    Y=np.concatenate((Y_ordered,Y_disordered))

    # pick random data points from ordered and disordered states
    # to create the training and test sets
    test_size = 1. - train_to_test_ratio
    X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=test_size)  #train_size=train_to_test_ratio)

    # full data set
    X=np.concatenate((X_critical,X))
    Y=np.concatenate((Y_critical,Y))

    print('X_train shape:', X_train.shape)
    print('Y_train shape:', Y_train.shape)
    print()
    print(X_train.shape[0], 'train samples')
    print(X_critical.shape[0], 'critical samples')
    print(X_test.shape[0], 'test samples')

    file = open("inputNN", 'wb')
    data = {'X_train': X_train, 'Y_train': Y_train, 'X_test': X_test, 'Y_test': Y_test}
    pickle.dump(data, file)
    file.close()

    """
    file = open("inputNN", 'rb')
    data = pickle.load(file)
    file.close()
    X_train = data['X_train']
    Y_train = data['Y_train']
    X_test = data['X_test']
    Y_test = data['Y_test']

    importlib.reload(logisRegresANA)
    in_layer = X_train.shape[1]  #number of neurons in the input layer
    if (len(Y_train.shape)==1):
        out_layer = 1  #number of neurons in the output layer
    else:
        out_layer = Y_train.shape[1]
    biasesnn, weightsnn= logisRegresANA.neuralnetwork([in_layer, 10, out_layer], X_train, Y_train, validation_x=X_test, validation_y=Y_test, verbose=True, epochs= 30, mini_batch_size = 10, lr= 0.5, C='ce')
    print('biasesnn ', biasesnn)
    print('weightsnn ', weightsnn)

    file = open("stateNN", 'w')
    data = {'biases': biasesnn, 'weights': weightsnn}
    pickle.dump(data, file)
    file.close()
    """

if __name__ == '__main__':
    main()
[ "pickle.dump", "numpy.random.seed", "sklearn.model_selection.train_test_split", "pickle.load", "numpy.where", "numpy.linspace", "numpy.unpackbits", "os.path.expanduser", "numpy.concatenate" ]
[((167, 184), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (181, 184), True, 'import numpy as np\n'), ((306, 332), 'numpy.linspace', 'np.linspace', (['(0.25)', '(4.0)', '(16)'], {}), '(0.25, 4.0, 16)\n', (317, 332), True, 'import numpy as np\n'), ((815, 832), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (826, 832), False, 'import pickle\n'), ((2003, 2044), 'numpy.concatenate', 'np.concatenate', (['(X_ordered, X_disordered)'], {}), '((X_ordered, X_disordered))\n', (2017, 2044), True, 'import numpy as np\n'), ((2048, 2089), 'numpy.concatenate', 'np.concatenate', (['(Y_ordered, Y_disordered)'], {}), '((Y_ordered, Y_disordered))\n', (2062, 2089), True, 'import numpy as np\n'), ((2265, 2308), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': 'test_size'}), '(X, Y, test_size=test_size)\n', (2281, 2308), False, 'from sklearn.model_selection import train_test_split\n'), ((2363, 2394), 'numpy.concatenate', 'np.concatenate', (['(X_critical, X)'], {}), '((X_critical, X))\n', (2377, 2394), True, 'import numpy as np\n'), ((2398, 2429), 'numpy.concatenate', 'np.concatenate', (['(Y_critical, Y)'], {}), '((Y_critical, Y))\n', (2412, 2429), True, 'import numpy as np\n'), ((2775, 2798), 'pickle.dump', 'pickle.dump', (['data', 'file'], {}), '(data, file)\n', (2786, 2798), False, 'import pickle\n'), ((589, 612), 'os.path.expanduser', 'os.path.expanduser', (['"""."""'], {}), "('.')\n", (607, 612), False, 'import os\n'), ((1654, 1678), 'numpy.where', 'np.where', (['(X_ordered == 0)'], {}), '(X_ordered == 0)\n', (1662, 1678), True, 'import numpy as np\n'), ((1752, 1777), 'numpy.where', 'np.where', (['(X_critical == 0)'], {}), '(X_critical == 0)\n', (1760, 1777), True, 'import numpy as np\n'), ((1853, 1880), 'numpy.where', 'np.where', (['(X_disordered == 0)'], {}), '(X_disordered == 0)\n', (1861, 1880), True, 'import numpy as np\n'), ((924, 943), 'numpy.unpackbits', 'np.unpackbits', (['data'], {}), '(data)\n', (937, 943), True, 'import numpy as np\n')]
import numpy as np
import pytest

from unittest import TestCase
from unittest.mock import MagicMock

from mvc.controllers.eval import EvalController

from tests.test_utils import DummyNetwork, DummyMetrics
from tests.test_utils import make_input, make_output


class TestEvalController:
    def setup_method(self):
        self.network = DummyNetwork()
        self.metrics = DummyMetrics()
        self.metrics.has = MagicMock(return_value=True)
        self.controller = EvalController(self.network, self.metrics, 10)

    @pytest.mark.parametrize("batch", [True, False])
    def test_step(self, batch):
        output = make_output()
        self.network.infer = MagicMock(return_value=output)
        self.metrics.get = MagicMock(return_value=0)
        if batch:
            inpt = list(make_input(batch_size=4, batch=True))
            inpt[2] = np.zeros((4,))
        else:
            inpt = list(make_input())
            inpt[2] = 0.0

        step_output = self.controller.step(*inpt)

        assert step_output is output.action
        assert self.network.infer.call_count == 1
        if batch:
            assert self.metrics.get.call_count == 4
        else:
            assert self.metrics.get.call_count == 0

    @pytest.mark.parametrize("batch", [True, False])
    def test_step_with_done(self, batch):
        output = make_output()
        self.network.infer = MagicMock(return_value=output)
        self.metrics.add = MagicMock()
        self.metrics.get = MagicMock(return_value=1)
        reward = np.random.random()
        if batch:
            inpt = list(make_input(batch_size=4, batch=True))
            index = np.random.randint(4)
            inpt[2] = np.zeros((4,))
            inpt[2][index] = 1.0
            inpt[3][index]['reward'] = reward
        else:
            inpt = list(make_input())
            inpt[2] = 1.0
            inpt[3]['reward'] = reward

        self.controller.step(*inpt)

        if batch:
            assert self.metrics.add.call_count == 2
            assert list(self.metrics.add.mock_calls[1])[1] == ('eval_reward', reward)
            assert list(self.metrics.add.mock_calls[0])[1] == ('eval_episode', 1)
        else:
            self.metrics.add.assert_not_called()

    def test_step_with_eval_episode_over_limit(self):
        output = make_output()
        self.network.infer = MagicMock(return_value=output)
        self.metrics.add = MagicMock(side_effect=Exception)
        self.metrics.get = MagicMock(return_value=10)
        inpt = list(make_input(batch_size=4, batch=True))
        index = np.random.randint(4)
        reward = np.random.random()
        inpt[2] = np.zeros((4,))
        inpt[2][index] = 1.0
        inpt[3][index]['reward'] = reward

        self.controller.step(*inpt)

    def test_stop_episode(self):
        self.metrics.add = MagicMock()
        obs, reward, _, info = make_input()

        self.controller.stop_episode(obs, reward, info)

        assert self.metrics.add.call_count == 2
        assert list(self.metrics.add.mock_calls[0])[1] == ('eval_reward', info['reward'])
        assert list(self.metrics.add.mock_calls[1])[1] == ('eval_episode', 1)

    def test_should_update(self):
        assert not self.controller.should_update()

    def test_update(self):
        with pytest.raises(Exception):
            self.controller.update()

    def test_should_log(self):
        assert not self.controller.should_log()

    def test_log(self):
        with pytest.raises(Exception):
            self.controller.log()

    def test_is_finished(self):
        self.metrics.get = MagicMock(return_value=5)
        self.metrics.reset = MagicMock()
        self.metrics.log_metric = MagicMock()

        assert not self.controller.is_finished()
        self.metrics.reset.assert_not_called()
        self.metrics.log_metric.assert_not_called()

        self.metrics.get = MagicMock(return_value=10)

        assert self.controller.is_finished()
        assert list(self.metrics.reset.mock_calls[0])[1] == ('eval_episode',)
        assert list(self.metrics.reset.mock_calls[1])[1] == ('eval_reward',)
        self.metrics.log_metric.assert_called_once_with('eval_reward', 10)

    def test_should_save(self):
        assert not self.controller.should_save()

    def test_save(self):
        self.metrics.save_model = MagicMock()
        self.controller.save()
        self.metrics.save_model.assert_not_called()

    def test_should_eval(self):
        assert not self.controller.should_eval()
[ "mvc.controllers.eval.EvalController", "tests.test_utils.make_output", "unittest.mock.MagicMock", "tests.test_utils.DummyMetrics", "numpy.zeros", "tests.test_utils.make_input", "tests.test_utils.DummyNetwork", "pytest.raises", "numpy.random.random", "numpy.random.randint", "pytest.mark.parametrize" ]
[((524, 571), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch"""', '[True, False]'], {}), "('batch', [True, False])\n", (547, 571), False, 'import pytest\n'), ((1231, 1278), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch"""', '[True, False]'], {}), "('batch', [True, False])\n", (1254, 1278), False, 'import pytest\n'), ((336, 350), 'tests.test_utils.DummyNetwork', 'DummyNetwork', ([], {}), '()\n', (348, 350), False, 'from tests.test_utils import DummyNetwork, DummyMetrics\n'), ((374, 388), 'tests.test_utils.DummyMetrics', 'DummyMetrics', ([], {}), '()\n', (386, 388), False, 'from tests.test_utils import DummyNetwork, DummyMetrics\n'), ((416, 444), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (425, 444), False, 'from unittest.mock import MagicMock\n'), ((471, 517), 'mvc.controllers.eval.EvalController', 'EvalController', (['self.network', 'self.metrics', '(10)'], {}), '(self.network, self.metrics, 10)\n', (485, 517), False, 'from mvc.controllers.eval import EvalController\n'), ((621, 634), 'tests.test_utils.make_output', 'make_output', ([], {}), '()\n', (632, 634), False, 'from tests.test_utils import make_input, make_output\n'), ((664, 694), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'output'}), '(return_value=output)\n', (673, 694), False, 'from unittest.mock import MagicMock\n'), ((722, 747), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(0)'}), '(return_value=0)\n', (731, 747), False, 'from unittest.mock import MagicMock\n'), ((1338, 1351), 'tests.test_utils.make_output', 'make_output', ([], {}), '()\n', (1349, 1351), False, 'from tests.test_utils import make_input, make_output\n'), ((1381, 1411), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'output'}), '(return_value=output)\n', (1390, 1411), False, 'from unittest.mock import MagicMock\n'), ((1439, 1450), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1448, 1450), False, 'from unittest.mock import MagicMock\n'), ((1478, 1503), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(1)'}), '(return_value=1)\n', (1487, 1503), False, 'from unittest.mock import MagicMock\n'), ((1522, 1540), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1538, 1540), True, 'import numpy as np\n'), ((2305, 2318), 'tests.test_utils.make_output', 'make_output', ([], {}), '()\n', (2316, 2318), False, 'from tests.test_utils import make_input, make_output\n'), ((2348, 2378), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'output'}), '(return_value=output)\n', (2357, 2378), False, 'from unittest.mock import MagicMock\n'), ((2406, 2438), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'Exception'}), '(side_effect=Exception)\n', (2415, 2438), False, 'from unittest.mock import MagicMock\n'), ((2466, 2492), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(10)'}), '(return_value=10)\n', (2475, 2492), False, 'from unittest.mock import MagicMock\n'), ((2568, 2588), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (2585, 2588), True, 'import numpy as np\n'), ((2606, 2624), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2622, 2624), True, 'import numpy as np\n'), ((2643, 2657), 'numpy.zeros', 'np.zeros', (['(4,)'], {}), '((4,))\n', (2651, 2657), True, 'import numpy as np\n'), ((2826, 2837), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2835, 2837), False, 'from unittest.mock import MagicMock\n'), ((2870, 
2882), 'tests.test_utils.make_input', 'make_input', ([], {}), '()\n', (2880, 2882), False, 'from tests.test_utils import make_input, make_output\n'), ((3584, 3609), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(5)'}), '(return_value=5)\n', (3593, 3609), False, 'from unittest.mock import MagicMock\n'), ((3639, 3650), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3648, 3650), False, 'from unittest.mock import MagicMock\n'), ((3685, 3696), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3694, 3696), False, 'from unittest.mock import MagicMock\n'), ((3874, 3900), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(10)'}), '(return_value=10)\n', (3883, 3900), False, 'from unittest.mock import MagicMock\n'), ((4318, 4329), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4327, 4329), False, 'from unittest.mock import MagicMock\n'), ((851, 865), 'numpy.zeros', 'np.zeros', (['(4,)'], {}), '((4,))\n', (859, 865), True, 'import numpy as np\n'), ((1641, 1661), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (1658, 1661), True, 'import numpy as np\n'), ((1684, 1698), 'numpy.zeros', 'np.zeros', (['(4,)'], {}), '((4,))\n', (1692, 1698), True, 'import numpy as np\n'), ((2514, 2550), 'tests.test_utils.make_input', 'make_input', ([], {'batch_size': '(4)', 'batch': '(True)'}), '(batch_size=4, batch=True)\n', (2524, 2550), False, 'from tests.test_utils import make_input, make_output\n'), ((3283, 3307), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3296, 3307), False, 'import pytest\n'), ((3464, 3488), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3477, 3488), False, 'import pytest\n'), ((791, 827), 'tests.test_utils.make_input', 'make_input', ([], {'batch_size': '(4)', 'batch': '(True)'}), '(batch_size=4, batch=True)\n', (801, 827), False, 'from tests.test_utils import make_input, make_output\n'), ((904, 916), 'tests.test_utils.make_input', 'make_input', ([], {}), '()\n', (914, 916), False, 'from tests.test_utils import make_input, make_output\n'), ((1583, 1619), 'tests.test_utils.make_input', 'make_input', ([], {'batch_size': '(4)', 'batch': '(True)'}), '(batch_size=4, batch=True)\n', (1593, 1619), False, 'from tests.test_utils import make_input, make_output\n'), ((1816, 1828), 'tests.test_utils.make_input', 'make_input', ([], {}), '()\n', (1826, 1828), False, 'from tests.test_utils import make_input, make_output\n')]
# -*- coding: utf-8 -*-
from load_model_from_file import load_model_from_json
import os, sys
from PIL import Image
import numpy as np


def load_img(path):
    """
    """
    img = np.array(Image.open(path))[:, :, 0:3]
    img = np.expand_dims(img, axis=0)
    return img


def main():
    """
    """
    home = os.path.join(os.path.abspath('..'), 'data', 'orig')
    img_path = os.path.join(home, sys.argv[1], sys.argv[2], sys.argv[3])
    model = load_model_from_json()
    model.compile('adam', 'binary_crossentropy')
    img = load_img(img_path)
    print("Image shape: {}".format(img))
    pred = model.predict(img)
    print("Prediction: {}".format(pred[0]))


if __name__ == '__main__':
    main()
[ "os.path.abspath", "load_model_from_file.load_model_from_json", "numpy.expand_dims", "PIL.Image.open", "os.path.join" ]
[((235, 262), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (249, 262), True, 'import numpy as np\n'), ((391, 448), 'os.path.join', 'os.path.join', (['home', 'sys.argv[1]', 'sys.argv[2]', 'sys.argv[3]'], {}), '(home, sys.argv[1], sys.argv[2], sys.argv[3])\n', (403, 448), False, 'import os, sys\n'), ((461, 483), 'load_model_from_file.load_model_from_json', 'load_model_from_json', ([], {}), '()\n', (481, 483), False, 'from load_model_from_file import load_model_from_json\n'), ((337, 358), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (352, 358), False, 'import os, sys\n'), ((196, 212), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (206, 212), False, 'from PIL import Image\n')]
# First, and before importing any Enthought packages, set the ETS_TOOLKIT
# environment variable to qt4, to tell Traits that we will use Qt.
from mayavi.core.ui.api import MayaviScene, MlabSceneModel, SceneEditor
from traits.api import HasTraits, Instance, on_trait_change
from traitsui.api import View, Item
from mayavi import mlab
from PyQt4 import QtGui, QtCore
import numpy as np
import time
import os
from reshaper import Reshaper
import utils

os.environ['ETS_TOOLKIT'] = 'qt4'


# A QSlider with its own ID, used to determine which PC it corresponds to
# Customized signal. Agment original valueChanged(int) with sliderID, and
# the min, max values of the slider
class IndexedQSlider(QtGui.QSlider):
    valueChangeForwarded = QtCore.pyqtSignal(int, int, int, int)

    def __init__(self, sliderID, orientation, parent=None):
        QtGui.QSlider.__init__(self, orientation, parent)
        self.sliderID = sliderID
        self.connect(self, QtCore.SIGNAL('valueChanged(int)'),
                     self.valueChangeForwarder)

    '''
    Emit coustomized valuechanged sigmal
    '''
    def valueChangeForwarder(self, val):
        self.valueChangeForwarded.emit(
            self.sliderID, val, self.minimum(), self.maximum())


class myAction(QtGui.QAction):
    myact = QtCore.pyqtSignal(int)

    def __init__(self, _id, *args):
        QtGui.QAction.__init__(self, *args)
        self._id = _id
        self.connect(self, QtCore.SIGNAL("triggered()"), self.emitSelect)

    def emitSelect(self):
        self.myact.emit(self._id)


class Visualization(HasTraits):
    scene = Instance(MlabSceneModel, ())

    @on_trait_change('scene.activated')
    def update_plot(self, v, f):
        mlab.clf()
        if not isinstance(v, str):
            mlab.triangular_mesh(v[:, 0], v[:, 1], v[:, 2], f)

    # the layout of the dialog screated
    view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
                     height=200, width=250, show_label=False), resizable=True)


# The QWidget for rendering 3D shape
class MayaviQWidget(QtGui.QWidget):
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        layout = QtGui.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        self.visualization = Visualization()
        # The edit_traits call will generate the widget to embed.
        self.ui = self.visualization.edit_traits(parent=self, kind='subpanel').control
        layout.addWidget(self.ui)
        self.ui.setParent(self)
        # models for shape representing
        self.bodies = {"female": Reshaper(label="female"), "male":Reshaper(label="male")}
        self.body = self.bodies["female"]
        self.flag_ = 0
        self.vertices = self.body.mean_vertex
        self.normals = self.body.normals
        self.facets = self.body.facets
        self.input_data = np.zeros((utils.M_NUM, 1))
        self.update()

    def update(self):
        [self.vertices, self.normals, self.facets] = \
            self.body.mapping(self.input_data, self.flag_)
        self.vertices = self.vertices.astype('float32')
        self.visualization.update_plot(self.vertices, self.facets)

    def select_mode(self, label="female", flag=0):
        self.body = self.bodies[label]
        self.flag_ = flag
        self.update()

    def sliderForwardedValueChangeHandler(self, sliderID, val, minVal, maxVal):
        x = val / 10.0
        self.input_data[sliderID] = x
        start = time.time()
        self.update()
        print(' [**] update body in %f s' % (time.time() - start))

    def save(self):
        utils.save_obj("result.obj", self.vertices, self.facets+1)
        output = np.array(utils.calc_measure(self.body.cp, self.vertices, self.facets))
        for i in range(0, utils.M_NUM):
            print("%s: %f" % (utils.M_STR[i], output[i, 0]))

    def predict(self, data):
        mask = np.zeros((utils.M_NUM, 1), dtype=bool)
        for i in range(0, data.shape[0]):
            if data[i, 0] != 0:
                data[i, 0] -= self.body.mean_measure[i, 0]
                data[i, 0] /= self.body.std_measure[i, 0]
                mask[i, 0] = 1
        self.input_data = self.body.get_predict(mask, data)
        self.update()
        measure = self.body.mean_measure + self.input_data*self.body.std_measure
        return [self.input_data, measure]
[ "traits.api.Instance", "PyQt4.QtGui.QAction.__init__", "mayavi.mlab.triangular_mesh", "traits.api.on_trait_change", "utils.save_obj", "mayavi.mlab.clf", "PyQt4.QtGui.QVBoxLayout", "numpy.zeros", "time.time", "PyQt4.QtGui.QSlider.__init__", "reshaper.Reshaper", "mayavi.core.ui.api.SceneEditor", "utils.calc_measure", "PyQt4.QtGui.QWidget.__init__", "PyQt4.QtCore.pyqtSignal", "PyQt4.QtCore.SIGNAL" ]
[((732, 769), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int', 'int', 'int', 'int'], {}), '(int, int, int, int)\n', (749, 769), False, 'from PyQt4 import QtGui, QtCore\n'), ((1226, 1248), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int'], {}), '(int)\n', (1243, 1248), False, 'from PyQt4 import QtGui, QtCore\n'), ((1510, 1538), 'traits.api.Instance', 'Instance', (['MlabSceneModel', '()'], {}), '(MlabSceneModel, ())\n', (1518, 1538), False, 'from traits.api import HasTraits, Instance, on_trait_change\n'), ((1542, 1576), 'traits.api.on_trait_change', 'on_trait_change', (['"""scene.activated"""'], {}), "('scene.activated')\n", (1557, 1576), False, 'from traits.api import HasTraits, Instance, on_trait_change\n'), ((832, 881), 'PyQt4.QtGui.QSlider.__init__', 'QtGui.QSlider.__init__', (['self', 'orientation', 'parent'], {}), '(self, orientation, parent)\n', (854, 881), False, 'from PyQt4 import QtGui, QtCore\n'), ((1287, 1322), 'PyQt4.QtGui.QAction.__init__', 'QtGui.QAction.__init__', (['self', '*args'], {}), '(self, *args)\n', (1309, 1322), False, 'from PyQt4 import QtGui, QtCore\n'), ((1612, 1622), 'mayavi.mlab.clf', 'mlab.clf', ([], {}), '()\n', (1620, 1622), False, 'from mayavi import mlab\n'), ((1992, 2028), 'PyQt4.QtGui.QWidget.__init__', 'QtGui.QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (2014, 2028), False, 'from PyQt4 import QtGui, QtCore\n'), ((2042, 2065), 'PyQt4.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', (['self'], {}), '(self)\n', (2059, 2065), False, 'from PyQt4 import QtGui, QtCore\n'), ((2695, 2721), 'numpy.zeros', 'np.zeros', (['(utils.M_NUM, 1)'], {}), '((utils.M_NUM, 1))\n', (2703, 2721), True, 'import numpy as np\n'), ((3251, 3262), 'time.time', 'time.time', ([], {}), '()\n', (3260, 3262), False, 'import time\n'), ((3367, 3427), 'utils.save_obj', 'utils.save_obj', (['"""result.obj"""', 'self.vertices', '(self.facets + 1)'], {}), "('result.obj', self.vertices, self.facets + 1)\n", (3381, 3427), False, 'import utils\n'), ((3640, 3678), 'numpy.zeros', 'np.zeros', (['(utils.M_NUM, 1)'], {'dtype': 'bool'}), '((utils.M_NUM, 1), dtype=bool)\n', (3648, 3678), True, 'import numpy as np\n'), ((934, 968), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""valueChanged(int)"""'], {}), "('valueChanged(int)')\n", (947, 968), False, 'from PyQt4 import QtGui, QtCore\n'), ((1365, 1393), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""triggered()"""'], {}), "('triggered()')\n", (1378, 1393), False, 'from PyQt4 import QtGui, QtCore\n'), ((1660, 1710), 'mayavi.mlab.triangular_mesh', 'mlab.triangular_mesh', (['v[:, 0]', 'v[:, 1]', 'v[:, 2]', 'f'], {}), '(v[:, 0], v[:, 1], v[:, 2], f)\n', (1680, 1710), False, 'from mayavi import mlab\n'), ((2444, 2468), 'reshaper.Reshaper', 'Reshaper', ([], {'label': '"""female"""'}), "(label='female')\n", (2452, 2468), False, 'from reshaper import Reshaper\n'), ((2477, 2499), 'reshaper.Reshaper', 'Reshaper', ([], {'label': '"""male"""'}), "(label='male')\n", (2485, 2499), False, 'from reshaper import Reshaper\n'), ((3448, 3508), 'utils.calc_measure', 'utils.calc_measure', (['self.body.cp', 'self.vertices', 'self.facets'], {}), '(self.body.cp, self.vertices, self.facets)\n', (3466, 3508), False, 'import utils\n'), ((1784, 1820), 'mayavi.core.ui.api.SceneEditor', 'SceneEditor', ([], {'scene_class': 'MayaviScene'}), '(scene_class=MayaviScene)\n', (1795, 1820), False, 'from mayavi.core.ui.api import MayaviScene, MlabSceneModel, SceneEditor\n'), ((3322, 3333), 'time.time', 'time.time', ([], {}), '()\n', (3331, 3333), False, 'import time\n')]
# coding: utf-8 """ TensorFlow tests. """ import os import cmsml from cmsml.util import tmp_file, tmp_dir from . import CMSMLTestCase class TensorFlowTestCase(CMSMLTestCase): def __init__(self, *args, **kwargs): super(TensorFlowTestCase, self).__init__(*args, **kwargs) os.environ["CUDA_VISIBLE_DEVICES"] = "-1" self._tf = None self._tf1 = None self._tf_version = None self._W = None self._b = None @property def tf(self): if self._tf is None: self._tf, self._tf1, self._tf_version = cmsml.tensorflow.import_tf() return self._tf @property def tf1(self): if self._tf1 is None: self._tf, self._tf1, self._tf_version = cmsml.tensorflow.import_tf() return self._tf1 @property def tf_version(self): if self._tf_version is None: self._tf, self._tf1, self._tf_version = cmsml.tensorflow.import_tf() return self._tf_version @property def W(self): if self._W is None: self._W = self.tf.Variable(self.tf.ones([10, 1])) return self._W @property def b(self): if self._b is None: self._b = self.tf.Variable(self.tf.ones([1])) return self._b def create_keras_model(self, tf): model = tf.keras.Sequential() model.add(tf.keras.layers.InputLayer(input_shape=(10,), dtype=tf.float32, name="input")) model.add(tf.keras.layers.BatchNormalization(axis=1, renorm=True)) model.add(tf.keras.layers.Dense(100, activation="tanh")) model.add(tf.keras.layers.BatchNormalization(axis=1, renorm=True)) model.add(tf.keras.layers.Dense(3, activation="softmax", name="output")) return model def create_tf1_session(self, graph): tf = self.tf1 if tf is None: return None return tf.Session(graph=graph, config=tf.ConfigProto( intra_op_parallelism_threads=1, inter_op_parallelism_threads=1, device_count={"GPU": 0}, )) def create_tf1_graph(self, create_session=True): tf = self.tf1 if tf is None: return None, None graph = tf.Graph() with graph.as_default(): x_ = tf.placeholder(tf.float32, [None, 10], name="input") W = tf.Variable(tf.ones([10, 1])) b = tf.Variable(tf.ones([1])) h = tf.add(tf.matmul(x_, W), b) y = tf.tanh(h, name="output") # noqa if not create_session: return graph session = self.create_tf1_session(graph) with graph.as_default(): session.run(tf.global_variables_initializer()) return graph, session def create_tf_function(self, frozen=False, no_input=False, concrete=False): tf = self.tf if frozen: # polymorphic function, frozen input signature @tf.function(input_signature=(tf.TensorSpec(shape=[2, 10], dtype=tf.float32),)) def func(x): h = tf.add(tf.matmul(x, self.W), self.b) y = tf.tanh(h, name="output") return y elif no_input: # polymorphic function, empty input signature @tf.function def func(): x = tf.ones([2, 10]) h = tf.add(tf.matmul(x, self.W), self.b) y = tf.tanh(h, name="output") return y else: # polymorphic function, unknown input signature @tf.function def func(x): h = tf.add(tf.matmul(x, self.W), self.b) y = tf.tanh(h, name="output") return y if concrete: # convert to concrete function with known signature func = func.get_concrete_function(tf.TensorSpec(shape=[2, 10], dtype=tf.float32)) return func def test_import_tf(self): tf, tf1, tf_version = cmsml.tensorflow.import_tf() self.assertEqual(len(tf_version), 3) if tf_version[0] == "1": self.assertEqual(tf, tf1) def test_save_graph(self): graph, session = self.create_tf1_graph() if graph is None or session is None: return with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, graph, variables_to_constants=False) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb.txt") as path: cmsml.tensorflow.save_graph(path, graph, variables_to_constants=False) 
self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, graph.as_graph_def(), variables_to_constants=False) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, session, variables_to_constants=False) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, session, variables_to_constants=True, output_names=["output"]) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb") as path: with self.assertRaises(ValueError): cmsml.tensorflow.save_graph(path, session, variables_to_constants=True) self.assertFalse(os.path.exists(path)) def test_save_polymorphic_function_error(self): poly_func = self.create_tf_function() with self.assertRaises(ValueError): with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, poly_func, variables_to_constants=False) with self.assertRaises(ValueError): with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, poly_func, variables_to_constants=True) def test_save_empty_polymorphic_function(self): empty_poly_func = self.create_tf_function(no_input=True) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, empty_poly_func, variables_to_constants=False) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, empty_poly_func, variables_to_constants=True) self.assertTrue(os.path.exists(path)) def test_save_frozen_polymorphic_function(self): frozen_poly_func = self.create_tf_function(frozen=True) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, frozen_poly_func, variables_to_constants=False) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb.txt") as path: cmsml.tensorflow.save_graph(path, frozen_poly_func, variables_to_constants=False) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, frozen_poly_func, variables_to_constants=True) self.assertTrue(os.path.exists(path)) def test_save_concrete_function(self): concrete_func = self.create_tf_function(concrete=True) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, concrete_func, variables_to_constants=False) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb.txt") as path: cmsml.tensorflow.save_graph(path, concrete_func, variables_to_constants=False) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, concrete_func, variables_to_constants=True) self.assertTrue(os.path.exists(path)) def test_save_keras_model_v1(self): model = self.create_keras_model(self.tf1) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, model, variables_to_constants=False) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb.txt") as path: cmsml.tensorflow.save_graph(path, model, variables_to_constants=False) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, model, variables_to_constants=True) self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, self.tf1.keras.backend.get_session(), variables_to_constants=False) self.assertTrue(os.path.exists(path)) def test_save_keras_model_v2(self): model = self.create_keras_model(self.tf) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, model, variables_to_constants=False) 
self.assertTrue(os.path.exists(path)) with tmp_file(suffix=".pb") as path: cmsml.tensorflow.save_graph(path, model, variables_to_constants=True) self.assertTrue(os.path.exists(path)) def test_load_graph(self): import google.protobuf as pb concrete_func = self.create_tf_function(concrete=True) with tmp_file(suffix=".pb") as path_pb, tmp_file(suffix=".pb.txt") as path_txt: cmsml.tensorflow.save_graph(path_txt, concrete_func, variables_to_constants=True) cmsml.tensorflow.save_graph(path_pb, concrete_func, variables_to_constants=False) self.assertTrue(os.path.exists(path_pb)) self.assertTrue(os.path.exists(path_txt)) graph = cmsml.tensorflow.load_graph(path_txt) self.assertIsInstance(graph, self.tf.Graph) graph = cmsml.tensorflow.load_graph(path_pb) self.assertIsInstance(graph, self.tf.Graph) with self.assertRaises(pb.text_format.ParseError): cmsml.tensorflow.load_graph(path_pb, as_text=True) with self.assertRaises(pb.message.DecodeError): cmsml.tensorflow.load_graph(path_txt, as_text=False) def test_load_graph_and_run(self): import numpy as np tf = self.tf1 if tf is None: return _, session = self.create_tf1_graph() with tmp_file(suffix=".pb.txt") as path: cmsml.tensorflow.save_graph(path, session, variables_to_constants=True, output_names=["output"]) graph = cmsml.tensorflow.load_graph(path) session = self.create_tf1_session(graph) with graph.as_default(): x = graph.get_tensor_by_name("input:0") y = graph.get_tensor_by_name("output:0") out = session.run(y, {x: np.ones((2, 10))}) self.assertEqual(out.shape, (2, 1)) self.assertEqual(tuple(out[..., 0]), (1., 1.)) def test_write_summary(self): concrete_func = self.create_tf_function(concrete=True) with tmp_dir(create=False) as path: cmsml.tensorflow.write_graph_summary(concrete_func.graph, path) self.assertTrue(os.path.exists(path)) self.assertGreater(len(os.listdir(path)), 0) with tmp_file(suffix=".pb") as graph_path: cmsml.tensorflow.save_graph(graph_path, concrete_func) with tmp_dir(create=False) as path: cmsml.tensorflow.write_graph_summary(graph_path, path) self.assertTrue(os.path.exists(path)) self.assertGreater(len(os.listdir(path)), 0) self.assertTrue(os.path.exists(path))
[ "cmsml.tensorflow.load_graph", "os.path.exists", "cmsml.tensorflow.save_graph", "numpy.ones", "cmsml.tensorflow.import_tf", "cmsml.tensorflow.write_graph_summary", "cmsml.util.tmp_file", "cmsml.util.tmp_dir", "os.listdir" ]
[((4007, 4035), 'cmsml.tensorflow.import_tf', 'cmsml.tensorflow.import_tf', ([], {}), '()\n', (4033, 4035), False, 'import cmsml\n'), ((582, 610), 'cmsml.tensorflow.import_tf', 'cmsml.tensorflow.import_tf', ([], {}), '()\n', (608, 610), False, 'import cmsml\n'), ((751, 779), 'cmsml.tensorflow.import_tf', 'cmsml.tensorflow.import_tf', ([], {}), '()\n', (777, 779), False, 'import cmsml\n'), ((935, 963), 'cmsml.tensorflow.import_tf', 'cmsml.tensorflow.import_tf', ([], {}), '()\n', (961, 963), False, 'import cmsml\n'), ((4313, 4335), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (4321, 4335), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((4357, 4427), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'graph'], {'variables_to_constants': '(False)'}), '(path, graph, variables_to_constants=False)\n', (4384, 4427), False, 'import cmsml\n'), ((4492, 4518), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb.txt"""'}), "(suffix='.pb.txt')\n", (4500, 4518), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((4540, 4610), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'graph'], {'variables_to_constants': '(False)'}), '(path, graph, variables_to_constants=False)\n', (4567, 4610), False, 'import cmsml\n'), ((4675, 4697), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (4683, 4697), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((4869, 4891), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (4877, 4891), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((4913, 4985), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'session'], {'variables_to_constants': '(False)'}), '(path, session, variables_to_constants=False)\n', (4940, 4985), False, 'import cmsml\n'), ((5050, 5072), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (5058, 5072), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((5094, 5194), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'session'], {'variables_to_constants': '(True)', 'output_names': "['output']"}), "(path, session, variables_to_constants=True,\n output_names=['output'])\n", (5121, 5194), False, 'import cmsml\n'), ((5271, 5293), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (5279, 5293), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((6090, 6112), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (6098, 6112), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((6134, 6219), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'empty_poly_func'], {'variables_to_constants': '(False)'}), '(path, empty_poly_func, variables_to_constants=False\n )\n', (6161, 6219), False, 'import cmsml\n'), ((6279, 6301), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (6287, 6301), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((6323, 6402), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'empty_poly_func'], {'variables_to_constants': '(True)'}), '(path, empty_poly_func, variables_to_constants=True)\n', (6350, 6402), False, 'import cmsml\n'), ((6585, 6607), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (6593, 6607), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((6629, 6715), 
'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'frozen_poly_func'], {'variables_to_constants': '(False)'}), '(path, frozen_poly_func, variables_to_constants=\n False)\n', (6656, 6715), False, 'import cmsml\n'), ((6775, 6801), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb.txt"""'}), "(suffix='.pb.txt')\n", (6783, 6801), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((6823, 6909), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'frozen_poly_func'], {'variables_to_constants': '(False)'}), '(path, frozen_poly_func, variables_to_constants=\n False)\n', (6850, 6909), False, 'import cmsml\n'), ((6969, 6991), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (6977, 6991), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((7013, 7098), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'frozen_poly_func'], {'variables_to_constants': '(True)'}), '(path, frozen_poly_func, variables_to_constants=True\n )\n', (7040, 7098), False, 'import cmsml\n'), ((7265, 7287), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (7273, 7287), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((7309, 7387), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'concrete_func'], {'variables_to_constants': '(False)'}), '(path, concrete_func, variables_to_constants=False)\n', (7336, 7387), False, 'import cmsml\n'), ((7452, 7478), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb.txt"""'}), "(suffix='.pb.txt')\n", (7460, 7478), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((7500, 7578), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'concrete_func'], {'variables_to_constants': '(False)'}), '(path, concrete_func, variables_to_constants=False)\n', (7527, 7578), False, 'import cmsml\n'), ((7643, 7665), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (7651, 7665), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((7687, 7764), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'concrete_func'], {'variables_to_constants': '(True)'}), '(path, concrete_func, variables_to_constants=True)\n', (7714, 7764), False, 'import cmsml\n'), ((7920, 7942), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (7928, 7942), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((7964, 8034), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'model'], {'variables_to_constants': '(False)'}), '(path, model, variables_to_constants=False)\n', (7991, 8034), False, 'import cmsml\n'), ((8099, 8125), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb.txt"""'}), "(suffix='.pb.txt')\n", (8107, 8125), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((8147, 8217), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'model'], {'variables_to_constants': '(False)'}), '(path, model, variables_to_constants=False)\n', (8174, 8217), False, 'import cmsml\n'), ((8282, 8304), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (8290, 8304), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((8326, 8395), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'model'], {'variables_to_constants': '(True)'}), '(path, model, variables_to_constants=True)\n', (8353, 8395), False, 'import cmsml\n'), ((8460, 8482), 'cmsml.util.tmp_file', 
'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (8468, 8482), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((8776, 8798), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (8784, 8798), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((8820, 8890), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'model'], {'variables_to_constants': '(False)'}), '(path, model, variables_to_constants=False)\n', (8847, 8890), False, 'import cmsml\n'), ((8955, 8977), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (8963, 8977), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((8999, 9068), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'model'], {'variables_to_constants': '(True)'}), '(path, model, variables_to_constants=True)\n', (9026, 9068), False, 'import cmsml\n'), ((9266, 9288), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (9274, 9288), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((9301, 9327), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb.txt"""'}), "(suffix='.pb.txt')\n", (9309, 9327), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((9353, 9439), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path_txt', 'concrete_func'], {'variables_to_constants': '(True)'}), '(path_txt, concrete_func, variables_to_constants\n =True)\n', (9380, 9439), False, 'import cmsml\n'), ((9447, 9533), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path_pb', 'concrete_func'], {'variables_to_constants': '(False)'}), '(path_pb, concrete_func, variables_to_constants=\n False)\n', (9474, 9533), False, 'import cmsml\n'), ((9658, 9695), 'cmsml.tensorflow.load_graph', 'cmsml.tensorflow.load_graph', (['path_txt'], {}), '(path_txt)\n', (9685, 9695), False, 'import cmsml\n'), ((9773, 9809), 'cmsml.tensorflow.load_graph', 'cmsml.tensorflow.load_graph', (['path_pb'], {}), '(path_pb)\n', (9800, 9809), False, 'import cmsml\n'), ((10317, 10343), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb.txt"""'}), "(suffix='.pb.txt')\n", (10325, 10343), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((10365, 10465), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'session'], {'variables_to_constants': '(True)', 'output_names': "['output']"}), "(path, session, variables_to_constants=True,\n output_names=['output'])\n", (10392, 10465), False, 'import cmsml\n'), ((10498, 10531), 'cmsml.tensorflow.load_graph', 'cmsml.tensorflow.load_graph', (['path'], {}), '(path)\n', (10525, 10531), False, 'import cmsml\n'), ((10988, 11009), 'cmsml.util.tmp_dir', 'tmp_dir', ([], {'create': '(False)'}), '(create=False)\n', (10995, 11009), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((11031, 11094), 'cmsml.tensorflow.write_graph_summary', 'cmsml.tensorflow.write_graph_summary', (['concrete_func.graph', 'path'], {}), '(concrete_func.graph, path)\n', (11067, 11094), False, 'import cmsml\n'), ((11216, 11238), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (11224, 11238), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((11266, 11320), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['graph_path', 'concrete_func'], {}), '(graph_path, concrete_func)\n', (11293, 11320), False, 'import cmsml\n'), ((4456, 4476), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4470, 4476), False, 'import os\n'), 
((4639, 4659), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4653, 4659), False, 'import os\n'), ((4833, 4853), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4847, 4853), False, 'import os\n'), ((5014, 5034), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5028, 5034), False, 'import os\n'), ((5235, 5255), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5249, 5255), False, 'import os\n'), ((5367, 5438), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'session'], {'variables_to_constants': '(True)'}), '(path, session, variables_to_constants=True)\n', (5394, 5438), False, 'import cmsml\n'), ((5468, 5488), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5482, 5488), False, 'import os\n'), ((5651, 5673), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (5659, 5673), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((5699, 5773), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'poly_func'], {'variables_to_constants': '(False)'}), '(path, poly_func, variables_to_constants=False)\n', (5726, 5773), False, 'import cmsml\n'), ((5836, 5858), 'cmsml.util.tmp_file', 'tmp_file', ([], {'suffix': '""".pb"""'}), "(suffix='.pb')\n", (5844, 5858), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((5884, 5957), 'cmsml.tensorflow.save_graph', 'cmsml.tensorflow.save_graph', (['path', 'poly_func'], {'variables_to_constants': '(True)'}), '(path, poly_func, variables_to_constants=True)\n', (5911, 5957), False, 'import cmsml\n'), ((6243, 6263), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6257, 6263), False, 'import os\n'), ((6431, 6451), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6445, 6451), False, 'import os\n'), ((6739, 6759), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6753, 6759), False, 'import os\n'), ((6933, 6953), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6947, 6953), False, 'import os\n'), ((7122, 7142), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (7136, 7142), False, 'import os\n'), ((7416, 7436), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (7430, 7436), False, 'import os\n'), ((7607, 7627), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (7621, 7627), False, 'import os\n'), ((7793, 7813), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (7807, 7813), False, 'import os\n'), ((8063, 8083), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8077, 8083), False, 'import os\n'), ((8246, 8266), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8260, 8266), False, 'import os\n'), ((8424, 8444), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8438, 8444), False, 'import os\n'), ((8650, 8670), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8664, 8670), False, 'import os\n'), ((8919, 8939), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8933, 8939), False, 'import os\n'), ((9097, 9117), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (9111, 9117), False, 'import os\n'), ((9558, 9581), 'os.path.exists', 'os.path.exists', (['path_pb'], {}), '(path_pb)\n', (9572, 9581), False, 'import os\n'), ((9611, 9635), 'os.path.exists', 'os.path.exists', (['path_txt'], {}), '(path_txt)\n', (9625, 9635), False, 'import os\n'), ((9946, 9996), 'cmsml.tensorflow.load_graph', 
'cmsml.tensorflow.load_graph', (['path_pb'], {'as_text': '(True)'}), '(path_pb, as_text=True)\n', (9973, 9996), False, 'import cmsml\n'), ((10073, 10125), 'cmsml.tensorflow.load_graph', 'cmsml.tensorflow.load_graph', (['path_txt'], {'as_text': '(False)'}), '(path_txt, as_text=False)\n', (10100, 10125), False, 'import cmsml\n'), ((11123, 11143), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (11137, 11143), False, 'import os\n'), ((11338, 11359), 'cmsml.util.tmp_dir', 'tmp_dir', ([], {'create': '(False)'}), '(create=False)\n', (11345, 11359), False, 'from cmsml.util import tmp_file, tmp_dir\n'), ((11385, 11439), 'cmsml.tensorflow.write_graph_summary', 'cmsml.tensorflow.write_graph_summary', (['graph_path', 'path'], {}), '(graph_path, path)\n', (11421, 11439), False, 'import cmsml\n'), ((10757, 10773), 'numpy.ones', 'np.ones', (['(2, 10)'], {}), '((2, 10))\n', (10764, 10773), True, 'import numpy as np\n'), ((11180, 11196), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (11190, 11196), False, 'import os\n'), ((11472, 11492), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (11486, 11492), False, 'import os\n'), ((11587, 11607), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (11601, 11607), False, 'import os\n'), ((11533, 11549), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (11543, 11549), False, 'import os\n')]
# This script is part of pyroglancer (https://github.com/SridharJagannathan/pyroglancer). # Copyright (C) 2020 <NAME> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. """Module contains functions to handle skeleton data.""" import numpy as np import os from cloudvolume import Skeleton, CloudVolume from cloudvolume.datasource.precomputed.sharding import ShardingSpecification import pandas as pd import pymaid import navis import json def _generate_skeleton(x, min_radius=0): """Generate skeleton (of cloudvolume class) for given neuron. Parameters ---------- x : CatmaidNeuron | TreeNeuron Returns ------- skeleton : Cloud volume skeleton """ # flatten the list of the segments (sub-trees).. nodes_ordered = [n for seg in x.segments for n in seg[::-1]] # arrange the nodes in the order of segments.. this_tn = x.nodes.set_index('node_id').loc[nodes_ordered] # remove the first occurance of duplicated elements (as seglist stuff is repeated for different segments).. this_tn = this_tn[~this_tn.index.duplicated(keep='first')] this_tn['index'] = list(range(1, this_tn.shape[0] + 1)) # treenode to index.. tn2ix = this_tn['index'].to_dict() # set the rootnodes as 0.. this_tn['parent_ix'] = this_tn.parent_id.map(lambda x: tn2ix.get(x, -1)) # get the vertices now.. vertices = np.array(this_tn[['x', 'y', 'z']].values.tolist(), dtype="float32") # get the edges now.. edges = np.array(this_tn[['index', 'parent_ix']].values[1:] - 1, dtype="uint32") skeleton = Skeleton(segid=x.id, vertices=vertices, edges=edges) # set the min_radius min_radius = 0 if not isinstance(min_radius, type(None)): this_tn.loc[this_tn.radius < min_radius, 'radius'] = min_radius skeleton.radius = np.array(this_tn['radius'].values, dtype="float32") # Set Label column to 0 (undefined) this_tn['label'] = 0 # Add end/branch labels this_tn.loc[this_tn.type == 'branch', 'label'] = 5 this_tn.loc[this_tn.type == 'end', 'label'] = 6 # Add soma label if x.soma is not None: this_tn.loc[x.soma, 'label'] = 1 skeleton.vertex_types = this_tn.label return skeleton def to_ngskeletons(x): """Generate skeleton (of cloudvolume class) for given neuron(s). Parameters ---------- x : CatmaidNeuron | CatmaidNeuronList or TreeNeuron | NeuronList neuron or neuronlist of different formats Returns ------- skeldatasource: list contains cloud volume skeletons. skeldatasegidlist: list contains the segids(skid). skelsegnamelist: list contains the names of segments. 
""" if isinstance(x, pymaid.core.CatmaidNeuron): x = pymaid.core.CatmaidNeuronList(x) elif isinstance(x, navis.core.TreeNeuron): x = navis.core.NeuronList(x) elif (isinstance(x, pymaid.core.CatmaidNeuronList) or isinstance(x, navis.core.NeuronList)): pass else: raise TypeError(f'Expected neuron or neuronlist, got "{type(x)}"') skeldatasegidlist = [] skeldatasource = [] skelsegnamelist = [] for neuronelement in x: skeldata = _generate_skeleton(neuronelement) skeldatasource.append(skeldata) skeldatasegidlist.append(skeldata.id) skelsegnamelist.append(neuronelement.name) skeldatasegidlist = list(map(str, skeldatasegidlist)) skelsegnamelist = list(map(str, skelsegnamelist)) return skeldatasource, skeldatasegidlist, skelsegnamelist def uploadskeletons(skelsource, skelseglist, skelnamelist, path, layer_name): """Upload skeleton (of cloudvolume class) to a local server. Parameters ---------- skeldatasource: list contains cloud volume skeletons. skeldatasegidlist: list contains the segids(skid). skelsegnamelist: list contains the names of segments. path: str local path of the precomputed hosted layer. layer_name: str layer name. Returns ------- cv : CloudVolume object of cloudvolume class """ info = {"@type": "neuroglancer_skeletons", "transform": skelsource[0].transform.flatten(), "vertex_attributes": [{"id": "radius", "data_type": "float32", "num_components": 1}], "scales": "um"} path = 'file://' + path + '/precomputed/' + layer_name cv = CloudVolume(path, info=info) # prepare for info file cv.skeleton.meta.info['@type'] = 'neuroglancer_skeletons' cv.skeleton.meta.info['transform'] = skelsource[0].transform.flatten() cv.skeleton.meta.info['vertex_attributes'] = [ {'id': 'radius', 'data_type': 'float32', 'num_components': 1}] del cv.skeleton.meta.info['sharding'] del cv.skeleton.meta.info['spatial_index'] cv.skeleton.meta.info['segment_properties'] = 'seg_props' cv.skeleton.meta.commit_info() files = [os.path.join(cv.skeleton.meta.skeleton_path, str(skel.id)) for skel in skelsource] for fileidx in range(len(files)): fullfilepath = files[fileidx] fullfilepath = os.path.join(cv.basepath, os.path.basename(path), fullfilepath) uploadskel = Skeleton( vertices=skelsource[fileidx].vertices, edges=skelsource[fileidx].edges) print(fullfilepath) with open(fullfilepath, 'wb') as f: f.write(uploadskel.to_precomputed()) segfilepath = os.path.join(cv.basepath, os.path.basename( path), cv.skeleton.meta.skeleton_path, 'seg_props') if not os.path.exists(segfilepath): os.makedirs(segfilepath) print('creating:', segfilepath) allsegproplist = [] for segid in skelseglist: segpropdict = {} segpropdict['id'] = segid segpropdict['type'] = 'label' segpropdict['values'] = skelnamelist allsegproplist.append(segpropdict) seginfo = {"@type": "neuroglancer_segment_properties", "inline": {"ids": skelseglist, "properties": allsegproplist}} segfile = os.path.join(segfilepath, 'info') with open(segfile, 'w') as segfile: json.dump(seginfo, segfile) return cv def to_precomputedskels(skelsource, path): """Upload skeleton (of cloudvolume class) to a local path. Parameters ---------- skelsource: list contains the cloud volume skeletons. path: str local path of the precomputed hosted layer. 
""" info = {"@type": "neuroglancer_skeletons", "transform": skelsource[0].transform.flatten(), "vertex_attributes": [{"id": "radius", "data_type": "float32", "num_components": 1}], "scales": "um"} path = 'file://' + path + '/precomputed' cv = CloudVolume(path, info=info) # prepare for info file cv.skeleton.meta.info['@type'] = 'neuroglancer_skeletons' cv.skeleton.meta.info['transform'] = skelsource[0].transform.flatten() cv.skeleton.meta.info['vertex_attributes'] = [ {'id': 'radius', 'data_type': 'float32', 'num_components': 1}] del cv.skeleton.meta.info['sharding'] del cv.skeleton.meta.info['spatial_index'] cv.skeleton.meta.info['segment_properties'] = 'seg_props' cv.skeleton.meta.commit_info() files = [os.path.join(cv.skeleton.meta.skeleton_path, str(skel.id)) for skel in skelsource] for fileidx in range(len(files)): fullfilepath = files[fileidx] fullfilepath = os.path.join(cv.basepath, os.path.basename(path), fullfilepath) uploadskel = Skeleton( vertices=skelsource[fileidx].vertices, edges=skelsource[fileidx].edges) # print(fullfilepath) with open(fullfilepath, 'wb') as f: f.write(uploadskel.to_precomputed()) # delete the info file path, as they will be updated seperately.. info_file = os.path.join(cv.basepath, os.path.basename(path), 'skeletons', 'info') os.remove(info_file) def to_precomputedskelsinfo(skelseglist, skelnamelist, path): """Upload skeleton info to a local path. Parameters ---------- skelseglist: list contains the segids(skid). skelnamelist: list contains the names of skeletons. path: str local path of the precomputed hosted layer. """ info = {"@type": "neuroglancer_skeletons", "transform": [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0], "vertex_attributes": [{"id": "radius", "data_type": "float32", "num_components": 1}], "scales": "um"} path = 'file://' + path + '/precomputed' cv = CloudVolume(path, info=info) # prepare for info file cv.skeleton.meta.info['@type'] = 'neuroglancer_skeletons' cv.skeleton.meta.info['vertex_attributes'] = [ {'id': 'radius', 'data_type': 'float32', 'num_components': 1}] del cv.skeleton.meta.info['sharding'] del cv.skeleton.meta.info['spatial_index'] cv.skeleton.meta.info['segment_properties'] = 'seg_props' cv.skeleton.meta.commit_info() segfilepath = os.path.join(cv.basepath, os.path.basename( path), cv.skeleton.meta.skeleton_path, 'seg_props') if not os.path.exists(segfilepath): os.makedirs(segfilepath) print('creating:', segfilepath) allsegproplist = {} allsegproplist['id'] = 'label' allsegproplist['type'] = 'label' allsegproplist['values'] = skelnamelist seginfo = {"@type": "neuroglancer_segment_properties", "inline": {"ids": skelseglist, "properties": [allsegproplist]}} segfile = os.path.join(segfilepath, 'info') with open(segfile, 'w') as segfile: json.dump(seginfo, segfile) def uploadshardedskeletons(skelsource, skelseglist, skelnamelist, path, layer_name, shardprogress=False): """Upload sharded skeletons to a local server. Parameters ---------- skelsource: list contains cloud volume skeletons. skelseglist: list contains the segids(skid). skelnamelist: list contains the names of segments. path: str local path of the precomputed hosted layer. layer_name: str layer name. 
shardprogress: bool progress bar for sharding operation Returns ------- cv : CloudVolume object of cloudvolume class """ info = {"@type": "neuroglancer_skeletons", "transform": skelsource[0].transform.flatten(), "vertex_attributes": [{"id": "radius", "data_type": "float32", "num_components": 1}], "scales": "um"} path = 'file://' + path + '/precomputed/' + layer_name cv = CloudVolume(path, info=info) # prepare for info file cv.skeleton.meta.info['@type'] = 'neuroglancer_skeletons' cv.skeleton.meta.info['transform'] = skelsource[0].transform.flatten() cv.skeleton.meta.info['vertex_attributes'] = [ {'id': 'radius', 'data_type': 'float32', 'num_components': 1}] # prepare sharding info spec = ShardingSpecification('neuroglancer_uint64_sharded_v1', preshift_bits=9, hash='murmurhash3_x86_128', minishard_bits=6, shard_bits=15, minishard_index_encoding='raw', data_encoding='raw',) cv.skeleton.meta.info['sharding'] = spec.to_dict() cv.skeleton.meta.info['segment_properties'] = 'seg_props' cv.skeleton.meta.commit_info() precomputedskels = {} for skelidx in range(len(skelsource)): skelid = int(skelsource[skelidx].id) skel = Skeleton(skelsource[skelidx].vertices, edges=skelsource[skelidx].edges, segid=skelid, extra_attributes=[{"id": "radius", "data_type": "float32", "num_components": 1, }] ).physical_space() precomputedskels[skelid] = skel.to_precomputed() shardfiles = spec.synthesize_shards(precomputedskels, progress=shardprogress) shardedfilepath = os.path.join(cv.basepath, os.path.basename(path), cv.skeleton.meta.skeleton_path) for fname in shardfiles.keys(): with open(shardedfilepath + '/' + fname, 'wb') as f: f.write(shardfiles[fname]) segfilepath = os.path.join(cv.basepath, os.path.basename(path), cv.skeleton.meta.skeleton_path, 'seg_props') if not os.path.exists(segfilepath): os.makedirs(segfilepath) print('creating:', segfilepath) allsegproplist = [] for segid in skelseglist: segpropdict = {} segpropdict['id'] = segid segpropdict['type'] = 'label' segpropdict['values'] = skelnamelist allsegproplist.append(segpropdict) seginfo = {"@type": "neuroglancer_segment_properties", "inline": {"ids": skelseglist, "properties": allsegproplist}} segfile = os.path.join(segfilepath, 'info') with open(segfile, 'w') as segfile: json.dump(seginfo, segfile) return cv def skeletons2nodepoints(x, layer_scale): """Generate nodepoints (point A, point B) from skeletons for given neuron(s). Parameters ---------- x : CatmaidNeuron | CatmaidNeuronList or TreeNeuron | NeuronList neuron or neuronlist of different formats layer_scale : int | float scaling from voxel to native space in 'x', 'y', 'z' Returns ------- nodepointscollec_df : dataframe contains node points in point A - point B format used in flywire annotations. """ if isinstance(x, pymaid.core.CatmaidNeuron): x = pymaid.core.CatmaidNeuronList(x) elif isinstance(x, navis.core.TreeNeuron): x = navis.core.NeuronList(x) elif (isinstance(x, pymaid.core.CatmaidNeuronList) or isinstance(x, navis.core.NeuronList)): pass else: raise TypeError(f'Expected neuron or neuronlist, got "{type(x)}"') nodepointscollec_df = [] for neuronelement in x: nodes = neuronelement.nodes nonrootnodes = nodes[nodes.parent_id >= 0] ptA = nonrootnodes[['x', 'y', 'z']].values ptB = nodes.set_index('node_id').loc[nonrootnodes.parent_id.values, ['x', 'y', 'z']].values # scale the points incase it is in voxel coordinates.. 
ptA = ptA / layer_scale ptB = ptB / layer_scale pts_df = pd.DataFrame(pd.Series(ptA.tolist()), columns=['pointA']) pts_df['pointB'] = pd.Series(ptB.tolist()) nodepointscollec_df.append([neuronelement.id, pts_df]) nodepointscollec_df = pd.DataFrame(nodepointscollec_df, columns=['id', 'points_df']) return nodepointscollec_df
[ "pandas.DataFrame", "json.dump", "os.remove", "os.makedirs", "cloudvolume.Skeleton", "os.path.basename", "navis.core.NeuronList", "os.path.exists", "cloudvolume.datasource.precomputed.sharding.ShardingSpecification", "pymaid.core.CatmaidNeuronList", "cloudvolume.CloudVolume", "numpy.array", "os.path.join" ]
[((1968, 2040), 'numpy.array', 'np.array', (["(this_tn[['index', 'parent_ix']].values[1:] - 1)"], {'dtype': '"""uint32"""'}), "(this_tn[['index', 'parent_ix']].values[1:] - 1, dtype='uint32')\n", (1976, 2040), True, 'import numpy as np\n'), ((2057, 2109), 'cloudvolume.Skeleton', 'Skeleton', ([], {'segid': 'x.id', 'vertices': 'vertices', 'edges': 'edges'}), '(segid=x.id, vertices=vertices, edges=edges)\n', (2065, 2109), False, 'from cloudvolume import Skeleton, CloudVolume\n'), ((2297, 2348), 'numpy.array', 'np.array', (["this_tn['radius'].values"], {'dtype': '"""float32"""'}), "(this_tn['radius'].values, dtype='float32')\n", (2305, 2348), True, 'import numpy as np\n'), ((4909, 4937), 'cloudvolume.CloudVolume', 'CloudVolume', (['path'], {'info': 'info'}), '(path, info=info)\n', (4920, 4937), False, 'from cloudvolume import Skeleton, CloudVolume\n'), ((6566, 6599), 'os.path.join', 'os.path.join', (['segfilepath', '"""info"""'], {}), "(segfilepath, 'info')\n", (6578, 6599), False, 'import os\n'), ((7259, 7287), 'cloudvolume.CloudVolume', 'CloudVolume', (['path'], {'info': 'info'}), '(path, info=info)\n', (7270, 7287), False, 'from cloudvolume import Skeleton, CloudVolume\n'), ((8425, 8445), 'os.remove', 'os.remove', (['info_file'], {}), '(info_file)\n', (8434, 8445), False, 'import os\n'), ((9126, 9154), 'cloudvolume.CloudVolume', 'CloudVolume', (['path'], {'info': 'info'}), '(path, info=info)\n', (9137, 9154), False, 'from cloudvolume import Skeleton, CloudVolume\n'), ((10114, 10147), 'os.path.join', 'os.path.join', (['segfilepath', '"""info"""'], {}), "(segfilepath, 'info')\n", (10126, 10147), False, 'import os\n'), ((11170, 11198), 'cloudvolume.CloudVolume', 'CloudVolume', (['path'], {'info': 'info'}), '(path, info=info)\n', (11181, 11198), False, 'from cloudvolume import Skeleton, CloudVolume\n'), ((11527, 11721), 'cloudvolume.datasource.precomputed.sharding.ShardingSpecification', 'ShardingSpecification', (['"""neuroglancer_uint64_sharded_v1"""'], {'preshift_bits': '(9)', 'hash': '"""murmurhash3_x86_128"""', 'minishard_bits': '(6)', 'shard_bits': '(15)', 'minishard_index_encoding': '"""raw"""', 'data_encoding': '"""raw"""'}), "('neuroglancer_uint64_sharded_v1', preshift_bits=9,\n hash='murmurhash3_x86_128', minishard_bits=6, shard_bits=15,\n minishard_index_encoding='raw', data_encoding='raw')\n", (11548, 11721), False, 'from cloudvolume.datasource.precomputed.sharding import ShardingSpecification\n'), ((13594, 13627), 'os.path.join', 'os.path.join', (['segfilepath', '"""info"""'], {}), "(segfilepath, 'info')\n", (13606, 13627), False, 'import os\n'), ((15262, 15324), 'pandas.DataFrame', 'pd.DataFrame', (['nodepointscollec_df'], {'columns': "['id', 'points_df']"}), "(nodepointscollec_df, columns=['id', 'points_df'])\n", (15274, 15324), True, 'import pandas as pd\n'), ((3251, 3283), 'pymaid.core.CatmaidNeuronList', 'pymaid.core.CatmaidNeuronList', (['x'], {}), '(x)\n', (3280, 3283), False, 'import pymaid\n'), ((5696, 5781), 'cloudvolume.Skeleton', 'Skeleton', ([], {'vertices': 'skelsource[fileidx].vertices', 'edges': 'skelsource[fileidx].edges'}), '(vertices=skelsource[fileidx].vertices, edges=skelsource[fileidx].edges\n )\n', (5704, 5781), False, 'from cloudvolume import Skeleton, CloudVolume\n'), ((5956, 5978), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (5972, 5978), False, 'import os\n'), ((6046, 6073), 'os.path.exists', 'os.path.exists', (['segfilepath'], {}), '(segfilepath)\n', (6060, 6073), False, 'import os\n'), ((6083, 6107), 'os.makedirs', 'os.makedirs', 
(['segfilepath'], {}), '(segfilepath)\n', (6094, 6107), False, 'import os\n'), ((6648, 6675), 'json.dump', 'json.dump', (['seginfo', 'segfile'], {}), '(seginfo, segfile)\n', (6657, 6675), False, 'import json\n'), ((8046, 8131), 'cloudvolume.Skeleton', 'Skeleton', ([], {'vertices': 'skelsource[fileidx].vertices', 'edges': 'skelsource[fileidx].edges'}), '(vertices=skelsource[fileidx].vertices, edges=skelsource[fileidx].edges\n )\n', (8054, 8131), False, 'from cloudvolume import Skeleton, CloudVolume\n'), ((8376, 8398), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (8392, 8398), False, 'import os\n'), ((9601, 9623), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (9617, 9623), False, 'import os\n'), ((9691, 9718), 'os.path.exists', 'os.path.exists', (['segfilepath'], {}), '(segfilepath)\n', (9705, 9718), False, 'import os\n'), ((9728, 9752), 'os.makedirs', 'os.makedirs', (['segfilepath'], {}), '(segfilepath)\n', (9739, 9752), False, 'import os\n'), ((10196, 10223), 'json.dump', 'json.dump', (['seginfo', 'segfile'], {}), '(seginfo, segfile)\n', (10205, 10223), False, 'import json\n'), ((12755, 12777), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (12771, 12777), False, 'import os\n'), ((12993, 13015), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (13009, 13015), False, 'import os\n'), ((13074, 13101), 'os.path.exists', 'os.path.exists', (['segfilepath'], {}), '(segfilepath)\n', (13088, 13101), False, 'import os\n'), ((13111, 13135), 'os.makedirs', 'os.makedirs', (['segfilepath'], {}), '(segfilepath)\n', (13122, 13135), False, 'import os\n'), ((13676, 13703), 'json.dump', 'json.dump', (['seginfo', 'segfile'], {}), '(seginfo, segfile)\n', (13685, 13703), False, 'import json\n'), ((14309, 14341), 'pymaid.core.CatmaidNeuronList', 'pymaid.core.CatmaidNeuronList', (['x'], {}), '(x)\n', (14338, 14341), False, 'import pymaid\n'), ((3343, 3367), 'navis.core.NeuronList', 'navis.core.NeuronList', (['x'], {}), '(x)\n', (3364, 3367), False, 'import navis\n'), ((5637, 5659), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (5653, 5659), False, 'import os\n'), ((7987, 8009), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (8003, 8009), False, 'import os\n'), ((14401, 14425), 'navis.core.NeuronList', 'navis.core.NeuronList', (['x'], {}), '(x)\n', (14422, 14425), False, 'import navis\n'), ((12197, 12372), 'cloudvolume.Skeleton', 'Skeleton', (['skelsource[skelidx].vertices'], {'edges': 'skelsource[skelidx].edges', 'segid': 'skelid', 'extra_attributes': "[{'id': 'radius', 'data_type': 'float32', 'num_components': 1}]"}), "(skelsource[skelidx].vertices, edges=skelsource[skelidx].edges,\n segid=skelid, extra_attributes=[{'id': 'radius', 'data_type': 'float32',\n 'num_components': 1}])\n", (12205, 12372), False, 'from cloudvolume import Skeleton, CloudVolume\n')]
# -*- coding: utf-8 -*-

from functools import reduce
from operator import mul

import numpy as np

from africanus.util.docs import DocstringTemplate
from africanus.util.numba import jit


@jit(nopython=True, nogil=True, cache=True)
def _nb_feed_rotation(parallactic_angles, feed_type, feed_rotation):
    shape = parallactic_angles.shape
    parangles = parallactic_angles.flat

    # Linear feeds
    if feed_type == 0:
        for i, pa in enumerate(parangles):
            pa_cos = np.cos(pa)
            pa_sin = np.sin(pa)

            feed_rotation.real[i, 0, 0] = pa_cos
            feed_rotation.imag[i, 0, 0] = 0.0
            feed_rotation.real[i, 0, 1] = pa_sin
            feed_rotation.imag[i, 0, 1] = 0.0
            feed_rotation.real[i, 1, 0] = -pa_sin
            feed_rotation.imag[i, 1, 0] = 0.0
            feed_rotation.real[i, 1, 1] = pa_cos
            feed_rotation.imag[i, 1, 1] = 0.0
    # Circular feeds
    elif feed_type == 1:
        for i, pa in enumerate(parangles):
            pa_cos = np.cos(pa)
            pa_sin = np.sin(pa)

            feed_rotation.real[i, 0, 0] = pa_cos
            feed_rotation.imag[i, 0, 0] = -pa_sin
            feed_rotation[i, 0, 1] = 0.0 + 0.0*1j
            feed_rotation[i, 1, 0] = 0.0 + 0.0*1j
            feed_rotation.real[i, 1, 1] = pa_cos
            feed_rotation.imag[i, 1, 1] = pa_sin
    else:
        raise ValueError("Invalid feed_type")

    return feed_rotation.reshape(shape + (2, 2))


def feed_rotation(parallactic_angles, feed_type='linear'):
    if feed_type == 'linear':
        poltype = 0
    elif feed_type == 'circular':
        poltype = 1
    else:
        raise ValueError("Invalid feed_type '%s'" % feed_type)

    if parallactic_angles.dtype == np.float32:
        dtype = np.complex64
    elif parallactic_angles.dtype == np.float64:
        dtype = np.complex128
    else:
        raise ValueError("parallactic_angles has "
                         "none-floating point type %s"
                         % parallactic_angles.dtype)

    # Create result array with flattened parangles
    shape = (reduce(mul, parallactic_angles.shape),) + (2, 2)
    result = np.empty(shape, dtype=dtype)

    return _nb_feed_rotation(parallactic_angles, poltype, result)


FEED_ROTATION_DOCS = DocstringTemplate(r"""
Computes the 2x2 feed rotation (L) matrix
from the ``parallactic_angles``.

.. math::

    \textrm{linear}
    \begin{bmatrix}
        cos(pa) & sin(pa) \\
        -sin(pa) & cos(pa)
    \end{bmatrix}
    \qquad
    \textrm{circular}
    \begin{bmatrix}
        e^{-i pa} & 0 \\
        0 & e^{i pa}
    \end{bmatrix}

Parameters
----------
parallactic_angles : $(array_type)
    floating point parallactic angles. Of shape
    :code:`(pa0, pa1, ..., pan)`.
feed_type : {'linear', 'circular'}
    The type of feed

Returns
-------
feed_matrix : $(array_type)
    Feed rotation matrix of shape :code:`(pa0, pa1,...,pan,2,2)`
""")

try:
    feed_rotation.__doc__ = FEED_ROTATION_DOCS.substitute(
        array_type=":class:`numpy.ndarray`")
except AttributeError:
    pass
[ "africanus.util.docs.DocstringTemplate", "numpy.empty", "numpy.sin", "numpy.cos", "functools.reduce", "africanus.util.numba.jit" ]
[((191, 233), 'africanus.util.numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)', 'cache': '(True)'}), '(nopython=True, nogil=True, cache=True)\n', (194, 233), False, 'from africanus.util.numba import jit\n'), ((2279, 2934), 'africanus.util.docs.DocstringTemplate', 'DocstringTemplate', (['"""\nComputes the 2x2 feed rotation (L) matrix\nfrom the ``parallactic_angles``.\n\n.. math::\n\n \\\\textrm{linear}\n \\\\begin{bmatrix}\n cos(pa) & sin(pa) \\\\\\\\\n -sin(pa) & cos(pa)\n \\\\end{bmatrix}\n \\\\qquad\n \\\\textrm{circular}\n \\\\begin{bmatrix}\n e^{-i pa} & 0 \\\\\\\\\n 0 & e^{i pa}\n \\\\end{bmatrix}\n\nParameters\n----------\nparallactic_angles : $(array_type)\n floating point parallactic angles. Of shape\n :code:`(pa0, pa1, ..., pan)`.\nfeed_type : {\'linear\', \'circular\'}\n The type of feed\n\nReturns\n-------\nfeed_matrix : $(array_type)\n Feed rotation matrix of shape :code:`(pa0, pa1,...,pan,2,2)`\n"""'], {}), '(\n """\nComputes the 2x2 feed rotation (L) matrix\nfrom the ``parallactic_angles``.\n\n.. math::\n\n \\\\textrm{linear}\n \\\\begin{bmatrix}\n cos(pa) & sin(pa) \\\\\\\\\n -sin(pa) & cos(pa)\n \\\\end{bmatrix}\n \\\\qquad\n \\\\textrm{circular}\n \\\\begin{bmatrix}\n e^{-i pa} & 0 \\\\\\\\\n 0 & e^{i pa}\n \\\\end{bmatrix}\n\nParameters\n----------\nparallactic_angles : $(array_type)\n floating point parallactic angles. Of shape\n :code:`(pa0, pa1, ..., pan)`.\nfeed_type : {\'linear\', \'circular\'}\n The type of feed\n\nReturns\n-------\nfeed_matrix : $(array_type)\n Feed rotation matrix of shape :code:`(pa0, pa1,...,pan,2,2)`\n"""\n )\n', (2296, 2934), False, 'from africanus.util.docs import DocstringTemplate\n'), ((2160, 2188), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (2168, 2188), True, 'import numpy as np\n'), ((487, 497), 'numpy.cos', 'np.cos', (['pa'], {}), '(pa)\n', (493, 497), True, 'import numpy as np\n'), ((519, 529), 'numpy.sin', 'np.sin', (['pa'], {}), '(pa)\n', (525, 529), True, 'import numpy as np\n'), ((2098, 2135), 'functools.reduce', 'reduce', (['mul', 'parallactic_angles.shape'], {}), '(mul, parallactic_angles.shape)\n', (2104, 2135), False, 'from functools import reduce\n'), ((1023, 1033), 'numpy.cos', 'np.cos', (['pa'], {}), '(pa)\n', (1029, 1033), True, 'import numpy as np\n'), ((1055, 1065), 'numpy.sin', 'np.sin', (['pa'], {}), '(pa)\n', (1061, 1065), True, 'import numpy as np\n')]
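# Usage sketch for the feed_rotation module in the row above. This is appended for
# illustration only and is not part of the dataset row; the import location
# (africanus.rime) is an assumption rather than something stated in the row.
import numpy as np
from africanus.rime import feed_rotation  # assumed import path

pa = np.deg2rad(np.array([0.0, 30.0, 90.0], dtype=np.float64))
L = feed_rotation(pa, feed_type='linear')   # complex128 result of shape (3, 2, 2)
assert np.allclose(L[0], np.eye(2))          # zero parallactic angle -> identity rotation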
# -*- coding: utf-8 -*- import datetime import uuid from collections import abc from typing import MutableSequence, Any, Optional, Sequence import typing import numpy as np from .array import StateVector, CovarianceMatrix, PrecisionMatrix from .base import Type from .numeric import Probability from .particle import Particles from ..base import Property class State(Type): """State type. Most simple state type, which only has time and a state vector.""" timestamp: datetime.datetime = Property( default=None, doc="Timestamp of the state. Default None.") state_vector: StateVector = Property(doc='State vector.') def __init__(self, state_vector, *args, **kwargs): # Don't cast away subtype of state_vector if not necessary if state_vector is not None \ and not isinstance(state_vector, StateVector): state_vector = StateVector(state_vector) super().__init__(state_vector, *args, **kwargs) @property def ndim(self): """The number of dimensions represented by the state.""" return self.state_vector.shape[0] @staticmethod def from_state(state: 'State', *args: Any, target_type: Optional[typing.Type] = None, **kwargs: Any) -> 'State': """Class utility function to create a new state (or compatible type) from an existing state. The type and properties of this new state are defined by `state` except for any explicitly overwritten via `args` and `kwargs`. It acts similarly in feel to a copy constructor, with the optional over-writing of properties. Parameters ---------- state: State :class:`~.State` to use existing properties from, and identify new state-type from. \\*args: Sequence Arguments to pass to newly created state, replacing those with same name in `state`. target_type: Type, optional Optional argument specifying the type of of object to be created. This need not necessarily be :class:`~.State` subclass. Any arguments that match between the input `state` and the target type will be copied from the old to the new object (except those explicitly specified in `args` and `kwargs`. \\*\\*kwargs: Mapping New property names and associate value for use in newly created state, replacing those on the `state` parameter. 
""" # Handle being initialised with state sequence if isinstance(state, StateMutableSequence): state = state.state if target_type is None: target_type = type(state) args_property_names = { name for n, name in enumerate(target_type.properties) if n < len(args)} new_kwargs = { name: getattr(state, name) for name in type(state).properties.keys() & target_type.properties.keys() if name not in args_property_names} new_kwargs.update(kwargs) return target_type(*args, **new_kwargs) class CreatableFromState: class_mapping = {} def __init_subclass__(cls, **kwargs): bases = cls.__bases__ if CreatableFromState in bases: # Direct subclasses should not be added to the class mapping, only subclasses of # subclasses return if len(bases) != 2: raise TypeError('A CreatableFromState subclass must have exactly two superclasses') base_class, state_type = cls.__bases__ if not issubclass(base_class, CreatableFromState): raise TypeError('The first superclass of a CreatableFromState subclass must be a ' 'CreatableFromState (or a subclass)') if not issubclass(state_type, State): # Non-state subclasses do not need adding to the class mapping, as they should not # be created from States return if base_class not in CreatableFromState.class_mapping: CreatableFromState.class_mapping[base_class] = {} CreatableFromState.class_mapping[base_class][state_type] = cls super().__init_subclass__(**kwargs) @classmethod def from_state( cls, state: State, *args: Any, target_type: Optional[type] = None, **kwargs: Any) -> 'State': """ Return new object instance of suitable type from an existing `state`. The type returned can be explicitly specified using the `target_type` argument, otherwise it is chosen by introspection of the created subclasses of this type: see below for an example. Any compatible properties are copied from the input `state` to the returned object, except for those specified by `args` and `kwargs`, which take precedence over those from the input `state`. This method is primarily concerned with type selection, with actual copying performed by the static :meth:`~.State.from_state` method. As an example of the type selection algorithm, consider the case of the class `GaussianStatePrediction(Prediction, GaussianState)`. This is subclass of `Prediction`, and `GaussianState` and so the `class_mapping` property will have an entry added (when `GaussianStatePrediction` is defined) such that `class_mapping[Prediction][GaussianState] = GaussianStatePrediction`. If this method is then called like below >>>> gaussian_state = GaussianState(some_arguments) >>>> new_prediction = Prediction.from_state(gaussian_state, *args, **kwargs) then the `from_state` method will look up the class mapping and see that `Prediction.from_state()` called with a `GaussianState` input should return a `GaussianStatePrediction` object, and therefore the type of `new_prediction` will be `GaussianStatePrediction` The functionality is currently used by :class:`~.Prediction` and :class:`~.Updater` objects. Parameters ---------- state: State :class:`~.State` to use existing properties from, and identify prediction type from \\*args: Sequence Arguments to pass to newly created prediction, replacing those with same name on ``state`` parameter. target_type: Type, optional Type to use for prediction, overriding one from :attr:`class_mapping`. \\*\\*kwargs: Mapping New property names and associate value for use in newly created prediction, replacing those on the ``state`` parameter. 
""" # Handle being initialised with state sequence if isinstance(state, StateMutableSequence): state = state.state try: state_type = next(type_ for type_ in type(state).mro() if type_ in CreatableFromState.class_mapping[cls]) except StopIteration: raise TypeError(f'{cls.__name__} type not defined for {type(state).__name__}') if target_type is None: target_type = CreatableFromState.class_mapping[cls][state_type] return State.from_state(state, *args, **kwargs, target_type=target_type) class StateMutableSequence(Type, abc.MutableSequence): """A mutable sequence for :class:`~.State` instances This sequence acts like a regular list object for States, as well as proxying state attributes to the last state in the sequence. This sequence can also be indexed/sliced by :class:`datetime.datetime` instances. Example ------- >>> t0 = datetime.datetime(2018, 1, 1, 14, 00) >>> t1 = t0 + datetime.timedelta(minutes=1) >>> state0 = State([[0]], t0) >>> sequence = StateMutableSequence([state0]) >>> print(sequence.state_vector, sequence.timestamp) [[0]] 2018-01-01 14:00:00 >>> sequence.append(State([[1]], t1)) >>> for state in sequence[t1:]: ... print(state.state_vector, state.timestamp) [[1]] 2018-01-01 14:01:00 """ states: MutableSequence[State] = Property( default=None, doc="The initial list of states. Default `None` which initialises with empty list.") def __init__(self, states=None, *args, **kwargs): if states is None: states = [] elif not isinstance(states, abc.Sequence): # Ensure states is a list states = [states] super().__init__(states, *args, **kwargs) def __len__(self): return self.states.__len__() def __setitem__(self, index, value): return self.states.__setitem__(index, value) def __delitem__(self, index): return self.states.__delitem__(index) def __getitem__(self, index): if isinstance(index, slice) and ( isinstance(index.start, datetime.datetime) or isinstance(index.stop, datetime.datetime)): items = [] for state in self.states: try: if index.start and state.timestamp < index.start: continue if index.stop and state.timestamp >= index.stop: continue except TypeError as exc: raise TypeError( 'both indices must be `datetime.datetime` objects for' 'time slice') from exc items.append(state) return StateMutableSequence(items[::index.step]) elif isinstance(index, datetime.datetime): for state in reversed(self.states): if state.timestamp == index: return state else: raise IndexError('timestamp not found in states') elif isinstance(index, slice): return StateMutableSequence(self.states.__getitem__(index)) else: return self.states.__getitem__(index) def __getattribute__(self, name): # This method is called if we try to access an attribute of self. First we try to get the # attribute directly, but if that fails, we want to try getting the same attribute from # self.state instead. If that, in turn, fails we want to return the error message that # would have originally been raised, rather than an error message that the State has no # such attribute. # # An alternative mechanism using __getattr__ seems simpler (as it skips the first few lines # of code, but __getattr__ has no mechanism to capture the originally raised error. try: # This tries first to get the attribute from self. return Type.__getattribute__(self, name) except AttributeError as original_error: if name.startswith("_"): # Don't proxy special/private attributes to `state`, just raise the original error raise original_error else: # For non _ attributes, try to get the attribute from self.state instead of self. 
try: my_state = Type.__getattribute__(self, 'state') return getattr(my_state, name) except AttributeError: # If we get the error about 'State' not having the attribute, then we want to # raise the original error instead raise original_error def insert(self, index, value): return self.states.insert(index, value) @property def state(self): return self.states[-1] def last_timestamp_generator(self): """Generator yielding the last state for each timestamp This provides a method of iterating over a sequence of states, such that when multiple states for the same timestamp exist, only the last state is yielded. This is particularly useful in cases where you may have multiple :class:`~.Update` states for a single timestamp e.g. multi-sensor tracking example. Yields ------ State A state for each timestamp present in the sequence. """ state_iter = iter(self) current_state = next(state_iter) for next_state in state_iter: if next_state.timestamp > current_state.timestamp: yield current_state current_state = next_state yield current_state class GaussianState(State): """Gaussian State type This is a simple Gaussian state object, which, as the name suggests, is described by a Gaussian state distribution. """ covar: CovarianceMatrix = Property(doc='Covariance matrix of state.') def __init__(self, state_vector, covar, *args, **kwargs): # Don't cast away subtype of covar if not necessary if not isinstance(covar, CovarianceMatrix): covar = CovarianceMatrix(covar) super().__init__(state_vector, covar, *args, **kwargs) if self.state_vector.shape[0] != self.covar.shape[0]: raise ValueError( "state vector and covariance should have same dimensions") @property def mean(self): """The state mean, equivalent to state vector""" return self.state_vector class SqrtGaussianState(State): """A Gaussian State type where the covariance matrix is stored in a form :math:`W` such that :math:`P = WW^T` For :math:`P` in general, :math:`W` is not unique and the user may choose the form to their taste. No checks are undertaken to ensure that a sensible square root form has been chosen. """ sqrt_covar: CovarianceMatrix = Property(doc="A square root form of the Gaussian covariance " "matrix.") def __init__(self, state_vector, sqrt_covar, *args, **kwargs): sqrt_covar = CovarianceMatrix(sqrt_covar) super().__init__(state_vector, sqrt_covar, *args, **kwargs) @property def mean(self): """The state mean, equivalent to state vector""" return self.state_vector @property def covar(self): """The full covariance matrix. Returns ------- : :class:`~.CovarianceMatrix` The covariance matrix calculated via :math:`W W^T`, where :math:`W` is a :class:`~.SqrtCovarianceMatrix` """ return self.sqrt_covar @ self.sqrt_covar.T GaussianState.register(SqrtGaussianState) # noqa: E305 class InformationState(State): r"""Information State Type The information state class carries the :attr:`state_vector`, :math:`\mathbf{y}_k = Y_k \mathbf{x}_k` and the precision or information matrix :math:`Y_k = P_k^{-1}`, where :math:`\mathbf{x}_k` and :math:`P_k` are the mean and covariance, respectively, of a Gaussian state. """ precision: PrecisionMatrix = Property(doc='precision matrix of state.') class WeightedGaussianState(GaussianState): """Weighted Gaussian State Type Gaussian State object with an associated weight. Used as components for a GaussianMixtureState. 
""" weight: Probability = Property(default=0, doc="Weight of the Gaussian State.") @property def gaussian_state(self): """The Gaussian state.""" return GaussianState(self.state_vector, self.covar, timestamp=self.timestamp) @classmethod def from_gaussian_state(cls, gaussian_state, *args, copy=True, **kwargs): r""" Returns a WeightedGaussianState instance based on the gaussian_state. Parameters ---------- gaussian_state : :class:`~.GaussianState` The guassian_state used to create the new WeightedGaussianState. \*args : See main :class:`~.WeightedGaussianState` args are passed to :class:`~.WeightedGaussianState` __init__() copy : Boolean, optional If True, the WeightedGaussianState is created with copies of the elements of gaussian_state. The default is True. \*\*kwargs : See main :class:`~.WeightedGaussianState` kwargs are passed to :class:`~.WeightedGaussianState` __init__() Returns ------- :class:`~.WeightedGaussianState` Instance of WeightedGaussianState. """ state_vector = gaussian_state.state_vector covar = gaussian_state.covar timestamp = gaussian_state.timestamp if copy: state_vector = state_vector.copy() covar = covar.copy() return cls( state_vector=state_vector, covar=covar, timestamp=timestamp, *args, **kwargs ) class TaggedWeightedGaussianState(WeightedGaussianState): """Tagged Weighted Gaussian State Type Gaussian State object with an associated weight and tag. Used as components for a GaussianMixtureState. """ tag: str = Property(default=None, doc="Unique tag of the Gaussian State.") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.tag is None: self.tag = str(uuid.uuid4()) class ParticleState(Type): """Particle State type This is a particle state object which describes the state as a distribution of particles""" particles: Particles = Property(doc='All particles.') fixed_covar: CovarianceMatrix = Property(default=None, doc='Fixed covariance value. Default `None`, where' 'weighted sample covariance is then used.') timestamp: datetime.datetime = Property(default=None, doc="Timestamp of the state. Default None.") def __init__(self, particles, *args, **kwargs): if particles is not None and not isinstance(particles, Particles): particles = Particles(particle_list=particles) super().__init__(particles, *args, **kwargs) @property def ndim(self): return self.particles.ndim @property def mean(self): """The state mean, equivalent to state vector""" result = np.average(self.particles.state_vector, axis=1, weights=self.particles.weight) # Convert type as may have type of weights return result @property def state_vector(self): """The mean value of the particle states""" return self.mean @property def covar(self): if self.fixed_covar is not None: return self.fixed_covar cov = np.cov(self.particles.state_vector, ddof=0, aweights=np.array(self.particles.weight)) # Fix one dimensional covariances being returned with zero dimension return cov State.register(ParticleState) # noqa: E305 class CategoricalState(State): r"""CategoricalState type. State object representing an object in a categorical state space. A state vector :math:`\mathbf{x}_i = P(\phi_i)` defines a categorical distribution over a finite set of discrete categories :math:`\Phi = \{\phi_m|m\in Z_{\ge0}\}`.""" category_names: Sequence[str] = Property( default=None, doc="Sequence of category names corresponding to each state vector component. Defaults to " "a list of integers." 
) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Check there is a category name for each state vector component if self.category_names and len(self.category_names) != self.ndim: raise ValueError(f"{len(self.category_names)} category names were given for a state " f"vector with {self.ndim} elements") # Build default list of integers if no category names given if self.category_names is None: self.category_names = list(range(self.ndim)) # Check all vector elements are valid probabilities if any(p < 0 or p > 1 for p in self.state_vector): raise ValueError("Category probabilities must lie in the closed interval [0, 1]") # Check vector is normalised if not np.isclose(np.sum(self.state_vector), 1): raise ValueError("Category probabilities must sum to 1") def __str__(self): strings = [f"P({category}) = {p}" for category, p in zip(self.category_names, self.state_vector)] return f"({', '.join(strings)})" @property def category(self): """Return the name of the most likely category""" return self.category_names[np.argmax(self.state_vector)]
[ "uuid.uuid4", "numpy.average", "numpy.sum", "numpy.argmax", "numpy.array" ]
[((18291, 18369), 'numpy.average', 'np.average', (['self.particles.state_vector'], {'axis': '(1)', 'weights': 'self.particles.weight'}), '(self.particles.state_vector, axis=1, weights=self.particles.weight)\n', (18301, 18369), True, 'import numpy as np\n'), ((20756, 20784), 'numpy.argmax', 'np.argmax', (['self.state_vector'], {}), '(self.state_vector)\n', (20765, 20784), True, 'import numpy as np\n'), ((17246, 17258), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (17256, 17258), False, 'import uuid\n'), ((18799, 18830), 'numpy.array', 'np.array', (['self.particles.weight'], {}), '(self.particles.weight)\n', (18807, 18830), True, 'import numpy as np\n'), ((20334, 20359), 'numpy.sum', 'np.sum', (['self.state_vector'], {}), '(self.state_vector)\n', (20340, 20359), True, 'import numpy as np\n')]
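# Usage sketch for the state types defined in the row above (illustration only,
# not part of the dataset row); the stonesoup.types.state import path is an
# assumption about where these classes live in the Stone Soup package.
import datetime
import numpy as np
from stonesoup.types.state import GaussianState, StateMutableSequence  # assumed path

t0 = datetime.datetime(2020, 1, 1, 14, 0)
prior = GaussianState(np.array([[0.0], [1.0]]), np.diag([1.0, 0.5]), timestamp=t0)
print(prior.ndim)                         # 2: number of rows in the state vector
print(prior.mean is prior.state_vector)  # True: mean simply returns the state vector

track = StateMutableSequence([prior])
print(track.timestamp)                   # attribute access is proxied to the last state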
# Copyright Cartopy Contributors # # This file is part of Cartopy and is released under the LGPL license. # See COPYING and COPYING.LESSER in the root of the repository for full # licensing details. from __future__ import (absolute_import, division, print_function) import operator import warnings import matplotlib import matplotlib.collections as mcollections import matplotlib.ticker as mticker import matplotlib.transforms as mtrans import matplotlib.path as mpath import numpy as np import shapely.geometry as sgeom import cartopy from cartopy.crs import Projection, _RectangularProjection from cartopy.mpl.ticker import ( LongitudeLocator, LatitudeLocator, LongitudeFormatter, LatitudeFormatter) degree_locator = mticker.MaxNLocator(nbins=9, steps=[1, 1.5, 1.8, 2, 3, 6, 10]) classic_locator = mticker.MaxNLocator(nbins=9) classic_formatter = mticker.ScalarFormatter _DEGREE_SYMBOL = u'\u00B0' _X_INLINE_PROJS = ( cartopy.crs.InterruptedGoodeHomolosine, cartopy.crs.LambertConformal, cartopy.crs.Mollweide, cartopy.crs.Sinusoidal, cartopy.crs.RotatedPole, ) _POLAR_PROJS = ( cartopy.crs.NorthPolarStereo, cartopy.crs.SouthPolarStereo, cartopy.crs.Stereographic ) def _fix_lons(lons): """ Fix the given longitudes into the range ``[-180, 180]``. """ lons = np.array(lons, copy=False, ndmin=1) fixed_lons = ((lons + 180) % 360) - 180 # Make the positive 180s positive again. fixed_lons[(fixed_lons == -180) & (lons > 0)] *= -1 return fixed_lons def _lon_hemisphere(longitude): """Return the hemisphere (E, W or '' for 0) for the given longitude.""" longitude = _fix_lons(longitude) if longitude > 0: hemisphere = 'E' elif longitude < 0: hemisphere = 'W' else: hemisphere = '' return hemisphere def _lat_hemisphere(latitude): """Return the hemisphere (N, S or '' for 0) for the given latitude.""" if latitude > 0: hemisphere = 'N' elif latitude < 0: hemisphere = 'S' else: hemisphere = '' return hemisphere def _east_west_formatted(longitude, num_format='g'): fmt_string = u'{longitude:{num_format}}{degree}{hemisphere}' return fmt_string.format(longitude=abs(longitude), num_format=num_format, hemisphere=_lon_hemisphere(longitude), degree=_DEGREE_SYMBOL) def _north_south_formatted(latitude, num_format='g'): fmt_string = u'{latitude:{num_format}}{degree}{hemisphere}' return fmt_string.format(latitude=abs(latitude), num_format=num_format, hemisphere=_lat_hemisphere(latitude), degree=_DEGREE_SYMBOL) #: A formatter which turns longitude values into nice longitudes such as 110W LONGITUDE_FORMATTER = mticker.FuncFormatter(lambda v, pos: _east_west_formatted(v)) #: A formatter which turns longitude values into nice longitudes such as 45S LATITUDE_FORMATTER = mticker.FuncFormatter(lambda v, pos: _north_south_formatted(v)) class Gridliner(object): # NOTE: In future, one of these objects will be add-able to a GeoAxes (and # maybe even a plain old mpl axes) and it will call the "_draw_gridliner" # method on draw. This will enable automatic gridline resolution # determination on zoom/pan. def __init__(self, axes, crs, draw_labels=False, xlocator=None, ylocator=None, collection_kwargs=None, xformatter=None, yformatter=None, dms=False, x_inline=None, y_inline=None, auto_inline=True): """ Object used by :meth:`cartopy.mpl.geoaxes.GeoAxes.gridlines` to add gridlines and tick labels to a map. Parameters ---------- axes The :class:`cartopy.mpl.geoaxes.GeoAxes` object to be drawn on. crs The :class:`cartopy.crs.CRS` defining the coordinate system that the gridlines are drawn in. draw_labels: optional Toggle whether to draw labels. 
For finer control, attributes of :class:`Gridliner` may be modified individually. Defaults to False. xlocator: optional A :class:`matplotlib.ticker.Locator` instance which will be used to determine the locations of the gridlines in the x-coordinate of the given CRS. Defaults to None, which implies automatic locating of the gridlines. ylocator: optional A :class:`matplotlib.ticker.Locator` instance which will be used to determine the locations of the gridlines in the y-coordinate of the given CRS. Defaults to None, which implies automatic locating of the gridlines. xformatter: optional A :class:`matplotlib.ticker.Formatter` instance to format labels for x-coordinate gridlines. It defaults to None, which implies the use of a :class:`cartopy.mpl.ticker.LongitudeFormatter` initiated with the ``dms`` argument, if the crs is of :class:`~cartopy.crs.PlateCarree` type. yformatter: optional A :class:`matplotlib.ticker.Formatter` instance to format labels for y-coordinate gridlines. It defaults to None, which implies the use of a :class:`cartopy.mpl.ticker.LatitudeFormatter` initiated with the ``dms`` argument, if the crs is of :class:`~cartopy.crs.PlateCarree` type. collection_kwargs: optional Dictionary controlling line properties, passed to :class:`matplotlib.collections.Collection`. Defaults to None. dms: bool When default locators and formatters are used, ticks are able to stop on minutes and seconds if minutes is set to True, and not fraction of degrees. x_inline: optional Toggle whether the x labels drawn should be inline. y_inline: optional Toggle whether the y labels drawn should be inline. auto_inline: optional Set x_inline and y_inline automatically based on projection. Notes ----- The "x" and "y" labels for locators and formatters do not necessarily correspond to X and Y, but to the first and second coordinates of the specified CRS. For the common case of PlateCarree gridlines, these correspond to longitudes and latitudes. Depending on the projection used for the map, meridians and parallels can cross both the X axis and the Y axis. """ self.axes = axes #: The :class:`~matplotlib.ticker.Locator` to use for the x #: gridlines and labels. if xlocator is not None: if not isinstance(xlocator, mticker.Locator): xlocator = mticker.FixedLocator(xlocator) self.xlocator = xlocator elif isinstance(crs, cartopy.crs.PlateCarree): self.xlocator = LongitudeLocator(dms=dms) else: self.xlocator = classic_locator #: The :class:`~matplotlib.ticker.Locator` to use for the y #: gridlines and labels. if ylocator is not None: if not isinstance(ylocator, mticker.Locator): ylocator = mticker.FixedLocator(ylocator) self.ylocator = ylocator elif isinstance(crs, cartopy.crs.PlateCarree): self.ylocator = LatitudeLocator(dms=dms) else: self.ylocator = classic_locator if xformatter is None: if isinstance(crs, cartopy.crs.PlateCarree): xformatter = LongitudeFormatter(dms=dms) else: xformatter = classic_formatter() #: The :class:`~matplotlib.ticker.Formatter` to use for the lon labels. self.xformatter = xformatter if yformatter is None: if isinstance(crs, cartopy.crs.PlateCarree): yformatter = LatitudeFormatter(dms=dms) else: yformatter = classic_formatter() #: The :class:`~matplotlib.ticker.Formatter` to use for the lat labels. self.yformatter = yformatter #: Whether to draw labels on the top of the map. self.top_labels = draw_labels #: Whether to draw labels on the bottom of the map. self.bottom_labels = draw_labels #: Whether to draw labels on the left hand side of the map. 
self.left_labels = draw_labels #: Whether to draw labels on the right hand side of the map. self.right_labels = draw_labels if auto_inline: if isinstance(self.axes.projection, _X_INLINE_PROJS): self.x_inline = True self.y_inline = False elif isinstance(self.axes.projection, _POLAR_PROJS): self.x_inline = False self.y_inline = True else: self.x_inline = False self.y_inline = False # overwrite auto_inline if necessary if x_inline is not None: #: Whether to draw x labels inline self.x_inline = x_inline elif not auto_inline: self.x_inline = False if y_inline is not None: #: Whether to draw y labels inline self.y_inline = y_inline elif not auto_inline: self.y_inline = False #: Whether to draw the x gridlines. self.xlines = True #: Whether to draw the y gridlines. self.ylines = True #: A dictionary passed through to ``ax.text`` on x label creation #: for styling of the text labels. self.xlabel_style = {} #: A dictionary passed through to ``ax.text`` on y label creation #: for styling of the text labels. self.ylabel_style = {} #: The padding from the map edge to the x labels in points. self.xpadding = 5 #: The padding from the map edge to the y labels in points. self.ypadding = 5 #: Allow the rotation of labels. self.rotate_labels = True # Current transform self.crs = crs # if the user specifies tick labels at this point, check if they can # be drawn. The same check will take place at draw time in case # public attributes are changed after instantiation. if draw_labels and not (x_inline or y_inline or auto_inline): self._assert_can_draw_ticks() #: The number of interpolation points which are used to draw the #: gridlines. self.n_steps = 100 #: A dictionary passed through to #: ``matplotlib.collections.LineCollection`` on grid line creation. self.collection_kwargs = collection_kwargs #: The x gridlines which were created at draw time. self.xline_artists = [] #: The y gridlines which were created at draw time. self.yline_artists = [] # Plotted status self._plotted = False # Check visibility of labels at each draw event # (or once drawn, only at resize event ?) self.axes.figure.canvas.mpl_connect('draw_event', self._draw_event) @property def xlabels_top(self): warnings.warn('The .xlabels_top attribute is deprecated. Please ' 'use .top_labels to toggle visibility instead.') return self.top_labels @xlabels_top.setter def xlabels_top(self, value): warnings.warn('The .xlabels_top attribute is deprecated. Please ' 'use .top_labels to toggle visibility instead.') self.top_labels = value @property def xlabels_bottom(self): warnings.warn('The .xlabels_bottom attribute is deprecated. Please ' 'use .bottom_labels to toggle visibility instead.') return self.bottom_labels @xlabels_bottom.setter def xlabels_bottom(self, value): warnings.warn('The .xlabels_bottom attribute is deprecated. Please ' 'use .bottom_labels to toggle visibility instead.') self.bottom_labels = value @property def ylabels_left(self): warnings.warn('The .ylabels_left attribute is deprecated. Please ' 'use .left_labels to toggle visibility instead.') return self.left_labels @ylabels_left.setter def ylabels_left(self, value): warnings.warn('The .ylabels_left attribute is deprecated. Please ' 'use .left_labels to toggle visibility instead.') self.left_labels = value @property def ylabels_right(self): warnings.warn('The .ylabels_right attribute is deprecated. 
Please ' 'use .right_labels to toggle visibility instead.') return self.right_labels @ylabels_right.setter def ylabels_right(self, value): warnings.warn('The .ylabels_right attribute is deprecated. Please ' 'use .right_labels to toggle visibility instead.') self.right_labels = value def _draw_event(self, event): if self.has_labels(): self._update_labels_visibility(event.renderer) def has_labels(self): return hasattr(self, '_labels') and self._labels @property def label_artists(self): if self.has_labels(): return self._labels return [] def _crs_transform(self): """ Get the drawing transform for our gridlines. Note ---- The drawing transform depends on the transform of our 'axes', so it may change dynamically. """ transform = self.crs if not isinstance(transform, mtrans.Transform): transform = transform._as_mpl_transform(self.axes) return transform @staticmethod def _round(x, base=5): if np.isnan(base): base = 5 return int(base * round(float(x) / base)) def _find_midpoints(self, lim, ticks): # Find the center point between each lat gridline. if len(ticks) > 1: cent = np.diff(ticks).mean() / 2 else: cent = np.nan if isinstance(self.axes.projection, _POLAR_PROJS): lq = 90 uq = 90 else: lq = 25 uq = 75 midpoints = (self._round(np.percentile(lim, lq), cent), self._round(np.percentile(lim, uq), cent)) return midpoints def _draw_gridliner(self, nx=None, ny=None, renderer=None): """Create Artists for all visible elements and add to our Axes.""" # Check status if self._plotted: return self._plotted = True # Inits lon_lim, lat_lim = self._axes_domain(nx=nx, ny=ny) transform = self._crs_transform() rc_params = matplotlib.rcParams n_steps = self.n_steps crs = self.crs # Get nice ticks within crs domain lon_ticks = self.xlocator.tick_values(lon_lim[0], lon_lim[1]) lat_ticks = self.ylocator.tick_values(lat_lim[0], lat_lim[1]) lon_ticks = [value for value in lon_ticks if value >= max(lon_lim[0], crs.x_limits[0]) and value <= min(lon_lim[1], crs.x_limits[1])] lat_ticks = [value for value in lat_ticks if value >= max(lat_lim[0], crs.y_limits[0]) and value <= min(lat_lim[1], crs.y_limits[1])] ##################### # Gridlines drawing # ##################### collection_kwargs = self.collection_kwargs if collection_kwargs is None: collection_kwargs = {} collection_kwargs = collection_kwargs.copy() collection_kwargs['transform'] = transform # XXX doesn't gracefully handle lw vs linewidth aliases... collection_kwargs.setdefault('color', rc_params['grid.color']) collection_kwargs.setdefault('linestyle', rc_params['grid.linestyle']) collection_kwargs.setdefault('linewidth', rc_params['grid.linewidth']) # Meridians lat_min, lat_max = lat_lim if lat_ticks: lat_min = min(lat_min, min(lat_ticks)) lat_max = max(lat_max, max(lat_ticks)) lon_lines = np.empty((len(lon_ticks), n_steps, 2)) lon_lines[:, :, 0] = np.array(lon_ticks)[:, np.newaxis] lon_lines[:, :, 1] = np.linspace(lat_min, lat_max, n_steps)[np.newaxis, :] if self.xlines: nx = len(lon_lines) + 1 # XXX this bit is cartopy specific. (for circular longitudes) # Purpose: omit plotting the last x line, # as it may overlap the first. 
if (isinstance(crs, Projection) and isinstance(crs, _RectangularProjection) and abs(np.diff(lon_lim)) == abs(np.diff(crs.x_limits))): nx -= 1 lon_lc = mcollections.LineCollection(lon_lines, **collection_kwargs) self.xline_artists.append(lon_lc) self.axes.add_collection(lon_lc, autolim=False) # Parallels lon_min, lon_max = lon_lim if lon_ticks: lon_min = min(lon_min, min(lon_ticks)) lon_max = max(lon_max, max(lon_ticks)) lat_lines = np.empty((len(lat_ticks), n_steps, 2)) lat_lines[:, :, 0] = np.linspace(lon_min, lon_max, n_steps)[np.newaxis, :] lat_lines[:, :, 1] = np.array(lat_ticks)[:, np.newaxis] if self.ylines: lat_lc = mcollections.LineCollection(lat_lines, **collection_kwargs) self.yline_artists.append(lat_lc) self.axes.add_collection(lat_lc, autolim=False) ################# # Label drawing # ################# self.bottom_label_artists = [] self.top_label_artists = [] self.left_label_artists = [] self.right_label_artists = [] if not (self.left_labels or self.right_labels or self.bottom_labels or self.top_labels): return self._assert_can_draw_ticks() # Get the real map boundaries map_boundary_vertices = self.axes.patch.get_path().vertices map_boundary = sgeom.Polygon(map_boundary_vertices) self._labels = [] if self.x_inline: y_midpoints = self._find_midpoints(lat_lim, lat_ticks) if self.y_inline: x_midpoints = self._find_midpoints(lon_lim, lon_ticks) for lonlat, lines, line_ticks, formatter, label_style in ( ('lon', lon_lines, lon_ticks, self.xformatter, self.xlabel_style), ('lat', lat_lines, lat_ticks, self.yformatter, self.ylabel_style)): formatter.set_locs(line_ticks) for line, tick_value in zip(lines, line_ticks): # Intersection of line with map boundary line = self.axes.projection.transform_points( crs, line[:, 0], line[:, 1])[:, :2] infs = np.isinf(line).any(axis=1) line = line.compress(~infs, axis=0) if line.size == 0: continue line = sgeom.LineString(line) if line.intersects(map_boundary): intersection = line.intersection(map_boundary) del line if intersection.is_empty: continue if isinstance(intersection, sgeom.MultiPoint): if len(intersection) < 2: continue tails = [[(pt.x, pt.y) for pt in intersection[:2]]] heads = [[(pt.x, pt.y) for pt in intersection[-1:-3:-1]]] elif isinstance(intersection, (sgeom.LineString, sgeom.MultiLineString)): if isinstance(intersection, sgeom.LineString): intersection = [intersection] elif len(intersection) > 4: # Gridline and map boundary are parallel # and they intersect themselves too much # it results in a multiline string # that must be converted to a single linestring. # This is an empirical workaround for a problem # that can probably be solved in a cleaner way. xy = np.append(intersection[0], intersection[-1], axis=0) intersection = [sgeom.LineString(xy)] tails = [] heads = [] for inter in intersection: if len(inter.coords) < 2: continue tails.append(inter.coords[:2]) heads.append(inter.coords[-1:-3:-1]) if not tails: continue elif isinstance(intersection, sgeom.collection.GeometryCollection): # This is a collection of Point and LineString that # represent the same gridline. # We only consider the first geometries, merge their # coordinates and keep first two points to get only one # tail ... xy = [] for geom in intersection.geoms: for coord in geom.coords: xy.append(coord) if len(xy) == 2: break if len(xy) == 2: break tails = [xy] # ... and the last geometries, merge their coordinates # and keep last two points to get only one head. 
xy = [] for geom in reversed(intersection.geoms): for coord in reversed(geom.coords): xy.append(coord) if len(xy) == 2: break if len(xy) == 2: break heads = [xy] else: warnings.warn( 'Unsupported intersection geometry for gridline ' 'labels: ' + intersection.__class__.__name__) continue del intersection # Loop on head and tail and plot label by extrapolation for tail, head in zip(tails, heads): for i, (pt0, pt1) in enumerate([tail, head]): kw, angle, loc = self._segment_to_text_specs( pt0, pt1, lonlat) if not getattr(self, loc+'_labels'): continue kw.update(label_style, bbox={'pad': 0, 'visible': False}) text = formatter(tick_value) if self.y_inline and lonlat == 'lat': # 180 degrees isn't formatted with a # suffix and adds confusion if it's inline if abs(tick_value) == 180: continue x = x_midpoints[i] y = tick_value kw.update(clip_on=True) y_set = True else: x = pt0[0] y_set = False if self.x_inline and lonlat == 'lon': if abs(tick_value) == 180: continue x = tick_value y = y_midpoints[i] kw.update(clip_on=True) elif not y_set: y = pt0[1] tt = self.axes.text(x, y, text, **kw) tt._angle = angle priority = (((lonlat == 'lon') and loc in ('bottom', 'top')) or ((lonlat == 'lat') and loc in ('left', 'right'))) self._labels.append((lonlat, priority, tt)) getattr(self, loc + '_label_artists').append(tt) # Sort labels if self._labels: self._labels.sort(key=operator.itemgetter(0), reverse=True) self._update_labels_visibility(renderer) def _segment_to_text_specs(self, pt0, pt1, lonlat): """Get appropriate kwargs for a label from lon or lat line segment""" x0, y0 = pt0 x1, y1 = pt1 angle = np.arctan2(y0-y1, x0-x1) * 180 / np.pi kw, loc = self._segment_angle_to_text_specs(angle, lonlat) return kw, angle, loc def _text_angle_to_specs_(self, angle, lonlat): """Get specs for a rotated label from its angle in degrees""" angle %= 360 if angle > 180: angle -= 360 if ((self.x_inline and lonlat == 'lon') or (self.y_inline and lonlat == 'lat')): kw = {'rotation': 0, 'rotation_mode': 'anchor', 'ha': 'center', 'va': 'center'} loc = 'bottom' return kw, loc # Default options kw = {'rotation': angle, 'rotation_mode': 'anchor'} # Options that depend in which quarter the angle falls if abs(angle) <= 45: loc = 'right' kw.update(ha='left', va='center') elif abs(angle) >= 135: loc = 'left' kw.update(ha='right', va='center') kw['rotation'] -= np.sign(angle) * 180 elif angle > 45: loc = 'top' kw.update(ha='center', va='bottom', rotation=angle-90) else: loc = 'bottom' kw.update(ha='center', va='top', rotation=angle+90) return kw, loc def _segment_angle_to_text_specs(self, angle, lonlat): """Get appropriate kwargs for a given text angle""" kw, loc = self._text_angle_to_specs_(angle, lonlat) if not self.rotate_labels: angle = {'top': 90., 'right': 0., 'bottom': -90., 'left': 180.}[loc] del kw['rotation'] if ((self.x_inline and lonlat == 'lon') or (self.y_inline and lonlat == 'lat')): kw.update(transform=cartopy.crs.PlateCarree()) else: xpadding = (self.xpadding if self.xpadding is not None else matplotlib.rc_params['xtick.major.pad']) ypadding = (self.ypadding if self.ypadding is not None else matplotlib.rc_params['ytick.major.pad']) dx = ypadding * np.cos(angle * np.pi / 180) dy = xpadding * np.sin(angle * np.pi / 180) transform = mtrans.offset_copy( self.axes.transData, self.axes.figure, x=dx, y=dy, units='dots') kw.update(transform=transform) return kw, loc def _update_labels_visibility(self, renderer): """Update the visibility of each plotted label The following rules apply: - Labels are plotted and checked by order of 
priority, with a high priority for longitude labels at the bottom and top of the map, and the reverse for latitude labels. - A label must not overlap another label marked as visible. - A label must not overlap the map boundary. - When a label is about to be hidden, other angles are tried in the absolute given limit of max_delta_angle by increments of delta_angle of difference from the original angle. """ if renderer is None or not self._labels: return paths = [] outline_path = None delta_angle = 22.5 max_delta_angle = 45 axes_children = self.axes.get_children() def remove_path_dupes(path): """ Remove duplicate points in a path (zero-length segments). This is necessary only for Matplotlib 3.1.0 -- 3.1.2, because Path.intersects_path incorrectly returns True for any paths with such segments. """ segment_length = np.diff(path.vertices, axis=0) mask = np.logical_or.reduce(segment_length != 0, axis=1) mask = np.append(mask, True) path = mpath.Path(np.compress(mask, path.vertices, axis=0), np.compress(mask, path.codes, axis=0)) return path for lonlat, priority, artist in self._labels: if artist not in axes_children: warnings.warn('The labels of this gridliner do not belong to ' 'the gridliner axes') orig_specs = {'rotation': artist.get_rotation(), 'ha': artist.get_ha(), 'va': artist.get_va()} # Compute angles to try angles = [None] for abs_delta_angle in np.arange(delta_angle, max_delta_angle+1, delta_angle): angles.append(artist._angle + abs_delta_angle) angles.append(artist._angle - abs_delta_angle) # Loop on angles until it works for angle in angles: if ((self.x_inline and lonlat == 'lon') or (self.y_inline and lonlat == 'lat')): angle = 0 if angle is not None: specs, _ = self._segment_angle_to_text_specs(angle, lonlat) artist.update(specs) artist.update_bbox_position_size(renderer) this_patch = artist.get_bbox_patch() this_path = this_patch.get_path().transformed( this_patch.get_transform()) if '3.1.0' <= matplotlib.__version__ <= '3.1.2': this_path = remove_path_dupes(this_path) center = artist.get_transform().transform_point( artist.get_position()) visible = False for path in paths: # Check it does not overlap another label if this_path.intersects_path(path): break else: # Finally check that it does not overlap the map if outline_path is None: outline_path = (self.axes.patch.get_path() .transformed(self.axes.transData)) if '3.1.0' <= matplotlib.__version__ <= '3.1.2': outline_path = remove_path_dupes(outline_path) # Inline must be within the map. if ((lonlat == 'lon' and self.x_inline) or (lonlat == 'lat' and self.y_inline)): # TODO: When Matplotlib clip path works on text, this # clipping can be left to it. if outline_path.contains_point(center): visible = True # Non-inline must not run through the outline. elif not outline_path.intersects_path(this_path): visible = True # Good if visible: break if ((self.x_inline and lonlat == 'lon') or (self.y_inline and lonlat == 'lat')): break # Action artist.set_visible(visible) if not visible: artist.update(orig_specs) else: paths.append(this_path) def _assert_can_draw_ticks(self): """ Check to see if ticks can be drawn. Either returns True or raises an exception. """ # Check labelling is supported, currently a limited set of options. if not isinstance(self.crs, cartopy.crs.PlateCarree): raise TypeError('Cannot label {crs.__class__.__name__} gridlines.' 
' Only PlateCarree gridlines are currently ' 'supported.'.format(crs=self.crs)) return True def _axes_domain(self, nx=None, ny=None): """Return lon_range, lat_range""" DEBUG = False transform = self._crs_transform() ax_transform = self.axes.transAxes desired_trans = ax_transform - transform nx = nx or 100 ny = ny or 100 x = np.linspace(1e-9, 1 - 1e-9, nx) y = np.linspace(1e-9, 1 - 1e-9, ny) x, y = np.meshgrid(x, y) coords = np.column_stack((x.ravel(), y.ravel())) in_data = desired_trans.transform(coords) ax_to_bkg_patch = self.axes.transAxes - self.axes.patch.get_transform() # convert the coordinates of the data to the background patches # coordinates background_coord = ax_to_bkg_patch.transform(coords) ok = self.axes.patch.get_path().contains_points(background_coord) if DEBUG: import matplotlib.pyplot as plt plt.plot(coords[ok, 0], coords[ok, 1], 'or', clip_on=False, transform=ax_transform) plt.plot(coords[~ok, 0], coords[~ok, 1], 'ob', clip_on=False, transform=ax_transform) inside = in_data[ok, :] # If there were no data points in the axes we just use the x and y # range of the projection. if inside.size == 0: lon_range = self.crs.x_limits lat_range = self.crs.y_limits else: # np.isfinite must be used to prevent np.inf values that # not filtered by np.nanmax for some projections lat_max = np.compress(np.isfinite(inside[:, 1]), inside[:, 1]) if lat_max.size == 0: lon_range = self.crs.x_limits lat_range = self.crs.y_limits else: lat_max = lat_max.max() lon_range = np.nanmin(inside[:, 0]), np.nanmax(inside[:, 0]) lat_range = np.nanmin(inside[:, 1]), lat_max # XXX Cartopy specific thing. Perhaps make this bit a specialisation # in a subclass... crs = self.crs if isinstance(crs, Projection): lon_range = np.clip(lon_range, *crs.x_limits) lat_range = np.clip(lat_range, *crs.y_limits) # if the limit is >90% of the full x limit, then just use the full # x limit (this makes circular handling better) prct = np.abs(np.diff(lon_range) / np.diff(crs.x_limits)) if prct > 0.9: lon_range = crs.x_limits return lon_range, lat_range
[ "numpy.arctan2", "cartopy.mpl.ticker.LongitudeFormatter", "numpy.isnan", "numpy.clip", "cartopy.mpl.ticker.LongitudeLocator", "numpy.sin", "numpy.arange", "numpy.meshgrid", "matplotlib.transforms.offset_copy", "numpy.logical_or.reduce", "shapely.geometry.Polygon", "matplotlib.ticker.MaxNLocator", "numpy.isfinite", "matplotlib.ticker.FixedLocator", "numpy.append", "shapely.geometry.LineString", "numpy.linspace", "matplotlib.collections.LineCollection", "numpy.isinf", "numpy.percentile", "numpy.cos", "numpy.compress", "cartopy.mpl.ticker.LatitudeFormatter", "cartopy.mpl.ticker.LatitudeLocator", "numpy.nanmax", "matplotlib.pyplot.plot", "numpy.nanmin", "numpy.diff", "numpy.array", "numpy.sign", "cartopy.crs.PlateCarree", "warnings.warn", "operator.itemgetter" ]
[((732, 794), 'matplotlib.ticker.MaxNLocator', 'mticker.MaxNLocator', ([], {'nbins': '(9)', 'steps': '[1, 1.5, 1.8, 2, 3, 6, 10]'}), '(nbins=9, steps=[1, 1.5, 1.8, 2, 3, 6, 10])\n', (751, 794), True, 'import matplotlib.ticker as mticker\n'), ((813, 841), 'matplotlib.ticker.MaxNLocator', 'mticker.MaxNLocator', ([], {'nbins': '(9)'}), '(nbins=9)\n', (832, 841), True, 'import matplotlib.ticker as mticker\n'), ((1327, 1362), 'numpy.array', 'np.array', (['lons'], {'copy': '(False)', 'ndmin': '(1)'}), '(lons, copy=False, ndmin=1)\n', (1335, 1362), True, 'import numpy as np\n'), ((11346, 11467), 'warnings.warn', 'warnings.warn', (['"""The .xlabels_top attribute is deprecated. Please use .top_labels to toggle visibility instead."""'], {}), "(\n 'The .xlabels_top attribute is deprecated. Please use .top_labels to toggle visibility instead.'\n )\n", (11359, 11467), False, 'import warnings\n'), ((11581, 11702), 'warnings.warn', 'warnings.warn', (['"""The .xlabels_top attribute is deprecated. Please use .top_labels to toggle visibility instead."""'], {}), "(\n 'The .xlabels_top attribute is deprecated. Please use .top_labels to toggle visibility instead.'\n )\n", (11594, 11702), False, 'import warnings\n'), ((11803, 11930), 'warnings.warn', 'warnings.warn', (['"""The .xlabels_bottom attribute is deprecated. Please use .bottom_labels to toggle visibility instead."""'], {}), "(\n 'The .xlabels_bottom attribute is deprecated. Please use .bottom_labels to toggle visibility instead.'\n )\n", (11816, 11930), False, 'import warnings\n'), ((12053, 12180), 'warnings.warn', 'warnings.warn', (['"""The .xlabels_bottom attribute is deprecated. Please use .bottom_labels to toggle visibility instead."""'], {}), "(\n 'The .xlabels_bottom attribute is deprecated. Please use .bottom_labels to toggle visibility instead.'\n )\n", (12066, 12180), False, 'import warnings\n'), ((12282, 12405), 'warnings.warn', 'warnings.warn', (['"""The .ylabels_left attribute is deprecated. Please use .left_labels to toggle visibility instead."""'], {}), "(\n 'The .ylabels_left attribute is deprecated. Please use .left_labels to toggle visibility instead.'\n )\n", (12295, 12405), False, 'import warnings\n'), ((12522, 12645), 'warnings.warn', 'warnings.warn', (['"""The .ylabels_left attribute is deprecated. Please use .left_labels to toggle visibility instead."""'], {}), "(\n 'The .ylabels_left attribute is deprecated. Please use .left_labels to toggle visibility instead.'\n )\n", (12535, 12645), False, 'import warnings\n'), ((12746, 12871), 'warnings.warn', 'warnings.warn', (['"""The .ylabels_right attribute is deprecated. Please use .right_labels to toggle visibility instead."""'], {}), "(\n 'The .ylabels_right attribute is deprecated. Please use .right_labels to toggle visibility instead.'\n )\n", (12759, 12871), False, 'import warnings\n'), ((12991, 13116), 'warnings.warn', 'warnings.warn', (['"""The .ylabels_right attribute is deprecated. Please use .right_labels to toggle visibility instead."""'], {}), "(\n 'The .ylabels_right attribute is deprecated. 
Please use .right_labels to toggle visibility instead.'\n )\n", (13004, 13116), False, 'import warnings\n'), ((13980, 13994), 'numpy.isnan', 'np.isnan', (['base'], {}), '(base)\n', (13988, 13994), True, 'import numpy as np\n'), ((18528, 18564), 'shapely.geometry.Polygon', 'sgeom.Polygon', (['map_boundary_vertices'], {}), '(map_boundary_vertices)\n', (18541, 18564), True, 'import shapely.geometry as sgeom\n'), ((33743, 33776), 'numpy.linspace', 'np.linspace', (['(1e-09)', '(1 - 1e-09)', 'nx'], {}), '(1e-09, 1 - 1e-09, nx)\n', (33754, 33776), True, 'import numpy as np\n'), ((33787, 33820), 'numpy.linspace', 'np.linspace', (['(1e-09)', '(1 - 1e-09)', 'ny'], {}), '(1e-09, 1 - 1e-09, ny)\n', (33798, 33820), True, 'import numpy as np\n'), ((33834, 33851), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (33845, 33851), True, 'import numpy as np\n'), ((16474, 16493), 'numpy.array', 'np.array', (['lon_ticks'], {}), '(lon_ticks)\n', (16482, 16493), True, 'import numpy as np\n'), ((16538, 16576), 'numpy.linspace', 'np.linspace', (['lat_min', 'lat_max', 'n_steps'], {}), '(lat_min, lat_max, n_steps)\n', (16549, 16576), True, 'import numpy as np\n'), ((17096, 17155), 'matplotlib.collections.LineCollection', 'mcollections.LineCollection', (['lon_lines'], {}), '(lon_lines, **collection_kwargs)\n', (17123, 17155), True, 'import matplotlib.collections as mcollections\n'), ((17579, 17617), 'numpy.linspace', 'np.linspace', (['lon_min', 'lon_max', 'n_steps'], {}), '(lon_min, lon_max, n_steps)\n', (17590, 17617), True, 'import numpy as np\n'), ((17703, 17722), 'numpy.array', 'np.array', (['lat_ticks'], {}), '(lat_ticks)\n', (17711, 17722), True, 'import numpy as np\n'), ((17783, 17842), 'matplotlib.collections.LineCollection', 'mcollections.LineCollection', (['lat_lines'], {}), '(lat_lines, **collection_kwargs)\n', (17810, 17842), True, 'import matplotlib.collections as mcollections\n'), ((27935, 28023), 'matplotlib.transforms.offset_copy', 'mtrans.offset_copy', (['self.axes.transData', 'self.axes.figure'], {'x': 'dx', 'y': 'dy', 'units': '"""dots"""'}), "(self.axes.transData, self.axes.figure, x=dx, y=dy, units\n ='dots')\n", (27953, 28023), True, 'import matplotlib.transforms as mtrans\n'), ((29364, 29394), 'numpy.diff', 'np.diff', (['path.vertices'], {'axis': '(0)'}), '(path.vertices, axis=0)\n', (29371, 29394), True, 'import numpy as np\n'), ((29414, 29463), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(segment_length != 0)'], {'axis': '(1)'}), '(segment_length != 0, axis=1)\n', (29434, 29463), True, 'import numpy as np\n'), ((29483, 29504), 'numpy.append', 'np.append', (['mask', '(True)'], {}), '(mask, True)\n', (29492, 29504), True, 'import numpy as np\n'), ((30160, 30216), 'numpy.arange', 'np.arange', (['delta_angle', '(max_delta_angle + 1)', 'delta_angle'], {}), '(delta_angle, max_delta_angle + 1, delta_angle)\n', (30169, 30216), True, 'import numpy as np\n'), ((34347, 34435), 'matplotlib.pyplot.plot', 'plt.plot', (['coords[ok, 0]', 'coords[ok, 1]', '"""or"""'], {'clip_on': '(False)', 'transform': 'ax_transform'}), "(coords[ok, 0], coords[ok, 1], 'or', clip_on=False, transform=\n ax_transform)\n", (34355, 34435), True, 'import matplotlib.pyplot as plt\n'), ((34464, 34554), 'matplotlib.pyplot.plot', 'plt.plot', (['coords[~ok, 0]', 'coords[~ok, 1]', '"""ob"""'], {'clip_on': '(False)', 'transform': 'ax_transform'}), "(coords[~ok, 0], coords[~ok, 1], 'ob', clip_on=False, transform=\n ax_transform)\n", (34472, 34554), True, 'import matplotlib.pyplot as plt\n'), ((35595, 35628), 
'numpy.clip', 'np.clip', (['lon_range', '*crs.x_limits'], {}), '(lon_range, *crs.x_limits)\n', (35602, 35628), True, 'import numpy as np\n'), ((35653, 35686), 'numpy.clip', 'np.clip', (['lat_range', '*crs.y_limits'], {}), '(lat_range, *crs.y_limits)\n', (35660, 35686), True, 'import numpy as np\n'), ((6922, 6952), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['xlocator'], {}), '(xlocator)\n', (6942, 6952), True, 'import matplotlib.ticker as mticker\n'), ((7073, 7098), 'cartopy.mpl.ticker.LongitudeLocator', 'LongitudeLocator', ([], {'dms': 'dms'}), '(dms=dms)\n', (7089, 7098), False, 'from cartopy.mpl.ticker import LongitudeLocator, LatitudeLocator, LongitudeFormatter, LatitudeFormatter\n'), ((7377, 7407), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['ylocator'], {}), '(ylocator)\n', (7397, 7407), True, 'import matplotlib.ticker as mticker\n'), ((7528, 7552), 'cartopy.mpl.ticker.LatitudeLocator', 'LatitudeLocator', ([], {'dms': 'dms'}), '(dms=dms)\n', (7543, 7552), False, 'from cartopy.mpl.ticker import LongitudeLocator, LatitudeLocator, LongitudeFormatter, LatitudeFormatter\n'), ((7729, 7756), 'cartopy.mpl.ticker.LongitudeFormatter', 'LongitudeFormatter', ([], {'dms': 'dms'}), '(dms=dms)\n', (7747, 7756), False, 'from cartopy.mpl.ticker import LongitudeLocator, LatitudeLocator, LongitudeFormatter, LatitudeFormatter\n'), ((8059, 8085), 'cartopy.mpl.ticker.LatitudeFormatter', 'LatitudeFormatter', ([], {'dms': 'dms'}), '(dms=dms)\n', (8076, 8085), False, 'from cartopy.mpl.ticker import LongitudeLocator, LatitudeLocator, LongitudeFormatter, LatitudeFormatter\n'), ((14468, 14490), 'numpy.percentile', 'np.percentile', (['lim', 'lq'], {}), '(lim, lq)\n', (14481, 14490), True, 'import numpy as np\n'), ((14532, 14554), 'numpy.percentile', 'np.percentile', (['lim', 'uq'], {}), '(lim, uq)\n', (14545, 14554), True, 'import numpy as np\n'), ((19517, 19539), 'shapely.geometry.LineString', 'sgeom.LineString', (['line'], {}), '(line)\n', (19533, 19539), True, 'import shapely.geometry as sgeom\n'), ((25743, 25771), 'numpy.arctan2', 'np.arctan2', (['(y0 - y1)', '(x0 - x1)'], {}), '(y0 - y1, x0 - x1)\n', (25753, 25771), True, 'import numpy as np\n'), ((27827, 27854), 'numpy.cos', 'np.cos', (['(angle * np.pi / 180)'], {}), '(angle * np.pi / 180)\n', (27833, 27854), True, 'import numpy as np\n'), ((27883, 27910), 'numpy.sin', 'np.sin', (['(angle * np.pi / 180)'], {}), '(angle * np.pi / 180)\n', (27889, 27910), True, 'import numpy as np\n'), ((29535, 29575), 'numpy.compress', 'np.compress', (['mask', 'path.vertices'], {'axis': '(0)'}), '(mask, path.vertices, axis=0)\n', (29546, 29575), True, 'import numpy as np\n'), ((29607, 29644), 'numpy.compress', 'np.compress', (['mask', 'path.codes'], {'axis': '(0)'}), '(mask, path.codes, axis=0)\n', (29618, 29644), True, 'import numpy as np\n'), ((29786, 29872), 'warnings.warn', 'warnings.warn', (['"""The labels of this gridliner do not belong to the gridliner axes"""'], {}), "(\n 'The labels of this gridliner do not belong to the gridliner axes')\n", (29799, 29872), False, 'import warnings\n'), ((35006, 35031), 'numpy.isfinite', 'np.isfinite', (['inside[:, 1]'], {}), '(inside[:, 1])\n', (35017, 35031), True, 'import numpy as np\n'), ((25459, 25481), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (25478, 25481), False, 'import operator\n'), ((26730, 26744), 'numpy.sign', 'np.sign', (['angle'], {}), '(angle)\n', (26737, 26744), True, 'import numpy as np\n'), ((27484, 27509), 'cartopy.crs.PlateCarree', 
'cartopy.crs.PlateCarree', ([], {}), '()\n', (27507, 27509), False, 'import cartopy\n'), ((35293, 35316), 'numpy.nanmin', 'np.nanmin', (['inside[:, 0]'], {}), '(inside[:, 0])\n', (35302, 35316), True, 'import numpy as np\n'), ((35318, 35341), 'numpy.nanmax', 'np.nanmax', (['inside[:, 0]'], {}), '(inside[:, 0])\n', (35327, 35341), True, 'import numpy as np\n'), ((35370, 35393), 'numpy.nanmin', 'np.nanmin', (['inside[:, 1]'], {}), '(inside[:, 1])\n', (35379, 35393), True, 'import numpy as np\n'), ((35853, 35871), 'numpy.diff', 'np.diff', (['lon_range'], {}), '(lon_range)\n', (35860, 35871), True, 'import numpy as np\n'), ((35874, 35895), 'numpy.diff', 'np.diff', (['crs.x_limits'], {}), '(crs.x_limits)\n', (35881, 35895), True, 'import numpy as np\n'), ((14216, 14230), 'numpy.diff', 'np.diff', (['ticks'], {}), '(ticks)\n', (14223, 14230), True, 'import numpy as np\n'), ((17001, 17017), 'numpy.diff', 'np.diff', (['lon_lim'], {}), '(lon_lim)\n', (17008, 17017), True, 'import numpy as np\n'), ((17026, 17047), 'numpy.diff', 'np.diff', (['crs.x_limits'], {}), '(crs.x_limits)\n', (17033, 17047), True, 'import numpy as np\n'), ((19351, 19365), 'numpy.isinf', 'np.isinf', (['line'], {}), '(line)\n', (19359, 19365), True, 'import numpy as np\n'), ((22950, 23060), 'warnings.warn', 'warnings.warn', (["('Unsupported intersection geometry for gridline labels: ' + intersection.\n __class__.__name__)"], {}), "('Unsupported intersection geometry for gridline labels: ' +\n intersection.__class__.__name__)\n", (22963, 23060), False, 'import warnings\n'), ((20900, 20952), 'numpy.append', 'np.append', (['intersection[0]', 'intersection[-1]'], {'axis': '(0)'}), '(intersection[0], intersection[-1], axis=0)\n', (20909, 20952), True, 'import numpy as np\n'), ((21040, 21060), 'shapely.geometry.LineString', 'sgeom.LineString', (['xy'], {}), '(xy)\n', (21056, 21060), True, 'import shapely.geometry as sgeom\n')]
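The tuples above catalogue cartopy gridliner calls (PlateCarree, FixedLocator, the Longitude/Latitude locators and formatters, and the right_labels deprecation warning). As a rough stand-alone illustration of how those APIs fit together — not taken from the extracted file itself, and assuming cartopy >= 0.18 with matplotlib available — labelled gridlines can be set up like this:

import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter

# GeoAxes in the PlateCarree projection with automatically labelled gridlines
ax = plt.axes(projection=ccrs.PlateCarree())
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True)
gl.top_labels = False      # new-style toggles; .xlabels_top / .ylabels_right are deprecated
gl.right_labels = False    # the attribute the warning quoted above points to
gl.xformatter = LongitudeFormatter(dms=False)
gl.yformatter = LatitudeFormatter(dms=False)
plt.savefig("gridlines.png")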
import math import subprocess import einops as eo from loguru import logger import numpy as np import pandas as pd from PIL import Image from scipy.signal import savgol_filter import torch from torch import optim, nn from collections import Counter from pytti import ( format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode, ) from pytti.Image import DifferentiableImage, PixelImage from pytti.Notebook import tqdm, make_hbox from pytti.rotoscoper import update_rotoscopers from pytti.Transforms import ( animate_2d, zoom_3d, animate_video_source, ) # deprecate this from labellines import labelLines from IPython import display def unpack_dict(D, n=2): """ Given a dictionary D and a number n, return a tuple of n dictionaries, each containing the same keys as D and values corresponding to those values of D at the corresponding index :param D: a dictionary :param n: number of samples to draw, defaults to 2 (optional) :return: A tuple of dictionaries. """ ds = [{k: V[i] for k, V in D.items()} for i in range(n)] return tuple(ds) # this only gets used in the plot_losses method below. # deprecate (tensorboard) def smooth_dataframe(df, window_size): """applies a moving average filter to the columns of df""" smoothed_df = pd.DataFrame().reindex_like(df) for key in df.columns: smoothed_df[key] = savgol_filter(df[key], window_size, 2, mode="nearest") return smoothed_df class DirectImageGuide: """ Image guide that uses an optimizer and torch autograd to optimize an image representation Based on the BigGan+CLIP algorithm by advadnoun (https://twitter.com/advadnoun) image_rep: (DifferentiableImage) image representation embedder: (Module) image embedder optimizer: (Class) optimizer class to use. Defaults to Adam all other arguments are passed as kwargs to the optimizer. """ def __init__( self, image_rep: DifferentiableImage, embedder: nn.Module, optimizer: optim.Optimizer = None, lr: float = None, null_update=True, params=None, writer=None, OUTPATH=None, base_name=None, fig=None, axs=None, video_frames=None, optical_flows=None, stabilization_augs=None, last_frame_semantic=None, semantic_init_prompt=None, init_augs=None, **optimizer_params, # pretty sure passing in optimizer_params isn't being used anywhere # We pass in the optimizer object itself anyway... why not just give it # initialize it with `**optimizer_params`` before passing it to this? ): self.image_rep = image_rep self.embedder = embedder if lr is None: lr = image_rep.lr optimizer_params["lr"] = lr self.optimizer_params = optimizer_params if optimizer is None: self.optimizer = optim.Adam(image_rep.parameters(), **optimizer_params) else: self.optimizer = optimizer self.dataframe = [] self.null_update = null_update self.params = params self.writer = writer self.OUTPATH = OUTPATH self.base_name = base_name self.fig = fig self.axs = axs self.video_frames = video_frames self.optical_flows = optical_flows if stabilization_augs is None: stabilization_augs = [] self.stabilization_augs = stabilization_augs self.last_frame_semantic = last_frame_semantic self.semantic_init_prompt = semantic_init_prompt if init_augs is None: init_augs = [] self.init_augs = init_augs def run_steps( self, n_steps, prompts, interp_prompts, loss_augs, stop=-math.inf, interp_steps=0, i_offset=0, skipped_steps=0, gradient_accumulation_steps: int = 1, ): """ runs the optimizer prompts: (ClipPrompt list) list of prompts n_steps: (positive integer) steps to run returns: the number of steps run """ for i in tqdm(range(n_steps)): # not a huge fan of this. 
# currently need it for PixelImage.encode_image # TO DO: all that stuff we just moved around: # let's attach it to a "Renderer" class, # and here we can check if the DirectImageGuide was # initialized with a renderer or not, and call self.renderer.update() # if appropriate if not self.null_update: self.update(i + i_offset, i + skipped_steps) losses = self.train( i + skipped_steps, prompts, interp_prompts, loss_augs, interp_steps=interp_steps, gradient_accumulation_steps=gradient_accumulation_steps, ) if losses["TOTAL"] <= stop: break return i + 1 def set_optim(self, opt=None): if opt is not None: self.optimizer = opt else: self.optimizer = optim.Adam( self.image_rep.parameters(), **self.optimizer_params ) def clear_dataframe(self): """ The .dataframe attribute is just a list of pd.DataFrames that are tracking losses for the current scene. I wanna say one for each prompt. To do: flush all that out and let tensorboard handle it. """ self.dataframe = [] # deprecate (tensorboard) def plot_losses(self, axs): def plot_dataframe(df, ax, legend=False): keys = list(df) keys.sort(reverse=True, key=lambda k: df[k].iloc[-1]) ax.clear() df[keys].plot(ax=ax, legend=legend) if legend: ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") ax.tick_params( labelbottom=True, labeltop=False, labelleft=True, labelright=False, bottom=True, top=False, left=True, right=False, ) last_x = df.last_valid_index() lines = ax.get_lines() colors = [l.get_color() for l in lines] labels = [l.get_label() for l in lines] ax.relim() ax.autoscale_view() labelLines(ax.get_lines(), align=False) return dict(zip(labels, colors)) dfs = self.dataframe[:] if dfs != []: dfs[0] = smooth_dataframe(dfs[0], 17) for i, (df, ax) in enumerate(zip(dfs, axs)): if len(df.index) < 2: return False # m = df.apply(lambda col: col.first_valid_index()) # print(m) # print(df.lookup(m, m.index)) # rel_loss = (df-df.lookup(m, m.index)) if not df.empty: plot_dataframe(df, ax, legend=i == 0) ax.set_ylabel("Loss") ax.set_xlabel("Step") return True def train( self, i, prompts, interp_prompts, loss_augs, interp_steps=0, save_loss=True, gradient_accumulation_steps: int = 1, ): """ steps the optimizer promts: (ClipPrompt list) list of prompts """ self.optimizer.zero_grad() z = self.image_rep.decode_training_tensor() # logger.debug(z.shape) # [1, 3, height, width] losses = [] aug_losses = { aug: aug(format_input(z, self.image_rep, aug), self.image_rep) for aug in loss_augs } image_augs = self.image_rep.image_loss() image_losses = {aug: aug(self.image_rep) for aug in image_augs} # losses_accumulator, losses_raw_accumulator = Counter(), Counter() losses, losses_raw = [], [] # just... 
don't care total_loss = 0 if self.embedder is not None: for mb_i in range(gradient_accumulation_steps): # logger.debug(mb_i) image_embeds, offsets, sizes = self.embedder(self.image_rep, input=z) t = 1 interp_losses = [0] if i < interp_steps: t = i / interp_steps interp_losses = [ prompt( format_input(image_embeds, self.embedder, prompt), format_input(offsets, self.embedder, prompt), format_input(sizes, self.embedder, prompt), )[0] * (1 - t) for prompt in interp_prompts ] prompt_losses = { prompt: prompt( format_input(image_embeds, self.embedder, prompt), format_input(offsets, self.embedder, prompt), format_input(sizes, self.embedder, prompt), ) for prompt in prompts } losses, losses_raw = zip( *map(unpack_dict, [prompt_losses, aug_losses, image_losses]) # *map(unpack_dict, [prompt_losses]) ) # logger.debug(losses) losses = list(losses) # logger.debug(losses) # losses = Counter(losses) # logger.debug(losses) losses_raw = list(losses_raw) # losses_raw = Counter(losses_raw) # losses_accumulator += losses # losses_raw_accumulator += losses_raw for v in prompt_losses.values(): v[0].mul_(t) total_loss_mb = sum(map(lambda x: sum(x.values()), losses)) + sum( interp_losses ) total_loss_mb /= gradient_accumulation_steps # total_loss_mb.backward() total_loss_mb.backward(retain_graph=True) # total_loss += total_loss_mb # this is causing it to break # total_loss = total_loss_mb # losses = [{k:v} for k,v in losses_accumulator.items()] # losses_raw = [{k:v} for k,v in losses_raw_accumulator.items()] losses_raw.append({"TOTAL": total_loss}) # this needs to be fixed self.optimizer.step() self.image_rep.update() self.optimizer.zero_grad() # if t != 0: # for v in prompt_losses.values(): # v[0].div_(t) if save_loss: if not self.dataframe: self.dataframe = [ pd.DataFrame({str(k): float(v) for k, v in loss.items()}, index=[i]) for loss in losses_raw ] for df in self.dataframe: df.index.name = "Step" else: for j, (df, loss) in enumerate(zip(self.dataframe, losses_raw)): self.dataframe[j] = df.append( pd.DataFrame( {str(k): float(v) for k, v in loss.items()}, index=[i] ), ignore_index=False, ) self.dataframe[j].name = "Step" return {"TOTAL": float(total_loss)} def report_out( self, i, stage_i, # model, writer, fig, # default to None... axs, # default to None... clear_every, display_every, approximate_vram_usage, display_scale, show_graphs, show_palette, ): model = self img = self.image_rep # pretty sure this is right # DM: I bet this could be abstracted out into a report_out() function or whatever if clear_every > 0 and i > 0 and i % clear_every == 0: display.clear_output() if display_every > 0 and i % display_every == 0: logger.debug(f"Step {i} losses:") if model.dataframe: rec = model.dataframe[0].iloc[-1] logger.debug(rec) for k, v in rec.iteritems(): writer.add_scalar(tag=f"losses/{k}", scalar_value=v, global_step=i) # does this VRAM stuff even do anything? 
if approximate_vram_usage: logger.debug("VRAM Usage:") print_vram_usage() # update this function to use logger # update this stuff to use/rely on tensorboard display_width = int(img.image_shape[0] * display_scale) display_height = int(img.image_shape[1] * display_scale) if stage_i > 0 and show_graphs: model.plot_losses(axs) im = img.decode_image() sidebyside = make_hbox( im.resize((display_width, display_height), Image.LANCZOS), fig, ) display.display(sidebyside) else: im = img.decode_image() display.display( im.resize((display_width, display_height), Image.LANCZOS) ) if show_palette and isinstance(img, PixelImage): logger.debug("Palette:") display.display(img.render_pallet()) def save_out( self, i, # img, writer, OUTPATH, base_name, save_every, file_namespace, backups, ): img = self.image_rep # save # if i > 0 and save_every > 0 and i % save_every == 0: if i > 0 and save_every > 0 and (i + 1) % save_every == 0: im = ( img.decode_image() ) # let's turn this into a property so decoding is cheap # n = i // save_every n = (i + 1) // save_every filename = f"{OUTPATH}/{file_namespace}/{base_name}_{n}.png" logger.debug(filename) im.save(filename) im_np = np.array(im) writer.add_image( tag="pytti output", # img_tensor=filename, # thought this would work? img_tensor=im_np, global_step=i, dataformats="HWC", # this was the key ) if backups > 0: filename = f"backup/{file_namespace}/{base_name}_{n}.bak" torch.save(img.state_dict(), filename) if n > backups: # YOOOOOOO let's not start shell processes unnecessarily # and then execute commands using string interpolation. # Replace this with a pythonic folder removal, then see # if we can't deprecate the folder removal entirely. What # is the purpose of "backups" here? Just use the frames that # are being written to disk. subprocess.run( [ "rm", f"backup/{file_namespace}/{base_name}_{n-backups}.bak", ] ) def update( self, # params, # move to class i, stage_i, ): """ Orchestrates animation transformations and reporting """ # logger.debug("model.update called") # ... I have regrets. params = self.params writer = self.writer OUTPATH = self.OUTPATH base_name = self.base_name fig = self.fig axs = self.axs video_frames = self.video_frames optical_flows = self.optical_flows stabilization_augs = self.stabilization_augs last_frame_semantic = self.last_frame_semantic semantic_init_prompt = self.semantic_init_prompt init_augs = self.init_augs model = self img = self.image_rep embedder = self.embedder model.report_out( i=i, stage_i=stage_i, # model=model, writer=writer, fig=fig, # default to None... axs=axs, # default to None... 
clear_every=params.clear_every, display_every=params.display_every, approximate_vram_usage=params.approximate_vram_usage, display_scale=params.display_scale, show_graphs=params.show_graphs, show_palette=params.show_palette, ) model.save_out( i=i, # img=img, writer=writer, OUTPATH=OUTPATH, base_name=base_name, save_every=params.save_every, file_namespace=params.file_namespace, backups=params.backups, ) # animate ################ ## TO DO: attach T as a class attribute t = (i - params.pre_animation_steps) / ( params.steps_per_frame * params.frames_per_second ) set_t(t) # this won't need to be a thing with `t`` attached to the class if i >= params.pre_animation_steps: # next_step_pil = None if (i - params.pre_animation_steps) % params.steps_per_frame == 0: logger.debug(f"Time: {t:.4f} seconds") update_rotoscopers( ((i - params.pre_animation_steps) // params.steps_per_frame + 1) * params.frame_stride ) if params.reset_lr_each_frame: model.set_optim(None) if params.animation_mode == "2D": next_step_pil = animate_2d( translate_y=params.translate_y, translate_x=params.translate_x, rotate_2d=params.rotate_2d, zoom_x_2d=params.zoom_x_2d, zoom_y_2d=params.zoom_y_2d, infill_mode=params.infill_mode, sampling_mode=params.sampling_mode, writer=writer, i=i, img=img, t=t, # just here for logging ) ########################### elif params.animation_mode == "3D": try: im except NameError: im = img.decode_image() with vram_usage_mode("Optical Flow Loss"): # zoom_3d -> rename to animate_3d or transform_3d flow, next_step_pil = zoom_3d( img, ( params.translate_x, params.translate_y, params.translate_z_3d, ), params.rotate_3d, params.field_of_view, params.near_plane, params.far_plane, border_mode=params.infill_mode, sampling_mode=params.sampling_mode, stabilize=params.lock_camera, ) freeze_vram_usage() for optical_flow in optical_flows: optical_flow.set_last_step(im) optical_flow.set_target_flow(flow) optical_flow.set_enabled(True) elif params.animation_mode == "Video Source": flow_im, next_step_pil = animate_video_source( i=i, img=img, video_frames=video_frames, optical_flows=optical_flows, base_name=base_name, pre_animation_steps=params.pre_animation_steps, frame_stride=params.frame_stride, steps_per_frame=params.steps_per_frame, file_namespace=params.file_namespace, reencode_each_frame=params.reencode_each_frame, lock_palette=params.lock_palette, save_every=params.save_every, infill_mode=params.infill_mode, sampling_mode=params.sampling_mode, ) if params.animation_mode != "off": try: for aug in stabilization_augs: aug.set_comp(next_step_pil) aug.set_enabled(True) if last_frame_semantic is not None: last_frame_semantic.set_image(embedder, next_step_pil) last_frame_semantic.set_enabled(True) for aug in init_augs: aug.set_enabled(False) if semantic_init_prompt is not None: semantic_init_prompt.set_enabled(False) except UnboundLocalError: logger.critical( "\n\n-----< PYTTI-TOOLS > ------" "If you are seeing this error, it might mean " "you are using an option that expects you have " "provided an init_image or video_file.\n\nIf you " "think you are seeing this message in error, please " "file an issue here: " "https://github.com/pytti-tools/pytti-core/issues/new" "-----< PYTTI-TOOLS > ------\n\n" ) raise
[ "pandas.DataFrame", "subprocess.run", "scipy.signal.savgol_filter", "loguru.logger.debug", "IPython.display.display", "pytti.rotoscoper.update_rotoscopers", "pytti.vram_usage_mode", "pytti.Transforms.animate_2d", "pytti.Transforms.zoom_3d", "pytti.format_input", "numpy.array", "pytti.freeze_vram_usage", "loguru.logger.critical", "pytti.Transforms.animate_video_source", "IPython.display.clear_output", "pytti.print_vram_usage", "pytti.set_t" ]
[((1422, 1476), 'scipy.signal.savgol_filter', 'savgol_filter', (['df[key]', 'window_size', '(2)'], {'mode': '"""nearest"""'}), "(df[key], window_size, 2, mode='nearest')\n", (1435, 1476), False, 'from scipy.signal import savgol_filter\n'), ((17269, 17277), 'pytti.set_t', 'set_t', (['t'], {}), '(t)\n', (17274, 17277), False, 'from pytti import format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode\n'), ((1336, 1350), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1348, 1350), True, 'import pandas as pd\n'), ((12133, 12155), 'IPython.display.clear_output', 'display.clear_output', ([], {}), '()\n', (12153, 12155), False, 'from IPython import display\n'), ((12226, 12259), 'loguru.logger.debug', 'logger.debug', (['f"""Step {i} losses:"""'], {}), "(f'Step {i} losses:')\n", (12238, 12259), False, 'from loguru import logger\n'), ((14220, 14242), 'loguru.logger.debug', 'logger.debug', (['filename'], {}), '(filename)\n', (14232, 14242), False, 'from loguru import logger\n'), ((14294, 14306), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (14302, 14306), True, 'import numpy as np\n'), ((7750, 7786), 'pytti.format_input', 'format_input', (['z', 'self.image_rep', 'aug'], {}), '(z, self.image_rep, aug)\n', (7762, 7786), False, 'from pytti import format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode\n'), ((12358, 12375), 'loguru.logger.debug', 'logger.debug', (['rec'], {}), '(rec)\n', (12370, 12375), False, 'from loguru import logger\n'), ((12618, 12645), 'loguru.logger.debug', 'logger.debug', (['"""VRAM Usage:"""'], {}), "('VRAM Usage:')\n", (12630, 12645), False, 'from loguru import logger\n'), ((12662, 12680), 'pytti.print_vram_usage', 'print_vram_usage', ([], {}), '()\n', (12678, 12680), False, 'from pytti import format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode\n'), ((13216, 13243), 'IPython.display.display', 'display.display', (['sidebyside'], {}), '(sidebyside)\n', (13231, 13243), False, 'from IPython import display\n'), ((13508, 13532), 'loguru.logger.debug', 'logger.debug', (['"""Palette:"""'], {}), "('Palette:')\n", (13520, 13532), False, 'from loguru import logger\n'), ((17517, 17555), 'loguru.logger.debug', 'logger.debug', (['f"""Time: {t:.4f} seconds"""'], {}), "(f'Time: {t:.4f} seconds')\n", (17529, 17555), False, 'from loguru import logger\n'), ((17572, 17683), 'pytti.rotoscoper.update_rotoscopers', 'update_rotoscopers', (['(((i - params.pre_animation_steps) // params.steps_per_frame + 1) * params.\n frame_stride)'], {}), '(((i - params.pre_animation_steps) // params.\n steps_per_frame + 1) * params.frame_stride)\n', (17590, 17683), False, 'from pytti.rotoscoper import update_rotoscopers\n'), ((15221, 15306), 'subprocess.run', 'subprocess.run', (["['rm', f'backup/{file_namespace}/{base_name}_{n - backups}.bak']"], {}), "(['rm', f'backup/{file_namespace}/{base_name}_{n - backups}.bak']\n )\n", (15235, 15306), False, 'import subprocess\n'), ((17914, 18188), 'pytti.Transforms.animate_2d', 'animate_2d', ([], {'translate_y': 'params.translate_y', 'translate_x': 'params.translate_x', 'rotate_2d': 'params.rotate_2d', 'zoom_x_2d': 'params.zoom_x_2d', 'zoom_y_2d': 'params.zoom_y_2d', 'infill_mode': 'params.infill_mode', 'sampling_mode': 'params.sampling_mode', 'writer': 'writer', 'i': 'i', 'img': 'img', 't': 't'}), '(translate_y=params.translate_y, translate_x=params.translate_x,\n rotate_2d=params.rotate_2d, zoom_x_2d=params.zoom_x_2d, zoom_y_2d=\n params.zoom_y_2d, infill_mode=params.infill_mode, sampling_mode=params.\n 
sampling_mode, writer=writer, i=i, img=img, t=t)\n', (17924, 18188), False, 'from pytti.Transforms import animate_2d, zoom_3d, animate_video_source\n'), ((9013, 9062), 'pytti.format_input', 'format_input', (['image_embeds', 'self.embedder', 'prompt'], {}), '(image_embeds, self.embedder, prompt)\n', (9025, 9062), False, 'from pytti import format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode\n'), ((9088, 9132), 'pytti.format_input', 'format_input', (['offsets', 'self.embedder', 'prompt'], {}), '(offsets, self.embedder, prompt)\n', (9100, 9132), False, 'from pytti import format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode\n'), ((9158, 9200), 'pytti.format_input', 'format_input', (['sizes', 'self.embedder', 'prompt'], {}), '(sizes, self.embedder, prompt)\n', (9170, 9200), False, 'from pytti import format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode\n'), ((18751, 18787), 'pytti.vram_usage_mode', 'vram_usage_mode', (['"""Optical Flow Loss"""'], {}), "('Optical Flow Loss')\n", (18766, 18787), False, 'from pytti import format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode\n'), ((18909, 19176), 'pytti.Transforms.zoom_3d', 'zoom_3d', (['img', '(params.translate_x, params.translate_y, params.translate_z_3d)', 'params.rotate_3d', 'params.field_of_view', 'params.near_plane', 'params.far_plane'], {'border_mode': 'params.infill_mode', 'sampling_mode': 'params.sampling_mode', 'stabilize': 'params.lock_camera'}), '(img, (params.translate_x, params.translate_y, params.translate_z_3d\n ), params.rotate_3d, params.field_of_view, params.near_plane, params.\n far_plane, border_mode=params.infill_mode, sampling_mode=params.\n sampling_mode, stabilize=params.lock_camera)\n', (18916, 19176), False, 'from pytti.Transforms import animate_2d, zoom_3d, animate_video_source\n'), ((19592, 19611), 'pytti.freeze_vram_usage', 'freeze_vram_usage', ([], {}), '()\n', (19609, 19611), False, 'from pytti import format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode\n'), ((19946, 20426), 'pytti.Transforms.animate_video_source', 'animate_video_source', ([], {'i': 'i', 'img': 'img', 'video_frames': 'video_frames', 'optical_flows': 'optical_flows', 'base_name': 'base_name', 'pre_animation_steps': 'params.pre_animation_steps', 'frame_stride': 'params.frame_stride', 'steps_per_frame': 'params.steps_per_frame', 'file_namespace': 'params.file_namespace', 'reencode_each_frame': 'params.reencode_each_frame', 'lock_palette': 'params.lock_palette', 'save_every': 'params.save_every', 'infill_mode': 'params.infill_mode', 'sampling_mode': 'params.sampling_mode'}), '(i=i, img=img, video_frames=video_frames, optical_flows\n =optical_flows, base_name=base_name, pre_animation_steps=params.\n pre_animation_steps, frame_stride=params.frame_stride, steps_per_frame=\n params.steps_per_frame, file_namespace=params.file_namespace,\n reencode_each_frame=params.reencode_each_frame, lock_palette=params.\n lock_palette, save_every=params.save_every, infill_mode=params.\n infill_mode, sampling_mode=params.sampling_mode)\n', (19966, 20426), False, 'from pytti.Transforms import animate_2d, zoom_3d, animate_video_source\n'), ((21500, 21850), 'loguru.logger.critical', 'logger.critical', (['"""\n\n-----< PYTTI-TOOLS > ------If you are seeing this error, it might mean you are using an option that expects you have provided an init_image or video_file.\n\nIf you think you are seeing this message in error, please file an issue here: 
https://github.com/pytti-tools/pytti-core/issues/new-----< PYTTI-TOOLS > ------\n\n"""'], {}), '(\n """\n\n-----< PYTTI-TOOLS > ------If you are seeing this error, it might mean you are using an option that expects you have provided an init_image or video_file.\n\nIf you think you are seeing this message in error, please file an issue here: https://github.com/pytti-tools/pytti-core/issues/new-----< PYTTI-TOOLS > ------\n\n"""\n )\n', (21515, 21850), False, 'from loguru import logger\n'), ((8583, 8632), 'pytti.format_input', 'format_input', (['image_embeds', 'self.embedder', 'prompt'], {}), '(image_embeds, self.embedder, prompt)\n', (8595, 8632), False, 'from pytti import format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode\n'), ((8662, 8706), 'pytti.format_input', 'format_input', (['offsets', 'self.embedder', 'prompt'], {}), '(offsets, self.embedder, prompt)\n', (8674, 8706), False, 'from pytti import format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode\n'), ((8736, 8778), 'pytti.format_input', 'format_input', (['sizes', 'self.embedder', 'prompt'], {}), '(sizes, self.embedder, prompt)\n', (8748, 8778), False, 'from pytti import format_input, set_t, print_vram_usage, freeze_vram_usage, vram_usage_mode\n')]
# encoding: utf-8 """ @author: sherlock @contact: <EMAIL> """ import logging import torch import torch.nn as nn from ignite.engine import Engine import pickle from utils.reid_metric import R1_mAP, R1_mAP_reranking from torch.autograd import Variable from torch.nn import functional as F import numpy as np worddict_tmp = pickle.load(open('/home/ECIT.QUB.AC.UK/3054256/Code/reid_strong/reid-strong-baseline/reid_data/wordlist_reid.p', 'rb')) wordlist = [l for l in iter(worddict_tmp.keys()) if l != '</S>'] wordlist_final = ['EOS'] + sorted(wordlist) max_tokens = 20 def create_supervised_evaluator(model, metrics, device=None): """ Factory function for creating an evaluator for supervised models Args: model (`torch.nn.Module`): the model to train metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics device (str, optional): device type specification (default: None). Applies to both model and batches. Returns: Engine: an evaluator engine with supervised inference function """ if device: if torch.cuda.device_count() > 1: model = nn.DataParallel(model) model.to(device) def _inference(engine, batch): model.eval() with torch.no_grad(): data, language, pids, camids = batch batchsize = language.size(0) wordclass_feed = np.zeros((batchsize, max_tokens), dtype='int64') wordclass_feed[:,0] = wordlist_final.index('<S>') outcaps = np.empty((batchsize, 0)).tolist() data = data.to(device) if torch.cuda.device_count() >= 1 else data # language = language.to(device) if torch.cuda.device_count() >= 1 else language for j in range(max_tokens-1): wordclass = Variable(torch.from_numpy(wordclass_feed)).cuda() features, wordact, _= model(data, wordclass) wordact = wordact[:,:,:-1] wordact_t = wordact.permute(0, 2, 1).contiguous().view(batchsize*(max_tokens-1), -1) wordprobs = F.softmax(wordact_t).cpu().data.numpy() wordids = np.argmax(wordprobs, axis=1) for k in range(batchsize): word = wordlist_final[wordids[j+k*(max_tokens-1)]] outcaps[k].append(word) if(j < max_tokens-1): wordclass_feed[k, j+1] = wordids[j+k*(max_tokens-1)] for j in range(batchsize): num_words = len(outcaps[j]) if 'EOS' in outcaps[j]: num_words = outcaps[j].index('EOS') outcap = ' '.join(outcaps[j][:num_words]) feat, _, _ = model(data, wordclass) print (outcap) return feat, pids, camids engine = Engine(_inference) for name, metric in metrics.items(): metric.attach(engine, name) return engine def inference( cfg, model, val_loader, num_query ): device = cfg.MODEL.DEVICE logger = logging.getLogger("reid_baseline.inference") logger.info("Enter inferencing") if cfg.TEST.RE_RANKING == 'no': print("Create evaluator") evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device) elif cfg.TEST.RE_RANKING == 'yes': print("Create evaluator for reranking") evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP_reranking(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device) else: print("Unsupported re_ranking config. Only support for no or yes, but got {}.".format(cfg.TEST.RE_RANKING)) evaluator.run(val_loader) cmc, mAP = evaluator.state.metrics['r1_mAP'] logger.info('Validation Results') logger.info("mAP: {:.1%}".format(mAP)) for r in [1, 5, 10]: logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
[ "utils.reid_metric.R1_mAP_reranking", "numpy.argmax", "numpy.empty", "numpy.zeros", "torch.cuda.device_count", "torch.nn.functional.softmax", "ignite.engine.Engine", "utils.reid_metric.R1_mAP", "torch.nn.DataParallel", "torch.no_grad", "logging.getLogger", "torch.from_numpy" ]
[((2872, 2890), 'ignite.engine.Engine', 'Engine', (['_inference'], {}), '(_inference)\n', (2878, 2890), False, 'from ignite.engine import Engine\n'), ((3118, 3162), 'logging.getLogger', 'logging.getLogger', (['"""reid_baseline.inference"""'], {}), "('reid_baseline.inference')\n", (3135, 3162), False, 'import logging\n'), ((1141, 1166), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1164, 1166), False, 'import torch\n'), ((1192, 1214), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (1207, 1214), True, 'import torch.nn as nn\n'), ((1310, 1325), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1323, 1325), False, 'import torch\n'), ((1446, 1494), 'numpy.zeros', 'np.zeros', (['(batchsize, max_tokens)'], {'dtype': '"""int64"""'}), "((batchsize, max_tokens), dtype='int64')\n", (1454, 1494), True, 'import numpy as np\n'), ((2205, 2233), 'numpy.argmax', 'np.argmax', (['wordprobs'], {'axis': '(1)'}), '(wordprobs, axis=1)\n', (2214, 2233), True, 'import numpy as np\n'), ((1579, 1603), 'numpy.empty', 'np.empty', (['(batchsize, 0)'], {}), '((batchsize, 0))\n', (1587, 1603), True, 'import numpy as np\n'), ((1652, 1677), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1675, 1677), False, 'import torch\n'), ((3344, 3404), 'utils.reid_metric.R1_mAP', 'R1_mAP', (['num_query'], {'max_rank': '(50)', 'feat_norm': 'cfg.TEST.FEAT_NORM'}), '(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)\n', (3350, 3404), False, 'from utils.reid_metric import R1_mAP, R1_mAP_reranking\n'), ((3631, 3701), 'utils.reid_metric.R1_mAP_reranking', 'R1_mAP_reranking', (['num_query'], {'max_rank': '(50)', 'feat_norm': 'cfg.TEST.FEAT_NORM'}), '(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)\n', (3647, 3701), False, 'from utils.reid_metric import R1_mAP, R1_mAP_reranking\n'), ((1865, 1897), 'torch.from_numpy', 'torch.from_numpy', (['wordclass_feed'], {}), '(wordclass_feed)\n', (1881, 1897), False, 'import torch\n'), ((2139, 2159), 'torch.nn.functional.softmax', 'F.softmax', (['wordact_t'], {}), '(wordact_t)\n', (2148, 2159), True, 'from torch.nn import functional as F\n')]
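The greedy word selection inside _inference — softmax over the per-step activations, then argmax into the word list — can be sketched in isolation as below; the vocabulary, batch size and random activations are dummies standing in for the model's real outputs:

import numpy as np
import torch
from torch.nn import functional as F

wordlist_final = ["EOS", "<S>", "person", "red", "shirt", "walking"]
batchsize, max_tokens = 2, 4
# stand-in for wordact_t: (batchsize * (max_tokens - 1), vocabulary) activations
wordact_t = torch.randn(batchsize * (max_tokens - 1), len(wordlist_final))
wordprobs = F.softmax(wordact_t, dim=1).cpu().data.numpy()
wordids = np.argmax(wordprobs, axis=1)
for k in range(batchsize):
    # batch-major layout: token j of sample k lives at index j + k * (max_tokens - 1)
    caption = [wordlist_final[wordids[j + k * (max_tokens - 1)]] for j in range(max_tokens - 1)]
    print(" ".join(caption))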
#!/usr/bin/env python # -*- coding: utf-8 -*- import types import numpy import scipy import warnings from scipy.signal import savgol_filter import statsmodels.api as sm lowess = sm.nonparametric.lowess import logging from scipy.signal import savgol_filter import time import sys import copy from datetime import datetime, timedelta from ..objects._msDataset import MSDataset from ..enumerations import AssayRole, SampleType def correctMSdataset(data, window=11, method='LOWESS', align='median', parallelise=True, excludeFailures=True): """ Conduct run-order correction and batch alignment on the :py:class:`~nPYc.objects.MSDataset` instance *data*, returning a new instance with corrected intensity values. Sample are seperated into batches acording to the *'Correction Batch'* column in *data.sampleMetadata*. :param data: MSDataset object with measurements to be corrected :type data: MSDataset :param int window: When calculating trends, consider this many reference samples, centred on the current position :param str method: Correction method, one of 'LOWESS' (default), 'SavitzkyGolay' or None for no correction :param str align: Average calculation of batch and feature intensity for correction, one of 'median' (default) or 'mean' :param bool parallelise: If ``True``, use multiple cores :param bool excludeFailures: If ``True``, remove features where a correct fit could not be calculated from the dataset :return: Duplicate of *data*, with run-order correction applied :rtype: MSDataset """ import copy # Check inputs if not isinstance(data, MSDataset): raise TypeError("data must be a MSDataset instance") if not isinstance(window, int) & (window>0): raise TypeError('window must be a positive integer') if method is not None: if not isinstance(method, str) & (method in {'LOWESS', 'SavitzkyGolay'}): raise ValueError('method must be == LOWESS or SavitzkyGolay') if not isinstance(align, str) & (align in {'mean', 'median'}): raise ValueError('align must be == mean or median') if not isinstance(parallelise, bool): raise TypeError("parallelise must be a boolean") if not isinstance(excludeFailures, bool): raise TypeError("excludeFailures must be a boolean") with warnings.catch_warnings(): warnings.simplefilter('ignore', category=RuntimeWarning) correctedP = _batchCorrectionHead(data.intensityData, data.sampleMetadata['Run Order'].values, (data.sampleMetadata['SampleType'].values == SampleType.StudyPool) & (data.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference), data.sampleMetadata['Correction Batch'].values, window=window, method=method, align=align, parallelise=parallelise) correctedData = copy.deepcopy(data) correctedData.intensityData = correctedP[0] correctedData.fit = correctedP[1] correctedData.Attributes['Log'].append([datetime.now(),'Batch and run order correction applied']) return correctedData def _batchCorrectionHead(data, runOrder, referenceSamples, batchList, window=11, method='LOWESS', align='median', parallelise=True, savePlots=False): """ Conduct run-order correction and batch alignment. 
:param data: Raw *n* × *m* numpy array of measurements to be corrected :type data: numpy.array :param runOrder: *n* item list of order of analysis :type runOrder: numpy.series :param referenceSamples: *n* element boolean array indicating reference samples to base the correction on :type referenceSamples: numpy.series :param batchList: *n* item list of correction batch, defines sample groupings into discrete batches for correction :type batchList: numpy.series :param int window: When calculating trends, use a consider this many reference samples, centred on the current position :param str method: Correction method, one of 'LOWESS' (default), 'SavitzkyGolay' or None for no correction :param str align: Average calculation of batch and feature intensity for correction, one of 'median' (default) or 'mean' """ # Validate inputs if not isinstance(data, numpy.ndarray): raise TypeError('data must be a numpy array') if not isinstance(runOrder, numpy.ndarray): raise TypeError('runOrder must be a numpy array') if not isinstance(referenceSamples, numpy.ndarray): raise TypeError('referenceSamples must be a numpy array') if not isinstance(batchList, numpy.ndarray): raise TypeError('batchList must be a numpy array') if not isinstance(window, int) & (window>0): raise TypeError('window must be a positive integer') if method is not None: if not isinstance(method, str) & (method in {'LOWESS', 'SavitzkyGolay'}): raise ValueError('method must be == LOWESS or SavitzkyGolay') if not isinstance(align, str) & (align in {'mean', 'median'}): raise ValueError('align must be == mean or median') if not isinstance(parallelise, bool): raise TypeError('parallelise must be True or False') if not isinstance(savePlots, bool): raise TypeError('savePlots must be True or False') # Store paramaters in a dict to avoid arg lists going out of control parameters = dict() parameters['window'] = window parameters['method'] = method parameters['align'] = align if parallelise: # Set up multiprocessing enviroment import multiprocessing # Generate an index and set up pool # Use one less workers than CPU cores if multiprocessing.cpu_count()-1 <= 0: cores = 1 else: cores = multiprocessing.cpu_count()-1 pool = multiprocessing.Pool(processes=cores) instances = range(0, cores) # Break features into no cores chunks featureIndex = _chunkMatrix(range(0, data.shape[1]), cores) # run _batchCorection ## # At present pickle args and returns and reassemble after - possiblly share memory in the future. ## results2 = [pool.apply_async(_batchCorrection, args=(data, runOrder, referenceSamples, batchList, featureIndex, parameters, w)) for w in instances] results2 = [p.get(None) for p in results2] results = list() # Unpack results for instanceOutput in results2: for item in instanceOutput: results.append(item) # Shut down the pool pool.close() else: # Just run it # Iterate over features in one batch and correct them results = _batchCorrection(data, runOrder, referenceSamples, batchList, range(0, data.shape[1]), # All features parameters, 0) correctedData = numpy.empty_like(data) fits = numpy.empty_like(data) # Extract return values from tuple for (w, feature, fit) in results: correctedData[:, w] = feature fits[:, w] = fit return (correctedData, fits) def _batchCorrection(data, runOrder, QCsamples, batchList, featureIndex, parameters, w): """ Break the dataset into batches to be corrected together. 
""" # Check if we have a list of lists, or just one list: if isinstance(featureIndex[0], range): featureList = featureIndex[w] else: featureList = range(0, len(featureIndex)) # add results to this list: results = list() # Loop over all elements in featureList for i in featureList: # Create a matrix to be used with `nonlocal` to store fits try: feature = copy.deepcopy(data[:,i]) except IndexError: feature = copy.deepcopy(data) fit = numpy.empty_like(feature) fit.fill(numpy.nan) # Identify number of unique batches batches = list(set(batchList)) # Get overall average intensity if parameters['align'] == 'mean': featureAverage = numpy.mean(feature[QCsamples]) elif parameters['align'] == 'median': featureAverage = numpy.median(feature[QCsamples]) else: return numpy.zeros_like(data) # Iterate over batches. for batch in batches: # Skip the NaN batch if not numpy.isnan(batch): batchMask = numpy.squeeze(numpy.asarray(batchList == batch, 'bool')) if parameters['method'] == None: # Skip RO correction if method is none pass else: (feature[batchMask], fit[batchMask]) = runOrderCompensation(feature[batchMask], runOrder[batchMask], QCsamples[batchMask], parameters) # Correct batch average to overall feature average if parameters['align'] == 'mean': batchMean = numpy.mean(feature[batchMask & QCsamples]) elif parameters['align'] == 'median': batchMean = numpy.median(feature[batchMask & QCsamples]) else: batchMean = numpy.nan_like(feature[batchMask]) feature[batchMask] = numpy.divide(feature[batchMask], batchMean) feature[batchMask] = numpy.multiply(feature[batchMask], featureAverage) # # If negative data mark for exclusion (occurs when too many QCsamples have intensity==0) # if sum(feature[batchMask]<0) != 0: # CJS 240816 # exclude = exclude + '; negativeData=' + str(sum(feature[batchMask]<0)) # results.append((i, feature, fit, exclude)) # CJS 240816 results.append((i, feature, fit)) return results def runOrderCompensation(data, runOrder, referenceSamples, parameters): """ Model and remove longitudinal effects. """ # Break the QCs out of the dataset QCdata = data[referenceSamples] QCrunorder = runOrder[referenceSamples] # Select model # Optimisation of window would happen here. window = parameters['window'] if parameters['method'] == 'LOWESS': (data, fit) = doLOESScorrection(QCdata, QCrunorder, data, runOrder, window=window) elif parameters['method'] == 'SavitzkyGolay': (data, fit) = doSavitzkyGolayCorrection(QCdata, QCrunorder, data, runOrder, window=window) # Potentially exclude features with poor fits that retuned NaN &c here. return (data, fit) def doLOESScorrection(QCdata, QCrunorder, data, runorder, window=11): """ Fit a LOWESS regression to the data. """ # Convert window number of samples to fraction of the dataset: noSamples = QCrunorder.shape if noSamples == 0: fit = numpy.zeros(shape=runorder.shape) corrected = data else: frac = window / float(numpy.squeeze(noSamples)) frac = min([1, frac]) # actually do the work z = lowess(QCdata, QCrunorder, frac=frac) # Divide by fit, then rescale to batch median fit = numpy.interp(runorder, z[:,0], z[:,1]) # Fit can go negative if too many adjacent QC samples == 0; set any negative fit values to zero fit[fit < 0] = 0 corrected = numpy.divide(data, fit) corrected = numpy.multiply(corrected, numpy.median(QCdata)) return (corrected, fit) def doSavitzkyGolayCorrection(QCdata, QCrunorder, data, runorder, window=11, polyOrder=3): """ Fit a Savitzky-Golay curve to the data. 
""" # Sort the array sortedRO = numpy.argsort(QCrunorder) sortedRO2 = QCrunorder[sortedRO] QCdataSorted = QCdata[sortedRO] # actually do the work z = savgol_filter(QCdataSorted, window, polyOrder) fit = numpy.interp(runorder, sortedRO2, z) corrected = numpy.divide(data, fit) corrected = numpy.multiply(corrected, numpy.median(QCdata)) return (corrected, fit) def optimiseCorrection(feature, optimise): """ Optimise the window function my mimising the output of `optimise(data)` """ pass ## # Adapted from http://stackoverflow.com/questions/2130016/splitting-a-list-of-arbitrary-size-into-only-roughly-n-equal-parts ## def _chunkMatrix(seq, num): avg = round(len(seq) / float(num)) out = [] last = 0.0 for i in range(0, num-1): out.append(seq[int(last):int(last + avg)]) last += avg out.append(seq[int(last):max(seq)+1]) return out
[ "numpy.isnan", "numpy.argsort", "numpy.mean", "numpy.interp", "multiprocessing.cpu_count", "numpy.zeros_like", "numpy.multiply", "warnings.simplefilter", "numpy.empty_like", "warnings.catch_warnings", "datetime.datetime.now", "numpy.divide", "copy.deepcopy", "numpy.nan_like", "numpy.median", "numpy.asarray", "multiprocessing.Pool", "numpy.squeeze", "scipy.signal.savgol_filter", "numpy.zeros" ]
[((2752, 2771), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (2765, 2771), False, 'import copy\n'), ((6424, 6446), 'numpy.empty_like', 'numpy.empty_like', (['data'], {}), '(data)\n', (6440, 6446), False, 'import numpy\n'), ((6455, 6477), 'numpy.empty_like', 'numpy.empty_like', (['data'], {}), '(data)\n', (6471, 6477), False, 'import numpy\n'), ((10672, 10697), 'numpy.argsort', 'numpy.argsort', (['QCrunorder'], {}), '(QCrunorder)\n', (10685, 10697), False, 'import numpy\n'), ((10795, 10841), 'scipy.signal.savgol_filter', 'savgol_filter', (['QCdataSorted', 'window', 'polyOrder'], {}), '(QCdataSorted, window, polyOrder)\n', (10808, 10841), False, 'from scipy.signal import savgol_filter\n'), ((10850, 10886), 'numpy.interp', 'numpy.interp', (['runorder', 'sortedRO2', 'z'], {}), '(runorder, sortedRO2, z)\n', (10862, 10886), False, 'import numpy\n'), ((10901, 10924), 'numpy.divide', 'numpy.divide', (['data', 'fit'], {}), '(data, fit)\n', (10913, 10924), False, 'import numpy\n'), ((2220, 2245), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2243, 2245), False, 'import warnings\n'), ((2249, 2305), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (2270, 2305), False, 'import warnings\n'), ((5457, 5494), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'cores'}), '(processes=cores)\n', (5477, 5494), False, 'import multiprocessing\n'), ((7253, 7278), 'numpy.empty_like', 'numpy.empty_like', (['feature'], {}), '(feature)\n', (7269, 7278), False, 'import numpy\n'), ((9952, 9985), 'numpy.zeros', 'numpy.zeros', ([], {'shape': 'runorder.shape'}), '(shape=runorder.shape)\n', (9963, 9985), False, 'import numpy\n'), ((10213, 10253), 'numpy.interp', 'numpy.interp', (['runorder', 'z[:, 0]', 'z[:, 1]'], {}), '(runorder, z[:, 0], z[:, 1])\n', (10225, 10253), False, 'import numpy\n'), ((10386, 10409), 'numpy.divide', 'numpy.divide', (['data', 'fit'], {}), '(data, fit)\n', (10398, 10409), False, 'import numpy\n'), ((10964, 10984), 'numpy.median', 'numpy.median', (['QCdata'], {}), '(QCdata)\n', (10976, 10984), False, 'import numpy\n'), ((2893, 2907), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2905, 2907), False, 'from datetime import datetime, timedelta\n'), ((7166, 7191), 'copy.deepcopy', 'copy.deepcopy', (['data[:, i]'], {}), '(data[:, i])\n', (7179, 7191), False, 'import copy\n'), ((7467, 7497), 'numpy.mean', 'numpy.mean', (['feature[QCsamples]'], {}), '(feature[QCsamples])\n', (7477, 7497), False, 'import numpy\n'), ((10450, 10470), 'numpy.median', 'numpy.median', (['QCdata'], {}), '(QCdata)\n', (10462, 10470), False, 'import numpy\n'), ((5348, 5375), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (5373, 5375), False, 'import multiprocessing\n'), ((5417, 5444), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (5442, 5444), False, 'import multiprocessing\n'), ((7225, 7244), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (7238, 7244), False, 'import copy\n'), ((7558, 7590), 'numpy.median', 'numpy.median', (['feature[QCsamples]'], {}), '(feature[QCsamples])\n', (7570, 7590), False, 'import numpy\n'), ((7609, 7631), 'numpy.zeros_like', 'numpy.zeros_like', (['data'], {}), '(data)\n', (7625, 7631), False, 'import numpy\n'), ((7721, 7739), 'numpy.isnan', 'numpy.isnan', (['batch'], {}), '(batch)\n', (7732, 7739), False, 'import numpy\n'), ((8461, 8504), 'numpy.divide', 
'numpy.divide', (['feature[batchMask]', 'batchMean'], {}), '(feature[batchMask], batchMean)\n', (8473, 8504), False, 'import numpy\n'), ((8530, 8580), 'numpy.multiply', 'numpy.multiply', (['feature[batchMask]', 'featureAverage'], {}), '(feature[batchMask], featureAverage)\n', (8544, 8580), False, 'import numpy\n'), ((10037, 10061), 'numpy.squeeze', 'numpy.squeeze', (['noSamples'], {}), '(noSamples)\n', (10050, 10061), False, 'import numpy\n'), ((7772, 7813), 'numpy.asarray', 'numpy.asarray', (['(batchList == batch)', '"""bool"""'], {}), "(batchList == batch, 'bool')\n", (7785, 7813), False, 'import numpy\n'), ((8226, 8268), 'numpy.mean', 'numpy.mean', (['feature[batchMask & QCsamples]'], {}), '(feature[batchMask & QCsamples])\n', (8236, 8268), False, 'import numpy\n'), ((8328, 8372), 'numpy.median', 'numpy.median', (['feature[batchMask & QCsamples]'], {}), '(feature[batchMask & QCsamples])\n', (8340, 8372), False, 'import numpy\n'), ((8400, 8434), 'numpy.nan_like', 'numpy.nan_like', (['feature[batchMask]'], {}), '(feature[batchMask])\n', (8414, 8434), False, 'import numpy\n')]
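The heart of doLOESScorrection — a LOWESS trend fitted on the reference (QC) injections only, interpolated across every run-order position, divided out and rescaled to the QC median — can be tried on synthetic intensities:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
runorder = np.arange(60)
drift = 1.0 + 0.01 * runorder                        # slow instrumental drift
data = 1000.0 * drift * (1 + 0.05 * rng.standard_normal(60))
qc = runorder % 6 == 0                               # every sixth injection is a reference sample
z = sm.nonparametric.lowess(data[qc], runorder[qc], frac=min(1, 11 / qc.sum()))
fit = np.interp(runorder, z[:, 0], z[:, 1])
fit[fit < 0] = 0                                     # clamp negative fits, as the source does
corrected = np.divide(data, fit) * np.median(data[qc])
print(corrected.round(1))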
"""Parse data in the format: Age Uncertainty Sample data assumning a normal distrubtion with mean defined by Age and sigma defined by Uncertainty """ import numpy as np from scipy.stats import norm from QuakeRates.dataman.event_dates import EventDate, EventSet def parse_age_sigma(filename, sigma_level, event_order, truncation=3, delimiter=None, header = 1): """Parse a text file containing a list of event ages and associated uncertainties :param filename: string of path to input file :param sigma_level: Number of sigmas represented by the uncertainty columm :param event_order: String, 'Forwards' or 'Backwards' in time. I.e. if 'Forwards', oldest event is in the first row of the file. :param truncation: Number of sigma levels to sample from :param delimiter: Delimiter of input text file. :param header: Number of header lines to discard """ event_list = [] # data = np.genfromtxt(filename, delimiter=delimiter, skip_header=header) data = np.genfromtxt(filename, delimiter=delimiter, names=True) print(data) print(type(data)) print(data.dtype) print(data.dtype.names) # We want time to be running forwards if event_order == 'Backwards': data = np.flip(data, axis=0) print(data) if data.dtype.names[0]=='Date': dates = data['Date'] sigmas = data['Uncertainty']/sigma_level #Convert, e.g. 2 sigma to 1 sigma elif data.dtype.names[0]=='Date1': dates = np.mean([data['Date1'],data['Date2']], axis=0) sigmas = abs(data['Date1'] - data['Date2'])/(2*sigma_level) elif data.dtype.names[0]=='Age': # Conver to dates assuming age before 1950 dates = 1950 - data['Age'] sigmas = data['Uncertainty']/sigma_level #Convert, e.g. 2 sigma to 1 sigma # Deal with age ranges, rather than mean and standard deviation, assuming # range covers 95% of the distirbution (i.e. +/- 2 sigma) elif data.dtype.names[0]=='Age1': dates = np.mean([(1950 - data['Age1']),(1950 - data['Age2'])], axis=0) sigmas = abs(data['Age1'] - data['Age2'])/(2*sigma_level) print(dates) for i,mean_age in enumerate(dates): event_id = i # Special case of zero uncertainty if sigmas[i]==0: ages = np.array([mean_age]) probs = np.array([1.]) else: ages = np.arange(mean_age-truncation*sigmas[i], mean_age+truncation*sigmas[i]+1, 1) probs = norm.pdf(ages, mean_age, sigmas[i]) # Normalise probs due to truncation of distribution probs = probs/sum(probs) event = EventDate(event_id, 'manual', 'age_sigma') event.add_dates_and_probs(ages, probs) # print(event.dates) # print(event.probabilities) event_list.append(event) # Note cases with uncertain event occurrences try: if data.dtype.names[2]=='Certain': event_certainty = data['Certain'] except: event_certainty = np.ones(len(dates)) print(event_certainty) return event_list, event_certainty if __name__ == "__main__": # filename = '../data/Elsinore_Rockwell_1986_simple.txt' filename = '../data/Yammouneh_Daeron_2007_simple.txt' event_list, event_certainty = parse_age_sigma(filename, sigma_level=2, event_order='Backwards') event_set = EventSet(event_list) print(event_list) print(event_set) n_samples = 10000 event_set.gen_chronologies(n_samples) event_set.calculate_cov() event_set.cov_density()
[ "QuakeRates.dataman.event_dates.EventDate", "numpy.flip", "numpy.genfromtxt", "scipy.stats.norm.pdf", "numpy.mean", "numpy.array", "numpy.arange", "QuakeRates.dataman.event_dates.EventSet" ]
[((1036, 1092), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': 'delimiter', 'names': '(True)'}), '(filename, delimiter=delimiter, names=True)\n', (1049, 1092), True, 'import numpy as np\n'), ((3467, 3487), 'QuakeRates.dataman.event_dates.EventSet', 'EventSet', (['event_list'], {}), '(event_list)\n', (3475, 3487), False, 'from QuakeRates.dataman.event_dates import EventDate, EventSet\n'), ((1273, 1294), 'numpy.flip', 'np.flip', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1280, 1294), True, 'import numpy as np\n'), ((2696, 2738), 'QuakeRates.dataman.event_dates.EventDate', 'EventDate', (['event_id', '"""manual"""', '"""age_sigma"""'], {}), "(event_id, 'manual', 'age_sigma')\n", (2705, 2738), False, 'from QuakeRates.dataman.event_dates import EventDate, EventSet\n'), ((1516, 1563), 'numpy.mean', 'np.mean', (["[data['Date1'], data['Date2']]"], {'axis': '(0)'}), "([data['Date1'], data['Date2']], axis=0)\n", (1523, 1563), True, 'import numpy as np\n'), ((2328, 2348), 'numpy.array', 'np.array', (['[mean_age]'], {}), '([mean_age])\n', (2336, 2348), True, 'import numpy as np\n'), ((2369, 2384), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (2377, 2384), True, 'import numpy as np\n'), ((2417, 2508), 'numpy.arange', 'np.arange', (['(mean_age - truncation * sigmas[i])', '(mean_age + truncation * sigmas[i] + 1)', '(1)'], {}), '(mean_age - truncation * sigmas[i], mean_age + truncation * sigmas\n [i] + 1, 1)\n', (2426, 2508), True, 'import numpy as np\n'), ((2543, 2578), 'scipy.stats.norm.pdf', 'norm.pdf', (['ages', 'mean_age', 'sigmas[i]'], {}), '(ages, mean_age, sigmas[i])\n', (2551, 2578), False, 'from scipy.stats import norm\n'), ((2034, 2093), 'numpy.mean', 'np.mean', (["[1950 - data['Age1'], 1950 - data['Age2']]"], {'axis': '(0)'}), "([1950 - data['Age1'], 1950 - data['Age2']], axis=0)\n", (2041, 2093), True, 'import numpy as np\n')]
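The per-event date distribution that parse_age_sigma builds — integer dates within three sigma of the mean, weighted by a normal pdf and renormalised after truncation — looks like this in isolation (the mean and sigma values are invented):

import numpy as np
from scipy.stats import norm

mean_age, sigma, truncation = 1120.0, 15.0, 3
ages = np.arange(mean_age - truncation * sigma, mean_age + truncation * sigma + 1, 1)
probs = norm.pdf(ages, mean_age, sigma)
probs = probs / probs.sum()        # renormalise because the tails were truncated
print(ages[probs.argmax()], round(probs.sum(), 6))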
# @brief: provides a method to solve the modified Riccati Equation
import numpy as np
from scipy.linalg import solve_continuous_are


def createLowLevelParams(A, B, Q, R, g, w):
    # A, B are control matrices
    # Q, R are cost weights
    # g = \gamma controls the sensitivity
    # w is the magnitude of the noise

    # determine size of the problem
    n = Q.shape[1]

    G = B @ np.linalg.inv(R) @ B.transpose() - (1.0/g**2) * np.eye(n)
    Bmod = np.eye(n)

    P = solve_continuous_are(A, Bmod, Q, np.linalg.inv(G))

    try:
        np.linalg.cholesky(P)
    except np.linalg.LinAlgError:
        raise ValueError("P is not positive definite! - choose a larger gamma?")

    K = 0.5 * np.linalg.inv(R) @ B.transpose() @ P
    # control input is u = - K @ x

    Vmax = 0.5 * g**2 * np.max(np.linalg.eigvals(P)) / np.min(np.linalg.eigvals(Q)) * w**2

    d_tighten = np.sqrt(2 * Vmax / np.min(np.linalg.eigvals(P)))

    return P, Vmax, d_tighten, K


def double_integrator_params(w, gamma, Q=np.eye(4), R=np.eye(2)):

    A = np.array([[0, 0, 1, 0],
                  [0, 0, 0, 1],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0]])

    B = np.array([[0, 0],
                  [0, 0],
                  [1, 0],
                  [0, 1]])

    return createLowLevelParams(A, B, Q, R, gamma, w)
[ "numpy.linalg.eigvals", "numpy.linalg.inv", "numpy.array", "numpy.eye", "numpy.linalg.cholesky" ]
[((445, 454), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (451, 454), True, 'import numpy as np\n'), ((941, 950), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (947, 950), True, 'import numpy as np\n'), ((956, 965), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (962, 965), True, 'import numpy as np\n'), ((974, 1040), 'numpy.array', 'np.array', (['[[0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (982, 1040), True, 'import numpy as np\n'), ((1096, 1138), 'numpy.array', 'np.array', (['[[0, 0], [0, 0], [1, 0], [0, 1]]'], {}), '([[0, 0], [0, 0], [1, 0], [0, 1]])\n', (1104, 1138), True, 'import numpy as np\n'), ((495, 511), 'numpy.linalg.inv', 'np.linalg.inv', (['G'], {}), '(G)\n', (508, 511), True, 'import numpy as np\n'), ((525, 546), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['P'], {}), '(P)\n', (543, 546), True, 'import numpy as np\n'), ((425, 434), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (431, 434), True, 'import numpy as np\n'), ((377, 393), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (390, 393), True, 'import numpy as np\n'), ((637, 653), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (650, 653), True, 'import numpy as np\n'), ((770, 790), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['Q'], {}), '(Q)\n', (787, 790), True, 'import numpy as np\n'), ((842, 862), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['P'], {}), '(P)\n', (859, 862), True, 'import numpy as np\n'), ((739, 759), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['P'], {}), '(P)\n', (756, 759), True, 'import numpy as np\n')]
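As a quick sanity check of the scipy call underlying createLowLevelParams: in the gamma -> infinity limit the modified equation reduces to the ordinary LQR Riccati equation for the same double-integrator model, which solve_continuous_are handles directly:

import numpy as np
from scipy.linalg import solve_continuous_are

A = np.array([[0., 0., 1., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.], [0., 0., 0., 0.]])
B = np.array([[0., 0.], [0., 0.], [1., 0.], [0., 1.]])
Q, R = np.eye(4), np.eye(2)
P = solve_continuous_are(A, B, Q, R)
np.linalg.cholesky(P)               # raises LinAlgError unless P is positive definite
K = np.linalg.inv(R) @ B.T @ P       # standard LQR gain for u = -K x
print(np.round(K, 3))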
import pandas as pd import numpy as np import matplotlib.pyplot as plt import random import os print('Welcome to the game!') input('Press enter to continue: ') print('') print('- This is a population simulator for a fictional town.') print('- The town starts with 20 people. With each year that passes, babies will be born and people will die.') print('- Females can have a baby at any age between 18 and 40, but are most likely to give birth a in their late 20s.') print('- People can die at any age, but more likely as they get older.') print('- Names will be randomly assigned based on names in USA datasets.') print('- Surnames are inherited from the mother.') print('') print('loading...') # read in surnames from data source # snlist = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/most-common-name/surnames.csv', nrows=20)['name'].tolist() snlist = ['SMITH', 'JOHNSON', 'WILLIAMS', 'BROWN', 'JONES', 'MILLER', 'DAVIS', 'GARCIA', 'RODRIGUEZ', 'WILSON', 'MARTINEZ', 'ANDERSON', 'TAYLOR', 'THOMAS', 'HERNANDEZ', 'MOORE', 'MARTIN', 'JACKSON', 'THOMPSON', 'WHITE'] # read in first names from data source fnlistcsv = pd.read_csv('https://raw.githubusercontent.com/MatthiasWinkelmann/firstname-database/master/firstnames.csv', delimiter=';') # select only relevent data fnlist = fnlistcsv fnlist = fnlist[fnlist['U.S.A.'].notnull()][['name', 'gender', 'U.S.A.']].rename(columns={'U.S.A.':'Freq'}) fnlist['Freq'] = fnlist['Freq'].astype(int) # clean gender values fnlist.replace({'1F': 'F', '?F': 'F', '1M': 'M', '?M': 'M'}, inplace=True) fnlist = fnlist[fnlist['gender'].isin(['F', 'M'])] # apply factors to 'Freq' column to represent popularity fnlist['Freq'] = (10-(fnlist[['Freq']]*-1+1))**3 fnlistm = fnlist[fnlist['gender'] == 'M'].sort_values('Freq', ascending=False).reset_index(drop=True) fnlistf = fnlist[fnlist['gender'] == 'F'].sort_values('Freq', ascending=False).reset_index(drop=True) fnlistm = fnlistm.reindex(fnlistm.index.repeat(fnlistm['Freq']))['name'].tolist() fnlistf = fnlistf.reindex(fnlistf.index.repeat(fnlistf['Freq']))['name'].tolist() town = input('Enter the name of your town: ') FirstName = [] for i in range(20): FirstName.append(random.choice(fnlistf)) MiddleName = [] for i in range(20): MiddleName.append(random.choice(fnlistf)) # create dataframe data = {'FirstName':FirstName, 'MiddleName':MiddleName, 'Surname':snlist, 'Sex':'F', 'YearBorn':list(range(0,20))} df = pd.DataFrame(data) # add columns year = 19 df['YearDeceased'] = np.nan df['CurrentYear'] = year df['Age'] = (df[['CurrentYear','YearDeceased']].min(axis=1) - df['YearBorn']).astype(int) df['ParentID'] = np.nan df['Generation'] = 1 df['NoOfChildren'] = 0 # probability of dying at age # manually enter probablities prob = [0.001] * 40 + [0.002] * 10 + [0.008] * 10 + [0.012] * 10 + [0.025] * 10 + [0.05] * 5 + [0.1] * 5 + [0.2] * 5 + [0.25] * 15 + [0.35] * 6 + [0.5] * 3 + [1] * 1 data = {'Age':list(range(1,121)), 'Prob':prob} probdeath = pd.DataFrame(data) # probability of having a baby at age # min age=18, max age=40. 
manually enter probablities # rapid growth data = {'Age':list(range(18,40)), 'Prob':[0.02, 0.04, 0.06, 0.08, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.4, 0.35, 0.3, 0.25, 0.2, 0.15, 0.1, 0.08, 0.06, 0.04, 0.02]} probbabyRG = pd.DataFrame(data) # neutral growth data = {'Age':list(range(18,40)), 'Prob':[0.01, 0.02, 0.03, 0.04, 0.05, 0.075, 0.1, 0.13, 0.155, 0.19, 0.215, 0.215, 0.19, 0.155, 0.13, 0.1, 0.075, 0.05, 0.04, 0.03, 0.02, 0.01]} probbabyNU = pd.DataFrame(data) # moderate decline data = {'Age':list(range(18,40)), 'Prob':[0.007, 0.015, 0.02, 0.03, 0.04, 0.05, 0.07, 0.1, 0.12, 0.16, 0.21, 0.21, 0.16, 0.12, 0.1, 0.07, 0.05, 0.04, 0.03, 0.02, 0.015, 0.007]} probbabyMD = pd.DataFrame(data) # productivity output by age r1 = list(range(1,16)) l1 = [0.0625]*15 lr1 = [r1*l1 for r1,l1 in zip(r1,l1)] r2 = list(reversed(range(1,35))) l2 = [0.0286]*34 lr2 = [r2*l2 for r2,l2 in zip(r2,l2)] prodout = [0]*14 + lr1 + [1]*11 + lr2 + [0]*46 data = {'Age':list(range(1,121)), 'ProdOutput':prodout} prodout = pd.DataFrame(data) # productivity used by age produse = [0.25]*120 data = {'Age':list(range(1,121)), 'ProdUsed':produse} produse = pd.DataFrame(data) playing = 'y' while playing == 'y': # add years yearsadd = int(input('Run for how many more years? (enter between 1-50 years): ')) probbabyint = int(input('At what population growth rate? (1=rapid growth, 2=neutral, 3=moderate decline): ')) if probbabyint == 1: probbaby = probbabyRG elif probbabyint == 2: probbaby = probbabyNU elif probbabyint == 3: probbaby = probbabyMD else: print('incorrect input!') endyear = year + yearsadd while year < endyear: year += 1 alive = df[df['YearDeceased'].isnull()] df['CurrentYear'] = year df['Age'] = np.where(df['YearDeceased'].isnull(), (df['CurrentYear'] - df['YearBorn']), (df['YearDeceased'] - df['YearBorn'])).astype(int) # did anyone die? if so enter in YearDeceased temp1 = df[df['YearDeceased'].isnull()].reset_index().merge(probdeath).set_index('index')[['Prob']] temp1['rand'] = [random.random() for i in temp1.index] temp1['YearDeceased1'] = np.where(temp1['rand'] < temp1['Prob'], year, np.nan) temp1.drop(columns={'Prob', 'rand'}, inplace=True) df = pd.concat([df, temp1], axis=1) df['YearDeceased'] = np.where(df['YearDeceased'].isnull() == True, df['YearDeceased1'], df['YearDeceased']) df.drop(columns={'YearDeceased1'}, inplace=True) # did anyone have a baby? 
if so enter new row for each babies = df[(df['YearDeceased'].isnull()) & (df['Sex'] == 'F')].reset_index().merge(probbaby, on='Age').set_index('index') lst = [] for i in range(babies.shape[0]): lst.append(random.random()) babies['rand'] = lst babies['baby?'] = babies['Prob'] > babies['rand'] babies = babies[babies['baby?']][['Surname', 'Generation']] babies['Generation'] += 1 babies = babies.reset_index().rename(columns={'index':'ParentID'}) if len(babies) > 0: Sex = [] for i in range(babies.shape[0]): Sex.append(random.choice(['F', 'M'])) babies['Sex'] = Sex MFirstName = [] for i in range(babies.shape[0]): MFirstName.append(random.choice(fnlistm)) babies['MFirstName'] = MFirstName MMiddleName = [] for i in range(babies.shape[0]): MMiddleName.append(random.choice(fnlistm)) babies['MMiddleName'] = MMiddleName FFirstName = [] for i in range(babies.shape[0]): FFirstName.append(random.choice(fnlistf)) babies['FFirstName'] = FFirstName FMiddleName = [] for i in range(babies.shape[0]): FMiddleName.append(random.choice(fnlistf)) babies['FMiddleName'] = FMiddleName babies['FirstName'] = np.where(babies['Sex'] == 'F', babies['FFirstName'], babies['MFirstName']) babies['MiddleName'] = np.where(babies['Sex'] == 'F', babies['FMiddleName'], babies['MMiddleName']) babies.drop(columns={'MFirstName', 'MMiddleName', 'FFirstName', 'FMiddleName'}, inplace=True) babies['YearBorn'] = year babies['YearDeceased'] = np.nan babies['CurrentYear'] = year babies['Age'] = 0 babies['NoOfChildren'] = 0 babies = babies[['FirstName', 'MiddleName', 'Surname', 'Sex', 'YearBorn', 'YearDeceased', 'CurrentYear', 'Age', 'ParentID', 'Generation', 'NoOfChildren']] df = pd.concat([df, babies]).reset_index(drop=True) childadd = babies['ParentID'].tolist() df['NoOfChildren'] = np.where(df.index.isin(childadd) == True, 1, 0) + df['NoOfChildren'] # add column for productivity df['ProdOutput'] = df[df['YearDeceased'].isnull()].reset_index().merge(prodout, on='Age').set_index('index')[['ProdOutput']] print('') print('--------------------------------------------------') title = 'Statistics for {} at the end of year {}'.format(town, year) print(title) print('--------------------------------------------------') print('') print('Current population: ') print(len(alive)) print('') print('All people that ever lived: ') print(len(df)) print('') print('Total productive output') print(df['ProdOutput'].sum()) print('') print('A randomly selected living person:') alivelist = alive.index.tolist() print(df.iloc[random.choice(alivelist)]) print('') print('Average age: ') print(alive['Age'].mean()) print('') print('Average age at death: ') print(df[df['YearDeceased'].notnull()]['Age'].mean()) print('') print('Record for oldest person:') print(df['Age'].max()) print('') print('Record for most children:') print(df[df['NoOfChildren'] == df['NoOfChildren'].max()]) print('') print('Most popular surnames:') print(df.groupby('Surname').count()['FirstName'].sort_values(ascending=False)) print('') print('Number of babies born in each year:') df.groupby('YearBorn')['FirstName'].count().plot() plt.show() print('') print('Any duplicate names?') temp2 = df.groupby(['FirstName', 'MiddleName', 'Surname']).count().sort_values(by='Sex', ascending=False)[['Sex']].reset_index() if len(temp2[temp2['Sex'] > 1]) == 0: print('none, everyone has a unique name so far!') else: print(temp2[temp2['Sex'] > 1].rename(columns={'Sex': 'Count'})) print('') while True: playagain = str(input('Keep going? (enter y or n): ')) if playagain == 'n': input('Thanks for playing! 
- press enter to exit') playing = 'n' break elif playagain =='y': playing = 'y' break else: print('invalid response, please enter y or n') df.drop(columns={'ProdOutput'}, inplace=True) csvfileloc = '{}\\TheTownOf{}.csv'.format(os.getcwd(), town) df.to_csv(csvfileloc, index=False) # TO DO: # add population vs time chart # try except to validate user input # figure out how to get charts to show # change write to csv to be after all the prints so less wait time # write to csv to only append added rows, df to be only the current population alive # add in NoOfGrandchildren # add in written personal bio # age demographics histogram by bins # option to add in an immigrant, enter name and age # add a write to csv that records aggregated data for each year, population etc # annual productivity # limit list of duplicate names to be only the most # clear output after each cycle # family tree
[ "pandas.DataFrame", "matplotlib.pyplot.show", "pandas.read_csv", "os.getcwd", "random.choice", "random.random", "numpy.where", "pandas.concat" ]
[((1151, 1284), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/MatthiasWinkelmann/firstname-database/master/firstnames.csv"""'], {'delimiter': '""";"""'}), "(\n 'https://raw.githubusercontent.com/MatthiasWinkelmann/firstname-database/master/firstnames.csv'\n , delimiter=';')\n", (1162, 1284), True, 'import pandas as pd\n'), ((2455, 2473), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2467, 2473), True, 'import pandas as pd\n'), ((2997, 3015), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3009, 3015), True, 'import pandas as pd\n'), ((3305, 3323), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3317, 3323), True, 'import pandas as pd\n'), ((3534, 3552), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3546, 3552), True, 'import pandas as pd\n'), ((3763, 3781), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3775, 3781), True, 'import pandas as pd\n'), ((4092, 4110), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (4104, 4110), True, 'import pandas as pd\n'), ((4225, 4243), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (4237, 4243), True, 'import pandas as pd\n'), ((9406, 9416), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9414, 9416), True, 'import matplotlib.pyplot as plt\n'), ((2204, 2226), 'random.choice', 'random.choice', (['fnlistf'], {}), '(fnlistf)\n', (2217, 2226), False, 'import random\n'), ((2287, 2309), 'random.choice', 'random.choice', (['fnlistf'], {}), '(fnlistf)\n', (2300, 2309), False, 'import random\n'), ((5291, 5344), 'numpy.where', 'np.where', (["(temp1['rand'] < temp1['Prob'])", 'year', 'np.nan'], {}), "(temp1['rand'] < temp1['Prob'], year, np.nan)\n", (5299, 5344), True, 'import numpy as np\n'), ((5417, 5447), 'pandas.concat', 'pd.concat', (['[df, temp1]'], {'axis': '(1)'}), '([df, temp1], axis=1)\n', (5426, 5447), True, 'import pandas as pd\n'), ((10263, 10274), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10272, 10274), False, 'import os\n'), ((5220, 5235), 'random.random', 'random.random', ([], {}), '()\n', (5233, 5235), False, 'import random\n'), ((7115, 7189), 'numpy.where', 'np.where', (["(babies['Sex'] == 'F')", "babies['FFirstName']", "babies['MFirstName']"], {}), "(babies['Sex'] == 'F', babies['FFirstName'], babies['MFirstName'])\n", (7123, 7189), True, 'import numpy as np\n'), ((7225, 7301), 'numpy.where', 'np.where', (["(babies['Sex'] == 'F')", "babies['FMiddleName']", "babies['MMiddleName']"], {}), "(babies['Sex'] == 'F', babies['FMiddleName'], babies['MMiddleName'])\n", (7233, 7301), True, 'import numpy as np\n'), ((8749, 8773), 'random.choice', 'random.choice', (['alivelist'], {}), '(alivelist)\n', (8762, 8773), False, 'import random\n'), ((5897, 5912), 'random.random', 'random.random', ([], {}), '()\n', (5910, 5912), False, 'import random\n'), ((6301, 6326), 'random.choice', 'random.choice', (["['F', 'M']"], {}), "(['F', 'M'])\n", (6314, 6326), False, 'import random\n'), ((6468, 6490), 'random.choice', 'random.choice', (['fnlistm'], {}), '(fnlistm)\n', (6481, 6490), False, 'import random\n'), ((6648, 6670), 'random.choice', 'random.choice', (['fnlistm'], {}), '(fnlistm)\n', (6661, 6670), False, 'import random\n'), ((6828, 6850), 'random.choice', 'random.choice', (['fnlistf'], {}), '(fnlistf)\n', (6841, 6850), False, 'import random\n'), ((7008, 7030), 'random.choice', 'random.choice', (['fnlistf'], {}), '(fnlistf)\n', (7021, 7030), False, 'import random\n'), ((7786, 7809), 'pandas.concat', 
'pd.concat', (['[df, babies]'], {}), '([df, babies])\n', (7795, 7809), True, 'import pandas as pd\n')]
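One detail of the simulator above that is easy to miss is how it turns the name-frequency table into weighted random draws: repeating each row Freq times with index.repeat/reindex lets a plain random.choice act as a frequency-weighted sampler. A small self-contained illustration of that idiom, with made-up names and weights:

import random
import pandas as pd

# Toy stand-in for the cleaned first-name table; names and weights are made up.
names = pd.DataFrame({'name': ['ANNA', 'BETH', 'CARA'], 'Freq': [3, 1, 2]})

# Repeating each row `Freq` times turns a uniform random.choice into a
# frequency-weighted draw -- the same reindex/repeat idiom the simulator uses.
weighted_pool = names.reindex(names.index.repeat(names['Freq']))['name'].tolist()
# weighted_pool == ['ANNA', 'ANNA', 'ANNA', 'BETH', 'CARA', 'CARA']
print(random.choice(weighted_pool))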
import numpy as np
import cv2
import grpc
from tritonclient.grpc import service_pb2, service_pb2_grpc
import tritonclient.grpc.model_config_pb2 as mc

np.random.seed(123)
palette = np.random.randint(0, 256, (100, 3))

# url = '10.128.61.7:8001'
url = '127.0.0.1:8001'
model_name = 'bisenetv2'
model_version = '1'
inp_name = 'input_image'
outp_name = 'preds'
inp_dtype = 'FP32'
outp_dtype = np.int64
inp_shape = [1, 3, 1024, 2048]
outp_shape = [1024, 2048]
impth = '../example.png'
mean = [0.3257, 0.3690, 0.3223] # city, rgb
std = [0.2112, 0.2148, 0.2115]

option = [
    ('grpc.max_receive_message_length', 1073741824),
    ('grpc.max_send_message_length', 1073741824),
]
channel = grpc.insecure_channel(url, options=option)
grpc_stub = service_pb2_grpc.GRPCInferenceServiceStub(channel)

metadata_request = service_pb2.ModelMetadataRequest(
    name=model_name, version=model_version)
metadata_response = grpc_stub.ModelMetadata(metadata_request)
print(metadata_response)

config_request = service_pb2.ModelConfigRequest(
    name=model_name, version=model_version)
config_response = grpc_stub.ModelConfig(config_request)
print(config_response)

request = service_pb2.ModelInferRequest()
request.model_name = model_name
request.model_version = model_version

inp = service_pb2.ModelInferRequest().InferInputTensor()
inp.name = inp_name
inp.datatype = inp_dtype
inp.shape.extend(inp_shape)

mean = np.array(mean).reshape(1, 1, 3)
std = np.array(std).reshape(1, 1, 3)
im = cv2.imread(impth)[:, :, ::-1]
im = cv2.resize(im, dsize=tuple(inp_shape[-1:-3:-1]))
im = ((im / 255.) - mean) / std
im = im[None, ...].transpose(0, 3, 1, 2)
inp_bytes = im.astype(np.float32).tobytes()

request.ClearField("inputs")
request.ClearField("raw_input_contents")
request.inputs.extend([inp,])
request.raw_input_contents.extend([inp_bytes,])

outp = service_pb2.ModelInferRequest().InferRequestedOutputTensor()
outp.name = outp_name
request.outputs.extend([outp,])

# sync
# resp = grpc_stub.ModelInfer(request).raw_output_contents[0]

# async
resp = grpc_stub.ModelInfer.future(request)
resp = resp.result().raw_output_contents[0]

out = np.frombuffer(resp, dtype=outp_dtype).reshape(*outp_shape)
out = palette[out]
cv2.imwrite('res.png', out)
[ "tritonclient.grpc.service_pb2.ModelInferRequest", "tritonclient.grpc.service_pb2_grpc.GRPCInferenceServiceStub", "numpy.random.seed", "cv2.imwrite", "numpy.frombuffer", "grpc.insecure_channel", "cv2.imread", "tritonclient.grpc.service_pb2.ModelConfigRequest", "numpy.random.randint", "numpy.array", "tritonclient.grpc.service_pb2.ModelMetadataRequest" ]
[((155, 174), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (169, 174), True, 'import numpy as np\n'), ((185, 220), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(100, 3)'], {}), '(0, 256, (100, 3))\n', (202, 220), True, 'import numpy as np\n'), ((707, 749), 'grpc.insecure_channel', 'grpc.insecure_channel', (['url'], {'options': 'option'}), '(url, options=option)\n', (728, 749), False, 'import grpc\n'), ((762, 812), 'tritonclient.grpc.service_pb2_grpc.GRPCInferenceServiceStub', 'service_pb2_grpc.GRPCInferenceServiceStub', (['channel'], {}), '(channel)\n', (803, 812), False, 'from tritonclient.grpc import service_pb2, service_pb2_grpc\n'), ((834, 906), 'tritonclient.grpc.service_pb2.ModelMetadataRequest', 'service_pb2.ModelMetadataRequest', ([], {'name': 'model_name', 'version': 'model_version'}), '(name=model_name, version=model_version)\n', (866, 906), False, 'from tritonclient.grpc import service_pb2, service_pb2_grpc\n'), ((1017, 1087), 'tritonclient.grpc.service_pb2.ModelConfigRequest', 'service_pb2.ModelConfigRequest', ([], {'name': 'model_name', 'version': 'model_version'}), '(name=model_name, version=model_version)\n', (1047, 1087), False, 'from tritonclient.grpc import service_pb2, service_pb2_grpc\n'), ((1196, 1227), 'tritonclient.grpc.service_pb2.ModelInferRequest', 'service_pb2.ModelInferRequest', ([], {}), '()\n', (1225, 1227), False, 'from tritonclient.grpc import service_pb2, service_pb2_grpc\n'), ((2239, 2266), 'cv2.imwrite', 'cv2.imwrite', (['"""res.png"""', 'out'], {}), "('res.png', out)\n", (2250, 2266), False, 'import cv2\n'), ((1512, 1529), 'cv2.imread', 'cv2.imread', (['impth'], {}), '(impth)\n', (1522, 1529), False, 'import cv2\n'), ((1305, 1336), 'tritonclient.grpc.service_pb2.ModelInferRequest', 'service_pb2.ModelInferRequest', ([], {}), '()\n', (1334, 1336), False, 'from tritonclient.grpc import service_pb2, service_pb2_grpc\n'), ((1438, 1452), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (1446, 1452), True, 'import numpy as np\n'), ((1476, 1489), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (1484, 1489), True, 'import numpy as np\n'), ((1871, 1902), 'tritonclient.grpc.service_pb2.ModelInferRequest', 'service_pb2.ModelInferRequest', ([], {}), '()\n', (1900, 1902), False, 'from tritonclient.grpc import service_pb2, service_pb2_grpc\n'), ((2160, 2197), 'numpy.frombuffer', 'np.frombuffer', (['resp'], {'dtype': 'outp_dtype'}), '(resp, dtype=outp_dtype)\n', (2173, 2197), True, 'import numpy as np\n')]
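The client above gets its segmentation map back as raw bytes in raw_output_contents[0] and rebuilds it with np.frombuffer, reshape and a palette lookup. A tiny round-trip sketch of that decode step, using fabricated data in place of a real Triton response:

import numpy as np

# Fabricated stand-in for the raw bytes a response would carry in
# raw_output_contents[0]: serialise a fake 4x6 int64 label map.
fake_preds = np.random.randint(0, 10, size=(4, 6)).astype(np.int64)
raw_bytes = fake_preds.tobytes()

# Decoding mirrors the client above: reinterpret the bytes with the output
# dtype, restore the output shape, then map class ids to RGB via the palette.
palette = np.random.randint(0, 256, (100, 3))
labels = np.frombuffer(raw_bytes, dtype=np.int64).reshape(4, 6)
colour = palette[labels]  # shape (4, 6, 3)
assert (labels == fake_preds).all()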
from __future__ import division import math import matplotlib as mpl import numpy as np from matplotlib.ticker import AutoMinorLocator from matplotlib.ticker import MultipleLocator from matplotlib.ticker import FixedLocator from matplotlib.ticker import LogLocator from matplotlib.ticker import FormatStrFormatter from sets import Set import sys import math from collections import defaultdict # matplotlib import matplotlib as mpl import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import matplotlib.patches as mpatches from matplotlib.legend_handler import HandlerLine2D from mpl_toolkits.mplot3d import axes3d from matplotlib import cm # Stuff for calculating areas. from scipy.integrate import simps from scipy import interpolate from scipy import optimize from numpy import trapz from matplotlib import gridspec from matplotlib.cbook import get_sample_data from matplotlib._png import read_png from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox from scipy.stats import norm from scipy.stats import gamma from scipy import arange, array, exp from scipy.stats import binned_statistic import sympy as sp from scipy.misc import comb from scipy.misc import factorial from scipy.special import gamma from scipy.special import zeta from scipy.misc import derivative mpl.rcParams['axes.linewidth'] = 5.0 #set the value globally mpl.rcParams['text.usetex'] = True mpl.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"] mpl.rcParams['text.latex.preamble'] = [r'\boldmath'] plt.rc('font', family='serif', size=43) def get_eigenvalues(n): # Construct creation operator first. a_creation = np.zeros((n, n)) for i in range(n): a_creation[i, i - 1] = np.sqrt(i) a_anhilation = np.zeros((n, n)) for i in range(n): a_anhilation[i - 1, i] = np.sqrt(i) position = a_creation + a_anhilation momentum = 1.j * ( a_creation - a_anhilation) berry_H = (position * momentum + momentum * position) / 2 # print berry_H eigenvalues = sorted(np.linalg.eigvalsh(berry_H)) return eigenvalues def get_eigenvalue_differences(n): eigenvalues = get_eigenvalues(n) normalized_differences = np.diff(eigenvalues) normalized_differences *= 1 / np.mean(normalized_differences) return normalized_differences def construct_operators(n): normalized_differences = get_eigenvalue_differences(n) plt.hist(normalized_differences, color="red", bins=100, lw=5, histtype='step', edgecolor="red", normed=1) plt.autoscale() plt.xlim(0, 3) plt.gca().xaxis.set_minor_locator(MultipleLocator(0.1)) plt.gca().yaxis.set_minor_locator(MultipleLocator(0.1)) plt.tick_params(which='major', width=5, length=25, labelsize=70) plt.tick_params(which='minor', width=3, length=15) plt.xlabel("Normalized Zero Difference", labelpad=50) plt.ylabel("Normalized Frequency", labelpad=50) plt.gcf().set_size_inches(30, 24, forward=1) plt.savefig("plots/qm.pdf") plt.clf() return normalized_differences def write_eigenvalues(filename, eigenvalues): f = open(filename, "w") for eigenvalue in eigenvalues: f.write(str(eigenvalue) + "\n") f.close() def write_min_eigenvalue_diff_vs_N(): n_range = range(1000, 2000) f = open("data/min_difference.dat", "a") for N in n_range: minimum = min(get_eigenvalue_differences(N)) f.write("{},{}\n".format(N, minimum)) f.close() def read_min_eigenvalues_differences_vs_N(): f = open("data/min_differences.dat", "r") lines = f.read().split("\n") N_s, mins = [], [] for line in lines: if len(line) != 0: content = line.split(",") N_s.append( int( content[0] ) ) mins.append( float( content[1] ) ) return N_s, 
mins def plot_min_eigenvalues_differences_vs_N(): max_N = 200 N_s, mins = read_min_eigenvalues_differences_vs_N() N_s, mins = N_s[:max_N], mins[:max_N] plt.plot(N_s, mins, color="orange", lw=5) # plt.hist(N_s, weights=mins, bins=100, color="purple", histtype='step', lw=5, normed=True) # plt.xscale('log') # plt.autoscale() plt.xlabel("Matrix Size, $N$", labelpad=30, fontsize=70) plt.ylabel("Min. Eigenvalue Difference", labelpad=30, fontsize=70) plt.xlim(0, max_N) plt.ylim(0, plt.ylim()[1]) plt.gca().xaxis.set_minor_locator(MultipleLocator(10)) plt.gca().yaxis.set_minor_locator(MultipleLocator(0.02)) plt.tick_params(which='major', width=5, length=25, labelsize=70) plt.tick_params(which='minor', width=3, length=15) plt.gcf().set_size_inches(30, 24, forward=1) plt.grid() plt.savefig("plots/qm_min_eigenvalues_differences.pdf") plt.clf() def plot_max_eigenvalue_vs_N(): N_s = [] max_s = [] for i in range(5, 100): N_s.append(i) max_s.append( max(get_eigenvalues(i)) ) plt.plot(N_s, max_s, lw=5, color="green") plt.autoscale() plt.gcf().set_size_inches(30, 24, forward=1) plt.savefig("plots/max_eigenvalues.pdf") plt.clf() def plot_max_eigenvalue_diff_vs_N(): N_s = [] max_s = [] for i in range(5, 100): N_s.append(i) max_s.append( max(get_eigenvalue_differences(i)) ) plt.plot(N_s, max_s, lw=5, color="green") plt.autoscale() plt.gcf().set_size_inches(30, 24, forward=1) plt.savefig("plots/max_eigenvalue_diff.pdf") plt.clf() def plot_min_eigenvalue_vs_N(): N_s = [] minimum_eigenvalues = [] for i in range(5, 101): N_s.append(i) minimum_eigenvalues.append( min([ l for l in np.abs(get_eigenvalues(i)) if l > 1e-5 ] ) ) plt.plot(N_s, minimum_eigenvalues, lw=5, color="green") plt.xlabel("Matrix Size, $N$", labelpad=30, fontsize=70) plt.ylabel("Min. Eigenvalue", labelpad=30, fontsize=70) plt.autoscale() plt.gca().xaxis.set_minor_locator(MultipleLocator(10)) plt.gca().yaxis.set_minor_locator(MultipleLocator(0.05)) plt.tick_params(which='major', width=5, length=25, labelsize=70) plt.tick_params(which='minor', width=3, length=15) plt.gcf().set_size_inches(30, 24, forward=1) plt.savefig("plots/qm_min_eigenvalues.pdf") plt.clf() def plot_qm_eigenvalues(): plt.hist(get_eigenvalues(100), label="N = 100", bins=20, color="red", edgecolor='red', histtype='step', lw=5, normed=1) plt.hist(get_eigenvalues(200), label="N = 200", bins=50, color="blue", edgecolor='blue', histtype='step', lw=5, normed=1) plt.hist(get_eigenvalues(500), label="N = 500", bins=50, color="green", edgecolor='green', histtype='step', lw=5, normed=1) # plt.xscale('log') # plt.autoscale() plt.xlabel("Eigenvalues", labelpad=30, fontsize=70) plt.legend() # plt.xlim(0, max_N) plt.ylim(0, plt.ylim()[1] * 1.2) plt.gca().xaxis.set_minor_locator(MultipleLocator(100)) plt.gca().yaxis.set_minor_locator(MultipleLocator(0.00025)) plt.tick_params(which='major', width=5, length=25, labelsize=70) plt.tick_params(which='minor', width=3, length=15) plt.gcf().set_size_inches(30, 24, forward=1) plt.grid() plt.savefig("plots/qm_eigenvalues.pdf") plt.clf() if __name__ == '__main__': # eigenvalues = get_eigenvalue_differences(n=1000) # write_eigenvalues("data/qm.dat", eigenvalues) # plot_ # write_min_eigenvalue_diff_vs_N() # plot_min_vs_N() # plot_max_eigenvalue_vs_N() # plot_max_eigenvalue_diff_vs_N() # plot_min_eigenvalue_vs_N() # plot_min_eigenvalues_differences_vs_N() # plot_qm_eigenvalues() pass
[ "matplotlib.pyplot.clf", "numpy.mean", "matplotlib.pyplot.gca", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.autoscale", "matplotlib.pyplot.rc", "matplotlib.ticker.MultipleLocator", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "numpy.linalg.eigvalsh", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.gcf", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlim", "matplotlib.pyplot.hist", "matplotlib.pyplot.plot", "numpy.zeros", "numpy.diff", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "numpy.sqrt" ]
[((1575, 1614), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""', 'size': '(43)'}), "('font', family='serif', size=43)\n", (1581, 1614), True, 'import matplotlib.pyplot as plt\n'), ((1697, 1713), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1705, 1713), True, 'import numpy as np\n'), ((1787, 1803), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1795, 1803), True, 'import numpy as np\n'), ((2201, 2221), 'numpy.diff', 'np.diff', (['eigenvalues'], {}), '(eigenvalues)\n', (2208, 2221), True, 'import numpy as np\n'), ((2409, 2519), 'matplotlib.pyplot.hist', 'plt.hist', (['normalized_differences'], {'color': '"""red"""', 'bins': '(100)', 'lw': '(5)', 'histtype': '"""step"""', 'edgecolor': '"""red"""', 'normed': '(1)'}), "(normalized_differences, color='red', bins=100, lw=5, histtype=\n 'step', edgecolor='red', normed=1)\n", (2417, 2519), True, 'import matplotlib.pyplot as plt\n'), ((2517, 2532), 'matplotlib.pyplot.autoscale', 'plt.autoscale', ([], {}), '()\n', (2530, 2532), True, 'import matplotlib.pyplot as plt\n'), ((2534, 2548), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(3)'], {}), '(0, 3)\n', (2542, 2548), True, 'import matplotlib.pyplot as plt\n'), ((2667, 2731), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'which': '"""major"""', 'width': '(5)', 'length': '(25)', 'labelsize': '(70)'}), "(which='major', width=5, length=25, labelsize=70)\n", (2682, 2731), True, 'import matplotlib.pyplot as plt\n'), ((2733, 2783), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'which': '"""minor"""', 'width': '(3)', 'length': '(15)'}), "(which='minor', width=3, length=15)\n", (2748, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2788, 2841), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Normalized Zero Difference"""'], {'labelpad': '(50)'}), "('Normalized Zero Difference', labelpad=50)\n", (2798, 2841), True, 'import matplotlib.pyplot as plt\n'), ((2843, 2890), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalized Frequency"""'], {'labelpad': '(50)'}), "('Normalized Frequency', labelpad=50)\n", (2853, 2890), True, 'import matplotlib.pyplot as plt\n'), ((2941, 2968), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/qm.pdf"""'], {}), "('plots/qm.pdf')\n", (2952, 2968), True, 'import matplotlib.pyplot as plt\n'), ((2970, 2979), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2977, 2979), True, 'import matplotlib.pyplot as plt\n'), ((3862, 3903), 'matplotlib.pyplot.plot', 'plt.plot', (['N_s', 'mins'], {'color': '"""orange"""', 'lw': '(5)'}), "(N_s, mins, color='orange', lw=5)\n", (3870, 3903), True, 'import matplotlib.pyplot as plt\n'), ((4041, 4097), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Matrix Size, $N$"""'], {'labelpad': '(30)', 'fontsize': '(70)'}), "('Matrix Size, $N$', labelpad=30, fontsize=70)\n", (4051, 4097), True, 'import matplotlib.pyplot as plt\n'), ((4099, 4165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Min. Eigenvalue Difference"""'], {'labelpad': '(30)', 'fontsize': '(70)'}), "('Min. 
Eigenvalue Difference', labelpad=30, fontsize=70)\n", (4109, 4165), True, 'import matplotlib.pyplot as plt\n'), ((4168, 4186), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'max_N'], {}), '(0, max_N)\n', (4176, 4186), True, 'import matplotlib.pyplot as plt\n'), ((4333, 4397), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'which': '"""major"""', 'width': '(5)', 'length': '(25)', 'labelsize': '(70)'}), "(which='major', width=5, length=25, labelsize=70)\n", (4348, 4397), True, 'import matplotlib.pyplot as plt\n'), ((4399, 4449), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'which': '"""minor"""', 'width': '(3)', 'length': '(15)'}), "(which='minor', width=3, length=15)\n", (4414, 4449), True, 'import matplotlib.pyplot as plt\n'), ((4499, 4509), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4507, 4509), True, 'import matplotlib.pyplot as plt\n'), ((4511, 4566), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/qm_min_eigenvalues_differences.pdf"""'], {}), "('plots/qm_min_eigenvalues_differences.pdf')\n", (4522, 4566), True, 'import matplotlib.pyplot as plt\n'), ((4568, 4577), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4575, 4577), True, 'import matplotlib.pyplot as plt\n'), ((4721, 4762), 'matplotlib.pyplot.plot', 'plt.plot', (['N_s', 'max_s'], {'lw': '(5)', 'color': '"""green"""'}), "(N_s, max_s, lw=5, color='green')\n", (4729, 4762), True, 'import matplotlib.pyplot as plt\n'), ((4765, 4780), 'matplotlib.pyplot.autoscale', 'plt.autoscale', ([], {}), '()\n', (4778, 4780), True, 'import matplotlib.pyplot as plt\n'), ((4830, 4870), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/max_eigenvalues.pdf"""'], {}), "('plots/max_eigenvalues.pdf')\n", (4841, 4870), True, 'import matplotlib.pyplot as plt\n'), ((4872, 4881), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4879, 4881), True, 'import matplotlib.pyplot as plt\n'), ((5041, 5082), 'matplotlib.pyplot.plot', 'plt.plot', (['N_s', 'max_s'], {'lw': '(5)', 'color': '"""green"""'}), "(N_s, max_s, lw=5, color='green')\n", (5049, 5082), True, 'import matplotlib.pyplot as plt\n'), ((5085, 5100), 'matplotlib.pyplot.autoscale', 'plt.autoscale', ([], {}), '()\n', (5098, 5100), True, 'import matplotlib.pyplot as plt\n'), ((5150, 5194), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/max_eigenvalue_diff.pdf"""'], {}), "('plots/max_eigenvalue_diff.pdf')\n", (5161, 5194), True, 'import matplotlib.pyplot as plt\n'), ((5196, 5205), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5203, 5205), True, 'import matplotlib.pyplot as plt\n'), ((5414, 5469), 'matplotlib.pyplot.plot', 'plt.plot', (['N_s', 'minimum_eigenvalues'], {'lw': '(5)', 'color': '"""green"""'}), "(N_s, minimum_eigenvalues, lw=5, color='green')\n", (5422, 5469), True, 'import matplotlib.pyplot as plt\n'), ((5473, 5529), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Matrix Size, $N$"""'], {'labelpad': '(30)', 'fontsize': '(70)'}), "('Matrix Size, $N$', labelpad=30, fontsize=70)\n", (5483, 5529), True, 'import matplotlib.pyplot as plt\n'), ((5531, 5586), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Min. Eigenvalue"""'], {'labelpad': '(30)', 'fontsize': '(70)'}), "('Min. 
Eigenvalue', labelpad=30, fontsize=70)\n", (5541, 5586), True, 'import matplotlib.pyplot as plt\n'), ((5589, 5604), 'matplotlib.pyplot.autoscale', 'plt.autoscale', ([], {}), '()\n', (5602, 5604), True, 'import matplotlib.pyplot as plt\n'), ((5723, 5787), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'which': '"""major"""', 'width': '(5)', 'length': '(25)', 'labelsize': '(70)'}), "(which='major', width=5, length=25, labelsize=70)\n", (5738, 5787), True, 'import matplotlib.pyplot as plt\n'), ((5789, 5839), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'which': '"""minor"""', 'width': '(3)', 'length': '(15)'}), "(which='minor', width=3, length=15)\n", (5804, 5839), True, 'import matplotlib.pyplot as plt\n'), ((5889, 5932), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/qm_min_eigenvalues.pdf"""'], {}), "('plots/qm_min_eigenvalues.pdf')\n", (5900, 5932), True, 'import matplotlib.pyplot as plt\n'), ((5934, 5943), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5941, 5943), True, 'import matplotlib.pyplot as plt\n'), ((6395, 6446), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Eigenvalues"""'], {'labelpad': '(30)', 'fontsize': '(70)'}), "('Eigenvalues', labelpad=30, fontsize=70)\n", (6405, 6446), True, 'import matplotlib.pyplot as plt\n'), ((6450, 6462), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6460, 6462), True, 'import matplotlib.pyplot as plt\n'), ((6642, 6706), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'which': '"""major"""', 'width': '(5)', 'length': '(25)', 'labelsize': '(70)'}), "(which='major', width=5, length=25, labelsize=70)\n", (6657, 6706), True, 'import matplotlib.pyplot as plt\n'), ((6708, 6758), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'which': '"""minor"""', 'width': '(3)', 'length': '(15)'}), "(which='minor', width=3, length=15)\n", (6723, 6758), True, 'import matplotlib.pyplot as plt\n'), ((6808, 6818), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6816, 6818), True, 'import matplotlib.pyplot as plt\n'), ((6820, 6859), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/qm_eigenvalues.pdf"""'], {}), "('plots/qm_eigenvalues.pdf')\n", (6831, 6859), True, 'import matplotlib.pyplot as plt\n'), ((6861, 6870), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6868, 6870), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1769), 'numpy.sqrt', 'np.sqrt', (['i'], {}), '(i)\n', (1766, 1769), True, 'import numpy as np\n'), ((1851, 1861), 'numpy.sqrt', 'np.sqrt', (['i'], {}), '(i)\n', (1858, 1861), True, 'import numpy as np\n'), ((2049, 2076), 'numpy.linalg.eigvalsh', 'np.linalg.eigvalsh', (['berry_H'], {}), '(berry_H)\n', (2067, 2076), True, 'import numpy as np\n'), ((2253, 2284), 'numpy.mean', 'np.mean', (['normalized_differences'], {}), '(normalized_differences)\n', (2260, 2284), True, 'import numpy as np\n'), ((2585, 2605), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.1)'], {}), '(0.1)\n', (2600, 2605), False, 'from matplotlib.ticker import MultipleLocator\n'), ((2642, 2662), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.1)'], {}), '(0.1)\n', (2657, 2662), False, 'from matplotlib.ticker import MultipleLocator\n'), ((4251, 4270), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (4266, 4270), False, 'from matplotlib.ticker import MultipleLocator\n'), ((4307, 4328), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.02)'], {}), '(0.02)\n', (4322, 4328), False, 'from matplotlib.ticker import 
MultipleLocator\n'), ((5641, 5660), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (5656, 5660), False, 'from matplotlib.ticker import MultipleLocator\n'), ((5697, 5718), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.05)'], {}), '(0.05)\n', (5712, 5718), False, 'from matplotlib.ticker import MultipleLocator\n'), ((6556, 6576), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(100)'], {}), '(100)\n', (6571, 6576), False, 'from matplotlib.ticker import MultipleLocator\n'), ((6613, 6637), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.00025)'], {}), '(0.00025)\n', (6628, 6637), False, 'from matplotlib.ticker import MultipleLocator\n'), ((2893, 2902), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2900, 2902), True, 'import matplotlib.pyplot as plt\n'), ((4200, 4210), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (4208, 4210), True, 'import matplotlib.pyplot as plt\n'), ((4452, 4461), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4459, 4461), True, 'import matplotlib.pyplot as plt\n'), ((4783, 4792), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4790, 4792), True, 'import matplotlib.pyplot as plt\n'), ((5103, 5112), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5110, 5112), True, 'import matplotlib.pyplot as plt\n'), ((5842, 5851), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5849, 5851), True, 'import matplotlib.pyplot as plt\n'), ((6761, 6770), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6768, 6770), True, 'import matplotlib.pyplot as plt\n'), ((2551, 2560), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2558, 2560), True, 'import matplotlib.pyplot as plt\n'), ((2608, 2617), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2615, 2617), True, 'import matplotlib.pyplot as plt\n'), ((4217, 4226), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4224, 4226), True, 'import matplotlib.pyplot as plt\n'), ((4273, 4282), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4280, 4282), True, 'import matplotlib.pyplot as plt\n'), ((5607, 5616), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5614, 5616), True, 'import matplotlib.pyplot as plt\n'), ((5663, 5672), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5670, 5672), True, 'import matplotlib.pyplot as plt\n'), ((6499, 6509), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (6507, 6509), True, 'import matplotlib.pyplot as plt\n'), ((6522, 6531), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6529, 6531), True, 'import matplotlib.pyplot as plt\n'), ((6579, 6588), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6586, 6588), True, 'import matplotlib.pyplot as plt\n')]
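The core computation behind the plots above is the normalized nearest-neighbour spacing of the eigenvalues of the (xp + px)/2 matrix built from truncated creation/annihilation operators. A condensed, self-contained restatement of that step (variable names are mine):

import numpy as np

def normalized_spacings(n):
    # Condensed restatement of get_eigenvalue_differences() above, for a quick
    # standalone check of the spacing histogram.
    a_dag = np.zeros((n, n))
    for i in range(1, n):
        a_dag[i, i - 1] = np.sqrt(i)
    a = a_dag.T
    position = a_dag + a
    momentum = 1j * (a_dag - a)
    # NB: as in the sample above, '*' is NumPy's elementwise product; the
    # operator product x.p would use the matrix product ('@') instead.
    h = (position * momentum + momentum * position) / 2
    spacings = np.diff(np.sort(np.linalg.eigvalsh(h)))
    return spacings / spacings.mean()

# normalized_spacings(1000) reproduces the data behind the histogram drawn
# in construct_operators().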
import sys import gym import tensorflow as tf import numpy as np import random import datetime import os from collections import deque os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' """ Hyper Parameters """ GAMMA = 0.99 # discount factor for target Q INITIAL_EPSILON = 0.8 # starting value of epsilon FINAL_EPSILON = 0.01 # final value of epsilon EPSILON_DECAY_STEPS = 200 REPLAY_SIZE = 100000 # experience replay buffer size BATCH_SIZE = 128 # size of minibatch TEST_FREQUENCY = 1000 # How many episodes to run before visualizing test accuracy SAVE_FREQUENCY = 1000 # How many episodes to run before saving model (unused) # The number of test iters (with epsilon set to 0) to run every TEST_FREQUENCY episodes NUM_TEST_EPS = 100 NUM_EPISODES = TEST_FREQUENCY + NUM_TEST_EPS # Episode limitation EP_MAX_STEPS = 1000 # Step limitation in an episode HIDDEN_NODES = 256 FIRST_TARGET_UPDATE = 0 TARGET_UPDATE_FREQ = 1000 # How often to update target network weights AVERAGE_OVER = 100 latest_100 = deque(maxlen=AVERAGE_OVER) def init(env, env_name): """ Initialise any globals, e.g. the replay_buffer, epsilon, etc. return: state_dim: The length of the state vector for the env action_dim: The length of the action space, i.e. the number of actions NB: for discrete action envs such as the cartpole and mountain car, this function can be left unchanged. Hints for envs with continuous action spaces, e.g. "Pendulum-v0" 1) you'll need to modify this function to discretise the action space and create a global dictionary mapping from action index to action (which you can use in `get_env_action()`) 2) for Pendulum-v0 `env.action_space.low[0]` and `env.action_space.high[0]` are the limits of the action space. 3) setting a global flag iscontinuous which you can use in `get_env_action()` might help in using the same code for discrete and (discretised) continuous action spaces """ global replay_buffer, epsilon, iscontinuous, action_map, action_dim replay_buffer = [] epsilon = INITIAL_EPSILON state_dim = env.observation_space.shape[0] iscontinuous = not isinstance(env.action_space,gym.spaces.discrete.Discrete) if iscontinuous: action_map = dict() BIN_FACTOR = 100 action_dim = int((env.action_space.high[0] - env.action_space.low[0]) * BIN_FACTOR +1) values = np.arange(env.action_space.low[0],env.action_space.high[0]+1,1/BIN_FACTOR) for i in range(action_dim): action_map[i] = values[i] else: action_dim = env.action_space.n return state_dim, action_dim def get_network(state_dim, action_dim, hidden_nodes=HIDDEN_NODES): """Define the neural network used to approximate the q-function The suggested structure is to have each output node represent a Q value for one action. e.g. for cartpole there will be two output nodes. Hints: 1) Given how q-values are used within RL, is it necessary to have output activation functions? 2) You will set `target_in` in `get_train_batch` further down. Probably best to implement that before implementing the loss (there are further hints there) """ state_in = tf.placeholder("float", [None, state_dim]) action_in = tf.placeholder("float", [None, action_dim]) # one hot target_in = tf.placeholder("float", [None]) # q value for the target network # TO IMPLEMENT: Q network, whose input is state_in, and has action_dim outputs # which are the network's esitmation of the Q values for those actions and the # input state. 
The final layer should be assigned to the variable q_values initializer = tf.random_normal_initializer(0., 0.1) with tf.variable_scope("q_network"): layer1 = tf.layers.dense(state_in,hidden_nodes,activation=tf.nn.relu, kernel_initializer=initializer) layer2 = tf.layers.dense(layer1,hidden_nodes,activation=tf.nn.relu, kernel_initializer=initializer) q_values = tf.layers.dense(layer2,action_dim,activation=None, kernel_initializer=initializer) q_selected_action = \ tf.reduce_sum(tf.multiply(q_values, action_in), reduction_indices=1) # TO IMPLEMENT: loss function # should only be one line, if target_in is implemented correctly loss = tf.reduce_mean(tf.square(target_in - q_selected_action,name="loss")) optimise_step = tf.train.AdamOptimizer().minimize(loss) train_loss_summary_op = tf.summary.scalar("TrainingLoss", tf.reduce_mean(loss)) return state_in, action_in, target_in, q_values, q_selected_action, \ loss, optimise_step, train_loss_summary_op def get_target_network(state_dim, action_dim, hidden_nodes=HIDDEN_NODES): state_in = tf.placeholder("float", [None, state_dim]) action_in = tf.placeholder("float", [None, action_dim]) w1 = tf.placeholder("float", [state_dim, hidden_nodes]) b1 = tf.placeholder("float", [hidden_nodes]) w2 = tf.placeholder("float", [hidden_nodes, hidden_nodes]) b2 = tf.placeholder("float", [hidden_nodes]) w3 = tf.placeholder("float", [hidden_nodes, action_dim]) b3 = tf.placeholder("float", [action_dim]) train_weights = w1, b1, w2, b2, w3, b3 initializer = tf.random_normal_initializer(0., 0.1) with tf.variable_scope("target_network"): layer1 = tf.layers.dense(state_in,hidden_nodes,activation=tf.nn.relu, kernel_initializer=initializer) layer2 = tf.layers.dense(layer1,hidden_nodes,activation=tf.nn.relu, kernel_initializer=initializer) q_values = tf.layers.dense(layer2,action_dim,activation=None, kernel_initializer=initializer) q_selected_action = \ tf.reduce_sum(tf.multiply(q_values, action_in), reduction_indices=1) update_weights_op = copy_vars_op(train_weights) return state_in, action_in, q_selected_action, update_weights_op, train_weights def copy_vars_op(train_weights): targets = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_network') assign_ops = [] for i in range(len(targets)): v = train_weights[i] op = targets[i].assign(v) assign_ops.append(op) return assign_ops #return tf.group(*assign_ops) def init_session(): global session, writer session = tf.InteractiveSession() session.run(tf.global_variables_initializer()) # Setup Logging logdir = "tensorboard/" + datetime.datetime.now().strftime( "%Y%m%d-%H%M%S") + "/" writer = tf.summary.FileWriter(logdir, session.graph) def get_action(state, state_in, q_values, epsilon, test_mode, action_dim): Q_estimates = q_values.eval(feed_dict={state_in: [state]})[0] epsilon_to_use = 0.0 if test_mode else epsilon if random.random() < epsilon_to_use: action = random.randint(0, action_dim - 1) else: action = np.argmax(Q_estimates) return action def get_env_action(action): """ Modify for continous action spaces that you have discretised, see hints in `init()` """ if iscontinuous: action=[action_map[action]] return action def update_replay_buffer(replay_buffer, state, action, reward, next_state, done, action_dim): """ Update the replay buffer with provided input in the form: (state, one_hot_action, reward, next_state, done) Hint: the minibatch passed to do_train_step is one entry (randomly sampled) from the replay_buffer """ global replay_priority_total # TO IMPLEMENT: append to the replay_buffer # ensure the action is 
encoded one hot one_hot_action = np.int32(np.eye(action_dim)[action]) replay_buffer.append([state, one_hot_action, reward, next_state, done]) # Ensure replay_buffer doesn't grow larger than REPLAY_SIZE if len(replay_buffer) > REPLAY_SIZE: replay_buffer.pop(0) return None def do_train_step(replay_buffer, state_in, action_in, target_in, q_values, q_selected_action, loss, optimise_step, train_loss_summary_op, batch_presentations_count): minibatch = random.sample(replay_buffer, BATCH_SIZE) target_batch, state_batch, action_batch = \ get_train_batch(q_values,state_in, minibatch) summary, _, new_loss = session.run([train_loss_summary_op, optimise_step,loss], feed_dict={ target_in: target_batch, state_in: state_batch, action_in: action_batch }) writer.add_summary(summary, batch_presentations_count) def get_train_batch(q_values,state_in, minibatch): """ Generate Batch samples for training by sampling the replay buffer" Batches values are suggested to be the following; state_batch: Batch of state values action_batch: Batch of action values target_batch: Target batch for (s,a) pair i.e. one application of the bellman update rule. return: target_batch, state_batch, action_batch Hints: 1) To calculate the target batch values, you will need to use the q_values for the next_state for each entry in the batch. 2) The target value, combined with your loss defined in `get_network()` should reflect the equation in the middle of slide 12 of Deep RL 1 Lecture notes here: https://webcms3.cse.unsw.edu.au/COMP9444/17s2/resources/12494 """ state_batch = [data[0] for data in minibatch] action_batch = [data[1] for data in minibatch] reward_batch = [data[2] for data in minibatch] next_state_batch = [data[3] for data in minibatch] target_batch = [] Q_values_batch = q_values.eval(feed_dict={ state_in: next_state_batch }) Q_action_batch = np.argmax(Q_values_batch,1) Q_action_batch = [np.eye(action_dim)[x] for x in Q_action_batch] Q_eval = q_eval_action.eval(feed_dict={ state_in_eval: next_state_batch, action_in_eval: Q_action_batch }) for i in range(0, BATCH_SIZE): sample_is_done = minibatch[i][4] if sample_is_done: target_batch.append(reward_batch[i]) else: # TO IMPLEMENT: set the target_val to the correct Q value update target_val = reward_batch[i]+ GAMMA*Q_eval[i] target_batch.append(target_val) return target_batch, state_batch, action_batch def qtrain(env, state_dim, action_dim, state_in, action_in, target_in, q_values, q_selected_action, loss, optimise_step, train_loss_summary_op, num_episodes=NUM_EPISODES, ep_max_steps=EP_MAX_STEPS, test_frequency=TEST_FREQUENCY, num_test_eps=NUM_TEST_EPS, final_epsilon=FINAL_EPSILON, epsilon_decay_steps=EPSILON_DECAY_STEPS, force_test_mode=False, render=True): global batch_init, epsilon,Q_loss, q_eval_action,update_weights_op, train_weights, state_in_eval, action_in_eval # Record the number of times we do a training batch, take a step, and # the total_reward across all eps batch_presentations_count = total_steps = total_reward = 0 state_in_eval, action_in_eval, q_eval_action, update_weights_op, train_weights = get_target_network(state_dim, action_dim) q_network_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_network')[:6] for episode in range(num_episodes): # initialize task state = env.reset() if render: env.render() # Update epsilon once per episode - exp decaying epsilon -= (epsilon - final_epsilon) / epsilon_decay_steps # in test mode we set epsilon to 0 test_mode = force_test_mode or \ ((episode % test_frequency) < num_test_eps and episode > num_test_eps ) 
if test_mode: print("Test mode (epsilon set to 0.0)") #if (episode == 0 or episode > FIRST_TARGET_UPDATE) and (episode % TARGET_UPDATE_FREQ == 0): if (episode == 0): weight_upates = [x.eval() for x in q_network_vars] for i in range(len(update_weights_op)): result= session.run(update_weights_op[i], feed_dict={train_weights[i]:weight_upates[i]}) ep_reward = 0 for step in range(ep_max_steps): total_steps += 1 # get an action and take a step in the environment action = get_action(state, state_in, q_values, epsilon, test_mode, action_dim) env_action = get_env_action(action) next_state, reward, done, _ = env.step(env_action) ep_reward += reward # display the updated environment if render: env.render() # comment this line to possibly reduce training time update_replay_buffer(replay_buffer, state, action, reward, next_state, done, action_dim) state = next_state # perform a training step if the replay_buffer has a batch worth of samples if (len(replay_buffer) > BATCH_SIZE and not test_mode): do_train_step(replay_buffer, state_in, action_in, target_in, q_values, q_selected_action, loss, optimise_step, train_loss_summary_op, batch_presentations_count) batch_presentations_count += 1 if (total_steps > FIRST_TARGET_UPDATE) and (total_steps % TARGET_UPDATE_FREQ == 0) and not test_mode: #print("============== COPYING WEIGHTS ===============") weight_upates = [x.eval() for x in q_network_vars] for i in range(len(update_weights_op)): result= session.run(update_weights_op[i], feed_dict={train_weights[i]:weight_upates[i]}) if done: break total_reward += ep_reward if len(latest_100) > AVERAGE_OVER: latest_100.popleft() latest_100.append(ep_reward) avg_reward = np.mean(latest_100) test_or_train = "test" if test_mode else "train" #print(replay_buffer.qsize()) print("end {0} episode {1}, ep reward: {2}, ave reward: {3}, \ Batch presentations: {4}, epsilon: {5}, total_steps: {6}".format( #test_or_train, episode, ep_reward, total_reward / (episode + 1), test_or_train, episode, ep_reward, avg_reward, batch_presentations_count, epsilon, total_steps )) #if avg_reward >= 195: # print("COMPLETE") # break def setup(): default_env_name = 'CartPole-v0' #default_env_name = 'MountainCar-v0' #default_env_name = 'Pendulum-v0' # if env_name provided as cmd line arg, then use that env_name = sys.argv[1] if len(sys.argv) > 1 else default_env_name env = gym.make(env_name) state_dim, action_dim = init(env, env_name) network_vars = get_network(state_dim, action_dim) init_session() return env, state_dim, action_dim, network_vars def main(): env, state_dim, action_dim, network_vars = setup() qtrain(env, state_dim, action_dim, *network_vars, render=False) if __name__ == "__main__": main()
[ "numpy.argmax", "tensorflow.get_collection", "random.sample", "tensorflow.multiply", "numpy.mean", "numpy.arange", "tensorflow.InteractiveSession", "collections.deque", "random.randint", "tensorflow.variable_scope", "tensorflow.placeholder", "tensorflow.summary.FileWriter", "datetime.datetime.now", "tensorflow.global_variables_initializer", "tensorflow.reduce_mean", "random.random", "tensorflow.random_normal_initializer", "gym.make", "tensorflow.layers.dense", "tensorflow.square", "numpy.eye", "tensorflow.train.AdamOptimizer" ]
[((1033, 1059), 'collections.deque', 'deque', ([], {'maxlen': 'AVERAGE_OVER'}), '(maxlen=AVERAGE_OVER)\n', (1038, 1059), False, 'from collections import deque\n'), ((3314, 3356), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, state_dim]'], {}), "('float', [None, state_dim])\n", (3328, 3356), True, 'import tensorflow as tf\n'), ((3374, 3417), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, action_dim]'], {}), "('float', [None, action_dim])\n", (3388, 3417), True, 'import tensorflow as tf\n'), ((3446, 3477), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None]'], {}), "('float', [None])\n", (3460, 3477), True, 'import tensorflow as tf\n'), ((3813, 3851), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (3841, 3851), True, 'import tensorflow as tf\n'), ((4997, 5039), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, state_dim]'], {}), "('float', [None, state_dim])\n", (5011, 5039), True, 'import tensorflow as tf\n'), ((5057, 5100), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, action_dim]'], {}), "('float', [None, action_dim])\n", (5071, 5100), True, 'import tensorflow as tf\n'), ((5113, 5163), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[state_dim, hidden_nodes]'], {}), "('float', [state_dim, hidden_nodes])\n", (5127, 5163), True, 'import tensorflow as tf\n'), ((5174, 5213), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[hidden_nodes]'], {}), "('float', [hidden_nodes])\n", (5188, 5213), True, 'import tensorflow as tf\n'), ((5224, 5277), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[hidden_nodes, hidden_nodes]'], {}), "('float', [hidden_nodes, hidden_nodes])\n", (5238, 5277), True, 'import tensorflow as tf\n'), ((5288, 5327), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[hidden_nodes]'], {}), "('float', [hidden_nodes])\n", (5302, 5327), True, 'import tensorflow as tf\n'), ((5338, 5389), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[hidden_nodes, action_dim]'], {}), "('float', [hidden_nodes, action_dim])\n", (5352, 5389), True, 'import tensorflow as tf\n'), ((5400, 5437), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[action_dim]'], {}), "('float', [action_dim])\n", (5414, 5437), True, 'import tensorflow as tf\n'), ((5503, 5541), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (5531, 5541), True, 'import tensorflow as tf\n'), ((6338, 6410), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""target_network"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_network')\n", (6355, 6410), True, 'import tensorflow as tf\n'), ((6689, 6712), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (6710, 6712), True, 'import tensorflow as tf\n'), ((6899, 6943), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['logdir', 'session.graph'], {}), '(logdir, session.graph)\n', (6920, 6943), True, 'import tensorflow as tf\n'), ((8550, 8590), 'random.sample', 'random.sample', (['replay_buffer', 'BATCH_SIZE'], {}), '(replay_buffer, BATCH_SIZE)\n', (8563, 8590), False, 'import random\n'), ((10169, 10197), 'numpy.argmax', 'np.argmax', (['Q_values_batch', '(1)'], {}), '(Q_values_batch, 1)\n', (10178, 10197), True, 'import numpy as np\n'), ((15438, 15456), 'gym.make', 'gym.make', (['env_name'], {}), 
'(env_name)\n', (15446, 15456), False, 'import gym\n'), ((2472, 2557), 'numpy.arange', 'np.arange', (['env.action_space.low[0]', '(env.action_space.high[0] + 1)', '(1 / BIN_FACTOR)'], {}), '(env.action_space.low[0], env.action_space.high[0] + 1, 1 / BIN_FACTOR\n )\n', (2481, 2557), True, 'import numpy as np\n'), ((3867, 3897), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""q_network"""'], {}), "('q_network')\n", (3884, 3897), True, 'import tensorflow as tf\n'), ((3917, 4015), 'tensorflow.layers.dense', 'tf.layers.dense', (['state_in', 'hidden_nodes'], {'activation': 'tf.nn.relu', 'kernel_initializer': 'initializer'}), '(state_in, hidden_nodes, activation=tf.nn.relu,\n kernel_initializer=initializer)\n', (3932, 4015), True, 'import tensorflow as tf\n'), ((4061, 4157), 'tensorflow.layers.dense', 'tf.layers.dense', (['layer1', 'hidden_nodes'], {'activation': 'tf.nn.relu', 'kernel_initializer': 'initializer'}), '(layer1, hidden_nodes, activation=tf.nn.relu,\n kernel_initializer=initializer)\n', (4076, 4157), True, 'import tensorflow as tf\n'), ((4205, 4294), 'tensorflow.layers.dense', 'tf.layers.dense', (['layer2', 'action_dim'], {'activation': 'None', 'kernel_initializer': 'initializer'}), '(layer2, action_dim, activation=None, kernel_initializer=\n initializer)\n', (4220, 4294), True, 'import tensorflow as tf\n'), ((4377, 4409), 'tensorflow.multiply', 'tf.multiply', (['q_values', 'action_in'], {}), '(q_values, action_in)\n', (4388, 4409), True, 'import tensorflow as tf\n'), ((4568, 4621), 'tensorflow.square', 'tf.square', (['(target_in - q_selected_action)'], {'name': '"""loss"""'}), "(target_in - q_selected_action, name='loss')\n", (4577, 4621), True, 'import tensorflow as tf\n'), ((4750, 4770), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (4764, 4770), True, 'import tensorflow as tf\n'), ((5557, 5592), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""target_network"""'], {}), "('target_network')\n", (5574, 5592), True, 'import tensorflow as tf\n'), ((5612, 5710), 'tensorflow.layers.dense', 'tf.layers.dense', (['state_in', 'hidden_nodes'], {'activation': 'tf.nn.relu', 'kernel_initializer': 'initializer'}), '(state_in, hidden_nodes, activation=tf.nn.relu,\n kernel_initializer=initializer)\n', (5627, 5710), True, 'import tensorflow as tf\n'), ((5756, 5852), 'tensorflow.layers.dense', 'tf.layers.dense', (['layer1', 'hidden_nodes'], {'activation': 'tf.nn.relu', 'kernel_initializer': 'initializer'}), '(layer1, hidden_nodes, activation=tf.nn.relu,\n kernel_initializer=initializer)\n', (5771, 5852), True, 'import tensorflow as tf\n'), ((5900, 5989), 'tensorflow.layers.dense', 'tf.layers.dense', (['layer2', 'action_dim'], {'activation': 'None', 'kernel_initializer': 'initializer'}), '(layer2, action_dim, activation=None, kernel_initializer=\n initializer)\n', (5915, 5989), True, 'import tensorflow as tf\n'), ((6072, 6104), 'tensorflow.multiply', 'tf.multiply', (['q_values', 'action_in'], {}), '(q_values, action_in)\n', (6083, 6104), True, 'import tensorflow as tf\n'), ((6730, 6763), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6761, 6763), True, 'import tensorflow as tf\n'), ((7151, 7166), 'random.random', 'random.random', ([], {}), '()\n', (7164, 7166), False, 'import random\n'), ((7203, 7236), 'random.randint', 'random.randint', (['(0)', '(action_dim - 1)'], {}), '(0, action_dim - 1)\n', (7217, 7236), False, 'import random\n'), ((7266, 7288), 'numpy.argmax', 'np.argmax', (['Q_estimates'], {}), 
'(Q_estimates)\n', (7275, 7288), True, 'import numpy as np\n'), ((11713, 11780), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""q_network"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_network')\n", (11730, 11780), True, 'import tensorflow as tf\n'), ((14592, 14611), 'numpy.mean', 'np.mean', (['latest_100'], {}), '(latest_100)\n', (14599, 14611), True, 'import numpy as np\n'), ((4645, 4669), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (4667, 4669), True, 'import tensorflow as tf\n'), ((8061, 8079), 'numpy.eye', 'np.eye', (['action_dim'], {}), '(action_dim)\n', (8067, 8079), True, 'import numpy as np\n'), ((10220, 10238), 'numpy.eye', 'np.eye', (['action_dim'], {}), '(action_dim)\n', (10226, 10238), True, 'import numpy as np\n'), ((6819, 6842), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6840, 6842), False, 'import datetime\n')]
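The heart of the training loop above is the Bellman target assembled in get_train_batch(): the online network chooses the best next action, the target network scores it, and finished episodes stop bootstrapping (a Double-DQN-style update). A framework-free sketch of that computation with fabricated numbers:

import numpy as np

# All arrays below are fabricated; shapes are (batch,) and (batch, n_actions).
GAMMA = 0.99
rewards     = np.array([1.0, 1.0, 0.0])
done        = np.array([False, False, True])
q_online_s1 = np.array([[0.2, 0.9], [0.5, 0.1], [0.3, 0.4]])   # online net on next states
q_target_s1 = np.array([[0.25, 0.8], [0.6, 0.2], [0.1, 0.1]])  # target net on next states

best_actions = q_online_s1.argmax(axis=1)                        # online net picks a'
bootstrap = q_target_s1[np.arange(len(rewards)), best_actions]  # target net scores it
targets = rewards + GAMMA * np.where(done, 0.0, bootstrap)
# targets ~= [1.792, 1.594, 0.0]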
import numpy as np from PIL import Image from deephar.utils.io import WARNING from deephar.utils.io import FAIL from deephar.utils.io import printcn from deephar.utils.pose import pa16j2d from deephar.utils.pose import pa17j3d from deephar.utils.pose import pa20j3d from deephar.utils.colors import hex_colors try: from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt except Exception as e: printcn(FAIL, str(e)) plt = None def data_to_image(x, gray_scale=False): """ Convert 'x' to a RGB Image object. # Arguments x: image in the format (num_cols, num_rows, 3) for RGB images or (num_cols, num_rows) for gray scale images. If None, return a light gray image with size 100x100. gray_scale: convert the RGB color space to a RGB gray scale space. """ if x is None: x = 224 * np.ones((100, 100, 3), dtype=np.uint8) if x.max() - x.min() > 0.: buf = 255. * (x - x.min()) / (x.max() - x.min()) else: buf = x.copy() if len(buf.shape) == 3: (w, h) = buf.shape[0:2] num_ch = buf.shape[2] else: (h, w) = buf.shape num_ch = 1 if ((num_ch is 3) and gray_scale): g = 0.2989*buf[:,:,0] + 0.5870*buf[:,:,1] + 0.1140*buf[:,:,2] buf[:,:,0] = g buf[:,:,1] = g buf[:,:,2] = g elif num_ch is 1: aux = np.zeros((h, w, 3), dtype=buf.dtype) aux[:,:,0] = buf aux[:,:,1] = buf aux[:,:,2] = buf buf = aux return Image.fromarray(buf.astype(np.uint8), 'RGB') def show(x, gray_scale=False, jet_cmap=False, filename=None): """ Show 'x' as an image on the screen. """ if jet_cmap is False: img = data_to_image(x, gray_scale=gray_scale) else: if plt is None: printcn(WARNING, 'pyplot not defined!') return cmap = plt.cm.jet norm = plt.Normalize(vmin=x.min(), vmax=x.max()) img = cmap(norm(x)) if filename: plt.imsave(filename, img) else: plt.imshow(img) plt.show() def draw(x=None, skels=[], bboxes=[], bbox_color='g', abs_pos=False, plot3d=False, single_window=False, figsize=(16,9), axis='on', facecolor='white', azimuth=65, dpi=100, filename=None, predicted=False): # Configure the plotting environment if plt is None: printcn(WARNING, 'pyplot not defined!') return """ Plot 'x' and draw over it the skeletons and the bounding boxes. 
""" img = data_to_image(x) if abs_pos: w = None h = None else: w,h = img.size def add_subimage(f, subplot, img): ax = f.add_subplot(subplot) plt.imshow(img, zorder=-1) return ax fig = [plt.figure(figsize=figsize)] ax = [] if plot3d: if single_window: ax.append(add_subimage(fig[0], 121, img)) ax.append(fig[0].add_subplot(122, projection='3d')) else: ax.append(add_subimage(fig[0], 111, img)) fig.append(plt.figure(figsize=figsize)) ax.append(fig[1].add_subplot(111, projection='3d')) else: ax.append(add_subimage(fig[0], 111, img)) plt.axis(axis) # Plotting skeletons if not None if skels is not None: if isinstance(skels, list) or len(skels.shape) == 3: for s in skels: plot_skeleton_2d(ax[0], s, h=h, w=w, predicted=predicted) if plot3d: plot_3d_pose(s, subplot=ax[-1], azimuth=azimuth) else: plot_skeleton_2d(ax[0], skels, h=h, w=w, predicted=predicted) if plot3d: plot_3d_pose(skels, subplot=ax[-1], azimuth=azimuth) # Plotting bounding boxes if not None if bboxes is not None: if isinstance(bboxes, list) or len(bboxes.shape) == 3: for b, c in zip(bboxes, bbox_color): _plot_bbox(ax[0], b, h=h, w=w, c=c, lw=4) else: _plot_bbox(ax[0], bboxes, h=h, w=w, c=bbox_color, lw=4) if filename: fig[0].savefig(filename, bbox_inches='tight', pad_inches=0, facecolor=facecolor, dpi=dpi) if plot3d and (single_window is False): fig[-1].savefig(filename + '.eps', bbox_inches='tight', pad_inches=0) else: plt.show() for i in range(len(fig)): plt.close(fig[i]) def _get_poselayout(num_joints, predicted=False): if num_joints == 16: if predicted: predicted_links = [[14,12],[12,10],[10,11],[13,0],[0,2],[15,1],[1,3],[8,4],[4,7],[6,5],[5,9]] cmap_pred = [1, 2, 1, 2, 3, 4, 4, 3, 3, 4, 0, 0, 0, 1, 0, 2] return pa16j2d.color, cmap_pred, predicted_links else: return pa16j2d.color, pa16j2d.cmap, pa16j2d.links elif num_joints == 17: return pa17j3d.color, pa17j3d.cmap, pa17j3d.links elif num_joints == 20: return pa20j3d.color, pa20j3d.cmap, pa20j3d.links def plot_3d_pose(pose, subplot=None, filename=None, color=None, lw=3, azimuth=65): if plt is None: raise Exception('"matplotlib" is required for 3D pose plotting!') num_joints, dim = pose.shape assert dim in [2, 3], 'Invalid pose dimension (%d)' % dim assert ((num_joints == 16) or (num_joints == 17)) or (num_joints == 20), \ 'Unsupported number of joints (%d)' % num_joints col, cmap, links = _get_poselayout(num_joints) if color is None: color = col def _func_and(x): if x.all(): return 1 return 0 points = np.zeros((num_joints, 3)) for d in range(dim): points[:,d] = pose[:,d] for i in range(num_joints): points[i, 2] = max(0, points[i, 2]) valid = np.apply_along_axis(_func_and, axis=1, arr=(points[:,0:2] > -1e6)) if subplot is None: fig = plt.figure() ax = fig.add_subplot(111, projection='3d') else: fig = None ax = subplot for j in range(num_joints): if valid[j]: x, y, z = points[j] ax.scatter([z], [x], [y], lw=lw, c=color[cmap[j]]) for i in links: if valid[i[0]] and valid[i[1]]: c = color[cmap[i[0]]] ax.plot(points[i, 2], points[i, 0], points[i, 1], c=c, lw=lw) ax.view_init(10, azimuth) ax.set_aspect('equal') ax.set_xlabel('Z (depth)') ax.set_ylabel('X (width)') ax.set_zlabel('Y (height)') ax.set_xlim([0, 1.]) ax.set_ylim([0, 1.]) ax.set_zlim([0, 1.]) plt.gca().invert_xaxis() plt.gca().invert_zaxis() if fig is not None: if filename: fig.savefig(filename, bbox_inches='tight', pad_inches=0) else: plt.show() plt.close(fig) def _plot_bbox(subplot, bbox, h=None, w=None, scale=16, lw=2, c=None): assert len(bbox) == 4 b = bbox.copy() if w is not None: b[0] *= w b[2] *= w if h is not None: 
b[1] *= h b[3] *= h if c is None: c = hex_colors[np.random.randint(len(hex_colors))] x = np.array([b[0], b[2], b[2], b[0], b[0]]) y = np.array([b[1], b[1], b[3], b[3], b[1]]) subplot.plot(x, y, lw=lw, c=c, zorder=1) def plot_skeleton_2d(subplot, skel, h=None, w=None, joints=True, links=True, scale=16, lw=4, predicted=False): s = skel.copy() num_joints = len(s) assert ((num_joints == 16) or (num_joints == 17)) or (num_joints == 20), \ 'Unsupported number of joints (%d)' % num_joints color, cmap, links = _get_poselayout(num_joints,predicted=predicted) x = s[:,0] y = s[:,1] v = s > -1e6 v = v.any(axis=1).astype(np.float32) # Convert normalized skeletons to image coordinates. if w is not None: x *= w if h is not None: y *= h if joints: for i in range(len(v)): if v[i] > 0: c = color[cmap[i]] subplot.scatter(x=x[i], y=y[i], c=c, lw=lw, s=scale, zorder=2) if links: for i in links: if ((v[i[0]] > 0) and (v[i[1]] > 0)): c = color[cmap[i[0]]] subplot.plot(x[i], y[i], lw=lw, c=c, zorder=1) # def drawhm(hm, zero_clip=False, vmax=None, filename=None): # #heatmaps = np.transpose(heatmaps, (0, 3, 1, 2)) # fb = hm.copy() # if zero_clip: # fb = (fb > 0) * fb # vmin = fb.min() # if vmax is None: # vmax = fb.max() # print (vmin, vmax) # cmap = plt.cm.jet # norm = plt.Normalize(vmin=vmin, vmax=vmax) # image = cmap(norm(fb)) # print (filename) # if filename is not None: # plt.imsave(filename, image) # else: # plt.show(image) # plt.close()
[ "matplotlib.pyplot.show", "matplotlib.pyplot.imshow", "matplotlib.pyplot.close", "deephar.utils.io.printcn", "numpy.zeros", "matplotlib.pyplot.axis", "numpy.ones", "numpy.apply_along_axis", "matplotlib.pyplot.figure", "numpy.array", "matplotlib.pyplot.imsave", "matplotlib.pyplot.gca" ]
[((3314, 3328), 'matplotlib.pyplot.axis', 'plt.axis', (['axis'], {}), '(axis)\n', (3322, 3328), True, 'import matplotlib.pyplot as plt\n'), ((5713, 5738), 'numpy.zeros', 'np.zeros', (['(num_joints, 3)'], {}), '((num_joints, 3))\n', (5721, 5738), True, 'import numpy as np\n'), ((5885, 5956), 'numpy.apply_along_axis', 'np.apply_along_axis', (['_func_and'], {'axis': '(1)', 'arr': '(points[:, 0:2] > -1000000.0)'}), '(_func_and, axis=1, arr=points[:, 0:2] > -1000000.0)\n', (5904, 5956), True, 'import numpy as np\n'), ((7202, 7242), 'numpy.array', 'np.array', (['[b[0], b[2], b[2], b[0], b[0]]'], {}), '([b[0], b[2], b[2], b[0], b[0]])\n', (7210, 7242), True, 'import numpy as np\n'), ((7251, 7291), 'numpy.array', 'np.array', (['[b[1], b[1], b[3], b[3], b[1]]'], {}), '([b[1], b[1], b[3], b[3], b[1]])\n', (7259, 7291), True, 'import numpy as np\n'), ((2026, 2051), 'matplotlib.pyplot.imsave', 'plt.imsave', (['filename', 'img'], {}), '(filename, img)\n', (2036, 2051), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2085), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2080, 2085), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2104), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2102, 2104), True, 'import matplotlib.pyplot as plt\n'), ((2477, 2516), 'deephar.utils.io.printcn', 'printcn', (['WARNING', '"""pyplot not defined!"""'], {}), "(WARNING, 'pyplot not defined!')\n", (2484, 2516), False, 'from deephar.utils.io import printcn\n'), ((2807, 2833), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'zorder': '(-1)'}), '(img, zorder=-1)\n', (2817, 2833), True, 'import matplotlib.pyplot as plt\n'), ((2864, 2891), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2874, 2891), True, 'import matplotlib.pyplot as plt\n'), ((4447, 4457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4455, 4457), True, 'import matplotlib.pyplot as plt\n'), ((4497, 4514), 'matplotlib.pyplot.close', 'plt.close', (['fig[i]'], {}), '(fig[i])\n', (4506, 4514), True, 'import matplotlib.pyplot as plt\n'), ((5991, 6003), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6001, 6003), True, 'import matplotlib.pyplot as plt\n'), ((6868, 6882), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6877, 6882), True, 'import matplotlib.pyplot as plt\n'), ((879, 917), 'numpy.ones', 'np.ones', (['(100, 100, 3)'], {'dtype': 'np.uint8'}), '((100, 100, 3), dtype=np.uint8)\n', (886, 917), True, 'import numpy as np\n'), ((1402, 1438), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {'dtype': 'buf.dtype'}), '((h, w, 3), dtype=buf.dtype)\n', (1410, 1438), True, 'import numpy as np\n'), ((1831, 1870), 'deephar.utils.io.printcn', 'printcn', (['WARNING', '"""pyplot not defined!"""'], {}), "(WARNING, 'pyplot not defined!')\n", (1838, 1870), False, 'from deephar.utils.io import printcn\n'), ((6654, 6663), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6661, 6663), True, 'import matplotlib.pyplot as plt\n'), ((6683, 6692), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6690, 6692), True, 'import matplotlib.pyplot as plt\n'), ((6849, 6859), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6857, 6859), True, 'import matplotlib.pyplot as plt\n'), ((3156, 3183), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3166, 3183), True, 'import matplotlib.pyplot as plt\n')]
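A minimal, self-contained sketch of the intensity rescaling that the data_to_image() helper above performs before handing the buffer to PIL; the input values here are invented purely for illustration.

import numpy as np

x = np.array([[0.2, 0.5], [0.9, 0.4]], dtype=np.float32)
if x.max() - x.min() > 0.:
    buf = 255. * (x - x.min()) / (x.max() - x.min())
else:
    buf = x.copy()
print(buf.astype(np.uint8))  # values stretched to the 0..255 range expected by Image.fromarray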
#!/usr/bin/python3 from __future__ import print_function import tensorflow as tf import math from utility.generate_sample import generate_sample import numpy as np import matplotlib import matplotlib.pyplot as plt import matplotlib.animation as animation import argparse import os #LSTM runs faster on CPU os.environ['CUDA_VISIBLE_DEVICES'] = '0' tf.compat.v1.disable_eager_execution() matplotlib.use("Agg") _DISCRIPTION = ''' The file dR11Devs has following devices:\n idx Device\n 00 -> 'Dev#8'\n 01 -> 'Dev#9'\n 02 -> 'Dev#11'\n 03 -> 'Dev#12'\n 04 -> 'Dev#14'\n 05 -> 'Dev#24'\n 06 -> 'Dev#29'\n 07 -> 'Dev#32'\n 08 -> 'Dev#35'\n 09 -> 'Dev#36'\n 10 -> 'Dev#38'\n ''' parser = argparse.ArgumentParser( description='dR Transistor Degradation predicion Based on Stacked LSTM Approch.') parser.add_argument('--test-dev', type=int, default=0, help="Device test ID. {}".format(_DISCRIPTION)) args = parser.parse_args() if not(-1 < args.test_dev < 11): print( "test-dev should be a number in [0,10]. Please run thte program with --help for more information.") raise ValueError # noinspection PyUnresolvedReferences """ Copyright (c) 2020, University of North Carolina at Charlotte All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Authors: <NAME> - Transformative Computer Systems Architecture Research (TeCSAR) at UNC Charlotte """ """ A Recurrent Neural Network (LSTM) implementation example using TensorFlow library. 
Inspired by https://github.com/aymericdamien/TensorFlow-Examples/ and http://mourafiq.com/2016/05/15/predicting-sequences-using-rnn-in-tensorflow.html and https://github.com/sunsided/tensorflow-lstm-sin """ # Parameters data_file = "./utility/dR11Devs.mat" batch_size = 10 # because we have 10 devices to learn, and one device to test learning_rate = 0.003 training_iters = 1000 training_iter_step_down_every = 250000 display_step = 100 updating_plot = 10 # Network Parameters n_input = 1 # Delta{R} n_steps = 21 # time steps n_hidden = 32 # Num of features n_outputs = 104 # output is a series of Delta{R}+ n_layers = 4 # number of stacked LSTM layers reach_to_test_error = 0.00005 test_device = [args.test_dev] save_movie = True save_res_as_file = True Monte_Carlo_test = False loss_test = [] loss_train = [] min_test_mse = float('Inf') plt.ioff() fig = plt.figure() if not Monte_Carlo_test: ax1 = fig.add_subplot(1, 2, 1) # Plots train and test losses. ax2 = fig.add_subplot(1, 2, 2) # Plots prediction plt.tight_layout(pad=1.5, w_pad=3.0, h_pad=1.0) else: ax1 = fig.add_subplot(1, 1, 1) # Plots train and test losses. ''' Computation graph ''' with tf.compat.v1.name_scope("INPUTs"): # tf Graph input lr = tf.compat.v1.placeholder(tf.float32, []) x = tf.compat.v1.placeholder(tf.float32, [None, n_steps, n_input]) y = tf.compat.v1.placeholder(tf.float32, [None, n_outputs]) # Define weights weights = { 'out': tf.Variable(tf.random.truncated_normal([n_hidden, n_outputs], stddev=1.0)) } biases = { 'out': tf.Variable(tf.random.truncated_normal([n_outputs], stddev=0.1)) } # Define the GRU cells # with tf.name_scope("GRU_CELL"): # gru_cells = [rnn.GRUCell(n_hidden) for _ in range(n_layers)] # with tf.name_scope("GRU_NETWORK"): # stacked_lstm = rnn.MultiRNNCell(gru_cells) # Define the LSTM cells with tf.compat.v1.name_scope("LSTM_CELL"): lstm_cells = [tf.compat.v1.nn.rnn_cell.LSTMCell( n_hidden, forget_bias=1.) for _ in range(n_layers)] # with tf.name_scope("LSTM"): stacked_lstm = tf.compat.v1.nn.rnn_cell.MultiRNNCell(lstm_cells) with tf.compat.v1.name_scope("OUTPUTs"): outputs, states = tf.compat.v1.nn.dynamic_rnn( stacked_lstm, inputs=x, dtype=tf.float32, time_major=False) h = tf.transpose(a=outputs, perm=[1, 0, 2]) pred = tf.nn.bias_add(tf.matmul(h[-1], weights['out']), biases['out']) # Define loss (Euclidean distance) and optimizer individual_losses = tf.reduce_sum( input_tensor=tf.math.squared_difference(pred, y), axis=1) with tf.compat.v1.name_scope("Loss"): loss = tf.reduce_mean(input_tensor=individual_losses) tf.compat.v1.summary.scalar("loss", loss) optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr).minimize(loss) # Initializing the variables init = tf.compat.v1.global_variables_initializer() merged = tf.compat.v1.summary.merge_all() # Add ops to save and restore all the variables. 
saver = tf.compat.v1.train.Saver() _, _, _, _, _, dev_name = generate_sample( filename=data_file, batch_size=1, samples=n_steps, predict=n_outputs, test=True, test_set=test_device) save_file_name_model = './inference_models/model_' + \ dev_name + '.ckpt' def total_mse(): _, _, _, _, l, dev_name = generate_sample(filename=data_file, batch_size=1, samples=n_steps, predict=n_outputs, test=True, test_set=test_device) total_dev_len = l[0] how_many_seg = int(total_dev_len / (n_steps + n_outputs)) pred_lst = np.array([]) target_lst = np.array([]) for i in range(how_many_seg): _, y, _, expected_y, _, _ = generate_sample(filename=data_file, batch_size=1, samples=n_steps, predict=n_outputs, start_from=i * (n_steps + n_outputs), test=True, test_set=test_device) test_input = y.reshape((1, n_steps, n_input)) prediction = sess.run(pred, feed_dict={x: test_input}) # remove the batch size dimensions pred_lst = np.hstack((pred_lst, prediction[0])) # Prediction target_lst = np.hstack((target_lst, expected_y[0])) # Expected #individual_losses = tf.reduce_sum(input_tensor=tf.math.squared_difference(pred_lst, target_lst), axis=0) # tf.reduce_mean(input_tensor=individual_losses) loss = ((pred_lst - target_lst)**2).mean(axis=0) return loss def handle_close(evt): if not Monte_Carlo_test: print("Mean Test Loss= " + "{:.6f}".format(np.mean(loss_test))) if min_test_mse != float('Inf'): # Restore variables from disk. saver.restore(sess, save_file_name_model) print('Model restored.') if save_res_as_file: print("Writing to ./prediction_output/res_" + dev_name + ".txt file...") _, _, _, _, l, _ = generate_sample( filename=data_file, batch_size=1, samples=n_steps, predict=n_outputs, test=True, test_set=test_device) total_dev_len = l[0] how_many_seg = int(total_dev_len / (n_steps + n_outputs)) pred_lst = np.array([]) for i in range(how_many_seg): t, y, next_t, expected_y, _, _ = generate_sample(filename=data_file, batch_size=1, samples=n_steps, predict=n_outputs, start_from=i * (n_steps + n_outputs), test=True, test_set=test_device) test_input = y.reshape((1, n_steps, n_input)) prediction = sess.run(pred, feed_dict={x: test_input}) # remove the batch size dimensions pred_lst = np.hstack((pred_lst, y[0])) # Input Seq pred_lst = np.hstack((pred_lst, prediction[0])) # Prediction pred_nump = np.array(pred_lst) np.savetxt('./prediction_output/res_' + dev_name + '.txt', pred_nump, fmt="%f", newline='\r\n') fig.canvas.mpl_connect('close_event', handle_close) def animate(k): global step_global train(step_global+1) if step_global % (updating_plot) == 0: ax1.clear() ax1.plot(loss_train, color='blue', linestyle='-', label='Train', zorder=1) ax1.plot(loss_test, color='red', linestyle='-', label='Test', zorder=0) ax1.legend(loc='upper right') ax1.set_xlabel('Epochs') ax1.set_ylabel('MSE') ax1.set_ylim(0, 1) ax1.set_xlim(0, training_iters) if not Monte_Carlo_test: if step_global % (updating_plot) == 0: ax2.clear() _, _, _, _, l, _ = generate_sample( filename=data_file, batch_size=1, samples=n_steps, predict=n_outputs, test=True, test_set=test_device) total_dev_len = l[0] how_many_seg = int(total_dev_len / (n_steps + n_outputs)) for i in range(how_many_seg): t, y, next_t, expected_y, _, _ = generate_sample(filename=data_file, batch_size=1, samples=n_steps, predict=n_outputs, start_from=i * (n_steps + n_outputs), test=True, test_set=test_device) test_input = y.reshape((1, n_steps, n_input)) prediction = sess.run(pred, feed_dict={x: test_input}) # remove the batch size dimensions t = t.squeeze() y = y.squeeze() next_t = next_t.squeeze() 
prediction = prediction.squeeze() if i == (how_many_seg - 1): ax2.plot(t, y, color='black', label='Input') ax2.plot(np.append(t[-1], next_t), np.append(y[-1], expected_y), color='green', linestyle='-.', label='Real') ax2.plot(np.append(t[-1], next_t), np.append(y[-1], prediction), color='red', linestyle=':', label='Predicted') # plt.ylim([-1, 1]) ax2.legend(loc='upper left') ax2.set_xlabel('Samples') ax2.set_ylabel('$\Delta R$') ax2.set_ylim(-0.02, 0.0550) else: ax2.plot(t, y, color='black') ax2.plot(np.append( t[-1], next_t), np.append(y[-1], expected_y), color='green', linestyle='-.') ax2.plot(np.append( t[-1], next_t), np.append(y[-1], prediction), color='red', linestyle=':') ax2.set_ylim(-0.02, 0.0550) step_global += 1 if Monte_Carlo_test: if step_global == training_iters: print("Min Test Loss= " + "{:.6f}".format(min(loss_test))) plt.close('all') def train(step): global min_test_mse current_learning_rate = learning_rate current_learning_rate *= 0.1 ** ((step * batch_size) // training_iter_step_down_every) _, batch_x, _, batch_y, _, _ = generate_sample(filename=data_file, batch_size=batch_size, samples=n_steps, predict=n_outputs) batch_x = batch_x.reshape((batch_size, n_steps, n_input)) batch_y = batch_y.reshape((batch_size, n_outputs)) # Run optimization op (backprop) _, loss_value, summary = sess.run([optimizer, loss, merged], feed_dict={x: batch_x, y: batch_y, lr: current_learning_rate}) writer.add_summary(summary, step) #_, batch_x_test, __, batch_y_test, _, _ = generate_sample( # filename=data_file, batch_size=1, samples=n_steps, predict=n_outputs, test=True) #batch_x_test = batch_x_test.reshape((1, n_steps, n_input)) #batch_y_test = batch_y_test.reshape((1, n_outputs)) # loss_value_test, summary = sess.run([loss, merged], feed_dict={ # x: batch_x_test, y: batch_y_test}) loss_value_test = math.log10(total_mse()) if loss_value_test < min_test_mse: save_path = saver.save(sess, save_file_name_model) print("Model saved: " + save_path + " @ total prediction log(MSE): {:.5f}".format(loss_value_test)) min_test_mse = loss_value_test loss_test.append(loss_value_test) loss_train.append(loss_value) if step % display_step == 0: print("Epoch:" + "{}".format(step) + " Iteration " + str(step * batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss_value) + ", Minibatch Loss Test= " + "{:.6f}".format(loss_value_test)) if not save_movie: if loss_value <= reach_to_test_error: # End the optimization and save result into file if it is activated. plt.close('all') # Launch the graph step_global = 0 ani = None # to make it global if __name__ == "__main__": with tf.compat.v1.Session() as sess: sess.run(init) loss_value = float('+Inf') loss_value_test = float('+Inf') writer = tf.compat.v1.summary.FileWriter("./output", sess.graph) how_many_frame = int(training_iters)+1 ani = animation.FuncAnimation(fig, animate, range( 1, how_many_frame), interval=16, blit=False, repeat=False) if save_movie: mywriter = animation.FFMpegWriter(fps=24, codec='libx264', extra_args=['-pix_fmt', 'yuv420p', '-profile:v', 'high', '-tune', 'animation', '-crf', '18']) ani.save("./movies/deltaR_video_"+dev_name+".mp4", writer=mywriter) else: plt.show() writer.close()
[ "argparse.ArgumentParser", "tensorflow.compat.v1.disable_eager_execution", "tensorflow.matmul", "matplotlib.pyplot.figure", "numpy.mean", "matplotlib.pyplot.tight_layout", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.name_scope", "tensorflow.compat.v1.placeholder", "matplotlib.pyplot.close", "numpy.savetxt", "tensorflow.compat.v1.Session", "numpy.append", "matplotlib.pyplot.show", "tensorflow.compat.v1.train.Saver", "tensorflow.reduce_mean", "tensorflow.compat.v1.nn.rnn_cell.MultiRNNCell", "tensorflow.transpose", "numpy.hstack", "matplotlib.animation.FFMpegWriter", "tensorflow.compat.v1.summary.FileWriter", "matplotlib.use", "tensorflow.compat.v1.summary.scalar", "matplotlib.pyplot.ioff", "tensorflow.math.squared_difference", "tensorflow.random.truncated_normal", "tensorflow.compat.v1.nn.dynamic_rnn", "tensorflow.compat.v1.train.AdamOptimizer", "numpy.array", "utility.generate_sample.generate_sample", "tensorflow.compat.v1.nn.rnn_cell.LSTMCell", "tensorflow.compat.v1.summary.merge_all" ]
[((349, 387), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (385, 387), True, 'import tensorflow as tf\n'), ((388, 409), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (402, 409), False, 'import matplotlib\n'), ((688, 798), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""dR Transistor Degradation predicion Based on Stacked LSTM Approch."""'}), "(description=\n 'dR Transistor Degradation predicion Based on Stacked LSTM Approch.')\n", (711, 798), False, 'import argparse\n'), ((3734, 3744), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (3742, 3744), True, 'import matplotlib.pyplot as plt\n'), ((3753, 3765), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3763, 3765), True, 'import matplotlib.pyplot as plt\n'), ((5703, 5746), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (5744, 5746), True, 'import tensorflow as tf\n'), ((5757, 5789), 'tensorflow.compat.v1.summary.merge_all', 'tf.compat.v1.summary.merge_all', ([], {}), '()\n', (5787, 5789), True, 'import tensorflow as tf\n'), ((5848, 5874), 'tensorflow.compat.v1.train.Saver', 'tf.compat.v1.train.Saver', ([], {}), '()\n', (5872, 5874), True, 'import tensorflow as tf\n'), ((5901, 6024), 'utility.generate_sample.generate_sample', 'generate_sample', ([], {'filename': 'data_file', 'batch_size': '(1)', 'samples': 'n_steps', 'predict': 'n_outputs', 'test': '(True)', 'test_set': 'test_device'}), '(filename=data_file, batch_size=1, samples=n_steps, predict=\n n_outputs, test=True, test_set=test_device)\n', (5916, 6024), False, 'from utility.generate_sample import generate_sample\n'), ((3917, 3964), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(1.5)', 'w_pad': '(3.0)', 'h_pad': '(1.0)'}), '(pad=1.5, w_pad=3.0, h_pad=1.0)\n', (3933, 3964), True, 'import matplotlib.pyplot as plt\n'), ((4076, 4109), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""INPUTs"""'], {}), "('INPUTs')\n", (4099, 4109), True, 'import tensorflow as tf\n'), ((4141, 4181), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (4165, 4181), True, 'import tensorflow as tf\n'), ((4190, 4252), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[None, n_steps, n_input]'], {}), '(tf.float32, [None, n_steps, n_input])\n', (4214, 4252), True, 'import tensorflow as tf\n'), ((4261, 4316), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[None, n_outputs]'], {}), '(tf.float32, [None, n_outputs])\n', (4285, 4316), True, 'import tensorflow as tf\n'), ((4764, 4800), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""LSTM_CELL"""'], {}), "('LSTM_CELL')\n", (4787, 4800), True, 'import tensorflow as tf\n'), ((4964, 5013), 'tensorflow.compat.v1.nn.rnn_cell.MultiRNNCell', 'tf.compat.v1.nn.rnn_cell.MultiRNNCell', (['lstm_cells'], {}), '(lstm_cells)\n', (5001, 5013), True, 'import tensorflow as tf\n'), ((5020, 5054), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""OUTPUTs"""'], {}), "('OUTPUTs')\n", (5043, 5054), True, 'import tensorflow as tf\n'), ((5078, 5169), 'tensorflow.compat.v1.nn.dynamic_rnn', 'tf.compat.v1.nn.dynamic_rnn', (['stacked_lstm'], {'inputs': 'x', 'dtype': 'tf.float32', 'time_major': '(False)'}), '(stacked_lstm, inputs=x, dtype=tf.float32,\n time_major=False)\n', (5105, 5169), True, 
'import tensorflow as tf\n'), ((5183, 5222), 'tensorflow.transpose', 'tf.transpose', ([], {'a': 'outputs', 'perm': '[1, 0, 2]'}), '(a=outputs, perm=[1, 0, 2])\n', (5195, 5222), True, 'import tensorflow as tf\n'), ((5450, 5481), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""Loss"""'], {}), "('Loss')\n", (5473, 5481), True, 'import tensorflow as tf\n'), ((5494, 5540), 'tensorflow.reduce_mean', 'tf.reduce_mean', ([], {'input_tensor': 'individual_losses'}), '(input_tensor=individual_losses)\n', (5508, 5540), True, 'import tensorflow as tf\n'), ((5545, 5586), 'tensorflow.compat.v1.summary.scalar', 'tf.compat.v1.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (5572, 5586), True, 'import tensorflow as tf\n'), ((6152, 6275), 'utility.generate_sample.generate_sample', 'generate_sample', ([], {'filename': 'data_file', 'batch_size': '(1)', 'samples': 'n_steps', 'predict': 'n_outputs', 'test': '(True)', 'test_set': 'test_device'}), '(filename=data_file, batch_size=1, samples=n_steps, predict=\n n_outputs, test=True, test_set=test_device)\n', (6167, 6275), False, 'from utility.generate_sample import generate_sample\n'), ((6419, 6431), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6427, 6431), True, 'import numpy as np\n'), ((6449, 6461), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6457, 6461), True, 'import numpy as np\n'), ((12080, 12178), 'utility.generate_sample.generate_sample', 'generate_sample', ([], {'filename': 'data_file', 'batch_size': 'batch_size', 'samples': 'n_steps', 'predict': 'n_outputs'}), '(filename=data_file, batch_size=batch_size, samples=n_steps,\n predict=n_outputs)\n', (12095, 12178), False, 'from utility.generate_sample import generate_sample\n'), ((4370, 4431), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['[n_hidden, n_outputs]'], {'stddev': '(1.0)'}), '([n_hidden, n_outputs], stddev=1.0)\n', (4396, 4431), True, 'import tensorflow as tf\n'), ((4470, 4521), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (['[n_outputs]'], {'stddev': '(0.1)'}), '([n_outputs], stddev=0.1)\n', (4496, 4521), True, 'import tensorflow as tf\n'), ((4820, 4880), 'tensorflow.compat.v1.nn.rnn_cell.LSTMCell', 'tf.compat.v1.nn.rnn_cell.LSTMCell', (['n_hidden'], {'forget_bias': '(1.0)'}), '(n_hidden, forget_bias=1.0)\n', (4853, 4880), True, 'import tensorflow as tf\n'), ((5249, 5281), 'tensorflow.matmul', 'tf.matmul', (['h[-1]', "weights['out']"], {}), "(h[-1], weights['out'])\n", (5258, 5281), True, 'import tensorflow as tf\n'), ((5400, 5435), 'tensorflow.math.squared_difference', 'tf.math.squared_difference', (['pred', 'y'], {}), '(pred, y)\n', (5426, 5435), True, 'import tensorflow as tf\n'), ((5600, 5650), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (5632, 5650), True, 'import tensorflow as tf\n'), ((6532, 6698), 'utility.generate_sample.generate_sample', 'generate_sample', ([], {'filename': 'data_file', 'batch_size': '(1)', 'samples': 'n_steps', 'predict': 'n_outputs', 'start_from': '(i * (n_steps + n_outputs))', 'test': '(True)', 'test_set': 'test_device'}), '(filename=data_file, batch_size=1, samples=n_steps, predict=\n n_outputs, start_from=i * (n_steps + n_outputs), test=True, test_set=\n test_device)\n', (6547, 6698), False, 'from utility.generate_sample import generate_sample\n'), ((6972, 7008), 'numpy.hstack', 'np.hstack', (['(pred_lst, prediction[0])'], {}), '((pred_lst, prediction[0]))\n', (6981, 7008), 
True, 'import numpy as np\n'), ((7044, 7082), 'numpy.hstack', 'np.hstack', (['(target_lst, expected_y[0])'], {}), '((target_lst, expected_y[0]))\n', (7053, 7082), True, 'import numpy as np\n'), ((13978, 14000), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (13998, 14000), True, 'import tensorflow as tf\n'), ((14127, 14182), 'tensorflow.compat.v1.summary.FileWriter', 'tf.compat.v1.summary.FileWriter', (['"""./output"""', 'sess.graph'], {}), "('./output', sess.graph)\n", (14158, 14182), True, 'import tensorflow as tf\n'), ((7778, 7901), 'utility.generate_sample.generate_sample', 'generate_sample', ([], {'filename': 'data_file', 'batch_size': '(1)', 'samples': 'n_steps', 'predict': 'n_outputs', 'test': '(True)', 'test_set': 'test_device'}), '(filename=data_file, batch_size=1, samples=n_steps, predict=\n n_outputs, test=True, test_set=test_device)\n', (7793, 7901), False, 'from utility.generate_sample import generate_sample\n'), ((8042, 8054), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8050, 8054), True, 'import numpy as np\n'), ((8788, 8806), 'numpy.array', 'np.array', (['pred_lst'], {}), '(pred_lst)\n', (8796, 8806), True, 'import numpy as np\n'), ((8819, 8919), 'numpy.savetxt', 'np.savetxt', (["('./prediction_output/res_' + dev_name + '.txt')", 'pred_nump'], {'fmt': '"""%f"""', 'newline': "'\\r\\n'"}), "('./prediction_output/res_' + dev_name + '.txt', pred_nump, fmt=\n '%f', newline='\\r\\n')\n", (8829, 8919), True, 'import numpy as np\n'), ((9620, 9743), 'utility.generate_sample.generate_sample', 'generate_sample', ([], {'filename': 'data_file', 'batch_size': '(1)', 'samples': 'n_steps', 'predict': 'n_outputs', 'test': '(True)', 'test_set': 'test_device'}), '(filename=data_file, batch_size=1, samples=n_steps, predict=\n n_outputs, test=True, test_set=test_device)\n', (9635, 9743), False, 'from utility.generate_sample import generate_sample\n'), ((11812, 11828), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11821, 11828), True, 'import matplotlib.pyplot as plt\n'), ((13854, 13870), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (13863, 13870), True, 'import matplotlib.pyplot as plt\n'), ((14407, 14552), 'matplotlib.animation.FFMpegWriter', 'animation.FFMpegWriter', ([], {'fps': '(24)', 'codec': '"""libx264"""', 'extra_args': "['-pix_fmt', 'yuv420p', '-profile:v', 'high', '-tune', 'animation', '-crf',\n '18']"}), "(fps=24, codec='libx264', extra_args=['-pix_fmt',\n 'yuv420p', '-profile:v', 'high', '-tune', 'animation', '-crf', '18'])\n", (14429, 14552), True, 'import matplotlib.animation as animation\n'), ((14759, 14769), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14767, 14769), True, 'import matplotlib.pyplot as plt\n'), ((8146, 8312), 'utility.generate_sample.generate_sample', 'generate_sample', ([], {'filename': 'data_file', 'batch_size': '(1)', 'samples': 'n_steps', 'predict': 'n_outputs', 'start_from': '(i * (n_steps + n_outputs))', 'test': '(True)', 'test_set': 'test_device'}), '(filename=data_file, batch_size=1, samples=n_steps, predict=\n n_outputs, start_from=i * (n_steps + n_outputs), test=True, test_set=\n test_device)\n', (8161, 8312), False, 'from utility.generate_sample import generate_sample\n'), ((8644, 8671), 'numpy.hstack', 'np.hstack', (['(pred_lst, y[0])'], {}), '((pred_lst, y[0]))\n', (8653, 8671), True, 'import numpy as np\n'), ((8712, 8748), 'numpy.hstack', 'np.hstack', (['(pred_lst, prediction[0])'], {}), '((pred_lst, prediction[0]))\n', (8721, 8748), True, 
'import numpy as np\n'), ((9952, 10118), 'utility.generate_sample.generate_sample', 'generate_sample', ([], {'filename': 'data_file', 'batch_size': '(1)', 'samples': 'n_steps', 'predict': 'n_outputs', 'start_from': '(i * (n_steps + n_outputs))', 'test': '(True)', 'test_set': 'test_device'}), '(filename=data_file, batch_size=1, samples=n_steps, predict=\n n_outputs, start_from=i * (n_steps + n_outputs), test=True, test_set=\n test_device)\n', (9967, 10118), False, 'from utility.generate_sample import generate_sample\n'), ((7432, 7450), 'numpy.mean', 'np.mean', (['loss_test'], {}), '(loss_test)\n', (7439, 7450), True, 'import numpy as np\n'), ((10717, 10741), 'numpy.append', 'np.append', (['t[-1]', 'next_t'], {}), '(t[-1], next_t)\n', (10726, 10741), True, 'import numpy as np\n'), ((10743, 10771), 'numpy.append', 'np.append', (['y[-1]', 'expected_y'], {}), '(y[-1], expected_y)\n', (10752, 10771), True, 'import numpy as np\n'), ((10876, 10900), 'numpy.append', 'np.append', (['t[-1]', 'next_t'], {}), '(t[-1], next_t)\n', (10885, 10900), True, 'import numpy as np\n'), ((10902, 10930), 'numpy.append', 'np.append', (['y[-1]', 'prediction'], {}), '(y[-1], prediction)\n', (10911, 10930), True, 'import numpy as np\n'), ((11341, 11365), 'numpy.append', 'np.append', (['t[-1]', 'next_t'], {}), '(t[-1], next_t)\n', (11350, 11365), True, 'import numpy as np\n'), ((11392, 11420), 'numpy.append', 'np.append', (['y[-1]', 'expected_y'], {}), '(y[-1], expected_y)\n', (11401, 11420), True, 'import numpy as np\n'), ((11482, 11506), 'numpy.append', 'np.append', (['t[-1]', 'next_t'], {}), '(t[-1], next_t)\n', (11491, 11506), True, 'import numpy as np\n'), ((11533, 11561), 'numpy.append', 'np.append', (['y[-1]', 'prediction'], {}), '(y[-1], prediction)\n', (11542, 11561), True, 'import numpy as np\n')]
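The train() step in the script above decays the learning rate with an integer step-down rule rather than a TensorFlow schedule; the following standalone sketch (plain Python, hyperparameter values copied from the script) shows how that rule behaves and is only meant as an illustration.

learning_rate = 0.003
batch_size = 10
training_iter_step_down_every = 250000

for step in (0, 25000, 50000):
    lr = learning_rate * 0.1 ** ((step * batch_size) // training_iter_step_down_every)
    print(step, lr)  # drops by a factor of 10 every 25000 epochs (250000 samples)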
import numpy as np from transformers import AdamW, get_scheduler from transformers.tokenization_utils_base import BatchEncoding def forward_wrapper_tuple(model, batch): input_ids = batch['input_ids'].to(model.first_device) attention_mask = batch['attention_mask'].to(model.first_device) labels = batch['labels'].to(model.last_device) outputs = model((input_ids, attention_mask)) logits = outputs[0] return logits, labels def forward_wrapper_dict(model, batch): batch = BatchEncoding(batch).to(0) logits = model(**batch).logits return logits, batch['labels'].to(0) def train_step(model, batch, forward_wrapper, loss_fn): model.train() outputs, labels = forward_wrapper(model, batch) # print(outputs) loss = loss_fn(outputs.to(labels.device), labels) return loss def eval_step(model, batch, forward_wrapper, loss_fn): model.train() outputs, labels = forward_wrapper(model, batch) loss = loss_fn(outputs, labels) return outputs, loss def get_optimizer_grouped_parameters(args, model): no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] return optimizer_grouped_parameters def create_adamw_optimizer(args, model): optimizer_grouped_parameters = get_optimizer_grouped_parameters(args, model) optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) return optimizer def create_lr_cheduler(args, optimizer): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) return lr_scheduler def prepare_model(args, model): optimizer = create_adamw_optimizer(args, model) lr_cheduler = create_lr_cheduler(args, optimizer) return model, optimizer, lr_cheduler def test_parameters_consistency(model_gold, model_test, abort=True): model_test_param = model_test.named_parameters() model_gold_param = model_gold.named_parameters() for test, gold in zip(model_test_param, model_gold_param): name_test, param_test = test name_gold, param_gold = gold param_test = param_test.detach().cpu().numpy() param_gold = param_gold.detach().cpu().numpy() if abort: print(name_gold, name_test, param_gold.shape, param_test.shape) assert np.all(np.isclose( param_test, param_gold )) else: print(name_test, np.linalg.norm(param_gold-param_test))
[ "transformers.tokenization_utils_base.BatchEncoding", "numpy.isclose", "numpy.linalg.norm", "transformers.get_scheduler", "transformers.AdamW" ]
[((1660, 1718), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate'}), '(optimizer_grouped_parameters, lr=args.learning_rate)\n', (1665, 1718), False, 'from transformers import AdamW, get_scheduler\n'), ((1801, 1954), 'transformers.get_scheduler', 'get_scheduler', ([], {'name': 'args.lr_scheduler_type', 'optimizer': 'optimizer', 'num_warmup_steps': 'args.num_warmup_steps', 'num_training_steps': 'args.max_train_steps'}), '(name=args.lr_scheduler_type, optimizer=optimizer,\n num_warmup_steps=args.num_warmup_steps, num_training_steps=args.\n max_train_steps)\n', (1814, 1954), False, 'from transformers import AdamW, get_scheduler\n'), ((505, 525), 'transformers.tokenization_utils_base.BatchEncoding', 'BatchEncoding', (['batch'], {}), '(batch)\n', (518, 525), False, 'from transformers.tokenization_utils_base import BatchEncoding\n'), ((2736, 2770), 'numpy.isclose', 'np.isclose', (['param_test', 'param_gold'], {}), '(param_test, param_gold)\n', (2746, 2770), True, 'import numpy as np\n'), ((2861, 2900), 'numpy.linalg.norm', 'np.linalg.norm', (['(param_gold - param_test)'], {}), '(param_gold - param_test)\n', (2875, 2900), True, 'import numpy as np\n')]
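A plain-Python sketch of the name-based split that get_optimizer_grouped_parameters() above relies on; the parameter names below are invented stand-ins for a real model's named_parameters().

no_decay = ["bias", "LayerNorm.weight"]
names = [
    "encoder.layer.0.attention.self.query.weight",
    "encoder.layer.0.attention.self.query.bias",
    "encoder.layer.0.output.LayerNorm.weight",
]
with_decay = [n for n in names if not any(nd in n for nd in no_decay)]
without_decay = [n for n in names if any(nd in n for nd in no_decay)]
print(with_decay)     # only the query.weight entry keeps weight decay
print(without_decay)  # biases and LayerNorm weights get weight_decay = 0.0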
"""Same as 01.py, but reports speed""" import os import sys if not os.path.abspath('../../../') in sys.path: sys.path.append('../../../') import swhlab import matplotlib.pyplot as plt import numpy as np import time if __name__=="__main__": abfFile=R"X:\Data\DIC1\2013\08-2013\08-16-2013-DP\13816004.abf" abf=swhlab.ABF(abfFile) # defaults to sweep 0 print("analyzing %d sweeps (%.02f sec each)"%(abf.sweeps,abf.sweepLength)) times=[] for sweep in abf.setsweeps(): t1=time.clock() baseFrequency=60 # frequency (Hz) to silence FFT=np.fft.fft(abf.sweepY) # frequency data (i/j vectors starting at 0Hz) for i in range(50): # first 50 odd harmonics I=int(baseFrequency*i+baseFrequency*len(abf.sweepY)/abf.pointsPerSec) FFT[I],FFT[-I]=0,0 # remember to silence from both ends of the FFT Ys2=np.fft.ifft(FFT) # all done times.append(time.clock()-t1) times=np.array(times)*1000 # now in ms print("analysis took %.02f +/- %.02f ms per sweep"%(np.average(times),np.std(times))) # analyzing 60 sweeps (5.00 sec each) # analysis took 6.47 +/- 1.71 ms per sweep
[ "sys.path.append", "numpy.fft.ifft", "swhlab.ABF", "os.path.abspath", "numpy.average", "numpy.std", "numpy.fft.fft", "time.clock", "numpy.array" ]
[((114, 142), 'sys.path.append', 'sys.path.append', (['"""../../../"""'], {}), "('../../../')\n", (129, 142), False, 'import sys\n'), ((322, 341), 'swhlab.ABF', 'swhlab.ABF', (['abfFile'], {}), '(abfFile)\n', (332, 341), False, 'import swhlab\n'), ((68, 96), 'os.path.abspath', 'os.path.abspath', (['"""../../../"""'], {}), "('../../../')\n", (83, 96), False, 'import os\n'), ((501, 513), 'time.clock', 'time.clock', ([], {}), '()\n', (511, 513), False, 'import time\n'), ((579, 601), 'numpy.fft.fft', 'np.fft.fft', (['abf.sweepY'], {}), '(abf.sweepY)\n', (589, 601), True, 'import numpy as np\n'), ((875, 891), 'numpy.fft.ifft', 'np.fft.ifft', (['FFT'], {}), '(FFT)\n', (886, 891), True, 'import numpy as np\n'), ((951, 966), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (959, 966), True, 'import numpy as np\n'), ((924, 936), 'time.clock', 'time.clock', ([], {}), '()\n', (934, 936), False, 'import time\n'), ((1040, 1057), 'numpy.average', 'np.average', (['times'], {}), '(times)\n', (1050, 1057), True, 'import numpy as np\n'), ((1058, 1071), 'numpy.std', 'np.std', (['times'], {}), '(times)\n', (1064, 1071), True, 'import numpy as np\n')]
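The speed test above spends its time in an FFT-based notch filter; here is a self-contained sketch of the same silence-a-frequency idea on synthetic data (the 1 kHz sampling rate and 60 Hz tone are assumptions, not values taken from the ABF file).

import numpy as np

fs = 1000.0                          # assumed sampling rate (Hz)
t = np.arange(0, 1.0, 1.0 / fs)
signal = np.sin(2 * np.pi * 60 * t) + 0.1 * np.random.randn(t.size)

FFT = np.fft.fft(signal)
I = int(60 * len(signal) / fs)       # FFT bin holding the 60 Hz component
FFT[I], FFT[-I] = 0, 0               # silence both ends of the spectrum
cleaned = np.fft.ifft(FFT).real         # back to the time domain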
import numpy as np from torch import nn def num_parameters(self): return sum(np.prod(p.shape) for p in self.parameters()) nn.Module.num_parameters = property(num_parameters) from .graph_attention_layer import GraphAttentionNetwork, GraphAttentionLayer from .utils import get_clones from .node_transformer import PositionalEncoding, NodeTransformer, TransformerNetwork from .transformer import Transformer from .multihead_attention import MultiheadAttention from .residual import Residual from .pairwise import PairwiseBilinear, PairwiseDot, PairwiseDistance
[ "numpy.prod" ]
[((82, 98), 'numpy.prod', 'np.prod', (['p.shape'], {}), '(p.shape)\n', (89, 98), True, 'import numpy as np\n')]
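A quick check of what the num_parameters property defined above reports, computed directly with numpy so it runs outside the package; the layer size is arbitrary and torch is assumed to be installed.

import numpy as np
from torch import nn

layer = nn.Linear(4, 3)                                         # weight 3x4 plus bias 3
print(sum(np.prod(p.shape) for p in layer.parameters()))  # prints 15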
# -*- coding: utf-8 -*- """ Created on Thu Jul 4 10:23:34 2019 @author: <NAME> """ # This code is used for creating data set for 6DOF robotic arm with use of direct kinematic. from sympy import symbols, pi, sin, cos, simplify from sympy.matrices import Matrix import numpy as np import random import matplotlib.pyplot as plt import pandas as pd import time import math def build_mod_dh_matrix(s, theta, alpha, d, a): # transformation matrix Ta_b = Matrix([ [cos(theta), -cos(alpha)*sin(theta), sin(alpha)*sin(theta), a*cos(theta)], [sin(theta), cos(alpha)*cos(theta), -sin(alpha)*cos(theta), a*sin(theta)], [0, sin(alpha), cos(alpha), d ], [0, 0, 0, 1] ]) Ta_b = Ta_b.subs(s) # Substitute in the DH parameters return Ta_b def calculate_position(teta1, teta2, teta3, teta4, teta5, teta6): theta1, theta2, theta3, theta4, theta5, theta6 = symbols('theta1:7') alpha0, alpha1, alpha2, alpha3, alpha4, alpha5 = symbols('alpha0:6') d1, d2, d3, d4, d5, d6 = symbols('d1:7') a0, a1, a2, a3, a4, a5 = symbols('a0:6') # DH parameters kuka_s = {alpha0: -pi/2, d1: 0.675, a0: 0.260, alpha1: 0, d2: 0, a1: 0.68, alpha2: pi/2, d3: 0, a2: 0, theta2: (theta2 - pi/2), alpha3: -pi/2, d4: -0.67, a3: 0, alpha4: pi/2, d5: 0, a4: 0, alpha5: pi, d6: -0.158, a5: 0, } # Define Modified DH Transformation matrix T0_1 = build_mod_dh_matrix(s=kuka_s, theta=theta1, alpha=alpha0, d=d1, a=a0) T1_2 = build_mod_dh_matrix(s=kuka_s, theta=theta2, alpha=alpha1, d=d2, a=a1) T2_3 = build_mod_dh_matrix(s=kuka_s, theta=theta3, alpha=alpha2, d=d3, a=a2) T3_4 = build_mod_dh_matrix(s=kuka_s, theta=theta4, alpha=alpha3, d=d4, a=a3) T4_5 = build_mod_dh_matrix(s=kuka_s, theta=theta5, alpha=alpha4, d=d5, a=a4) T5_6 = build_mod_dh_matrix(s=kuka_s, theta=theta6, alpha=alpha5, d=d6, a=a5) # Create individual transformation matrices T0_2 = simplify(T0_1 * T1_2) T0_3 = simplify(T0_2 * T2_3) T0_4 = simplify(T0_3 * T3_4) T0_5 = simplify(T0_4 * T4_5) T0_G = simplify(T0_5 * T5_6) T_total = simplify( T0_G ) result = T_total.evalf(subs={theta1: teta1, theta2: teta2, theta3: teta3, theta4: teta4, theta5: teta5, theta6: teta5}) final = np.array(result).astype(np.float64) return final # creating data set start_time = time.time() # MORE TIGHT constraints for my configuration no6 # defining the constraints for every joint number_points = 2 #1000 angle0 = [random.uniform(-1.57, 1.57) for i in range(0,number_points)] # 90-90 degrees angle1 = [random.uniform(-1.05, 1.05) for i in range(0,number_points)] # 60-60 degrees angle2 = [random.uniform(-1.05, 1.05) for i in range(0,number_points)] # 60-60 degrees angle3 = [random.uniform(-0.78, 0.78) for i in range(0,number_points)] # 45-45 degrees angle4 = [random.uniform(-0.78, 0.78) for i in range(0,number_points)] # 45-45 degrees angle5 = [random.uniform(-0.78, 0.78) for i in range(0,number_points)] # 45-45 degrees angles = np.zeros([1,6],dtype=float) positions = np.zeros([1,3],dtype=float) n = np.zeros([1,3],dtype=float) o = np.zeros([1,3],dtype=float) a = np.zeros([1,3],dtype=float) # calculation of the final transform matrix and generation of dataset for i in range(0,number_points): print(i) ang = [] ang.append( [math.degrees(angle0[i]), math.degrees(angle1[i]), math.degrees(angle2[i]), math.degrees(angle3[i]), math.degrees(angle4[i]), math.degrees(angle5[i])] ) ang = np.asarray(ang) result = calculate_position(angle0[i], angle1[i], angle2[i], angle3[i], angle4[i], angle5[i]) position_xyz = [] position_xyz.append( [result[0][3], result[1][3], result[2][3]] ) position_xyz = np.asarray(position_xyz) n_xyz = [] n_xyz.append( 
[result[0][0], result[1][0], result[2][0]] ) o_xyz = [] o_xyz.append( [result[0][1], result[1][1], result[2][1]] ) a_xyz = [] a_xyz.append( [result[0][2], result[1][2], result[2][2]] ) angles = np.concatenate((angles,ang)) positions = np.concatenate((positions,position_xyz)) n = np.concatenate((n, n_xyz)) o = np.concatenate((o, o_xyz)) a = np.concatenate((a, a_xyz)) df = pd.DataFrame(np.concatenate((positions,n,o,a,angles), axis = 1) ) df = df.iloc[1:] # df.to_csv(r'.\codes_and_data\no.<1>6DOF.csv') end_time = time.time() time = end_time - start_time print('Time needed',time) print(df) # needed 15 sec
[ "sympy.symbols", "random.uniform", "numpy.asarray", "sympy.cos", "numpy.zeros", "sympy.simplify", "time.time", "numpy.array", "sympy.sin", "math.degrees", "numpy.concatenate" ]
[((2866, 2877), 'time.time', 'time.time', ([], {}), '()\n', (2875, 2877), False, 'import time\n'), ((3562, 3591), 'numpy.zeros', 'np.zeros', (['[1, 6]'], {'dtype': 'float'}), '([1, 6], dtype=float)\n', (3570, 3591), True, 'import numpy as np\n'), ((3603, 3632), 'numpy.zeros', 'np.zeros', (['[1, 3]'], {'dtype': 'float'}), '([1, 3], dtype=float)\n', (3611, 3632), True, 'import numpy as np\n'), ((3636, 3665), 'numpy.zeros', 'np.zeros', (['[1, 3]'], {'dtype': 'float'}), '([1, 3], dtype=float)\n', (3644, 3665), True, 'import numpy as np\n'), ((3669, 3698), 'numpy.zeros', 'np.zeros', (['[1, 3]'], {'dtype': 'float'}), '([1, 3], dtype=float)\n', (3677, 3698), True, 'import numpy as np\n'), ((3702, 3731), 'numpy.zeros', 'np.zeros', (['[1, 3]'], {'dtype': 'float'}), '([1, 3], dtype=float)\n', (3710, 3731), True, 'import numpy as np\n'), ((4950, 4961), 'time.time', 'time.time', ([], {}), '()\n', (4959, 4961), False, 'import time\n'), ((1113, 1132), 'sympy.symbols', 'symbols', (['"""theta1:7"""'], {}), "('theta1:7')\n", (1120, 1132), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((1187, 1206), 'sympy.symbols', 'symbols', (['"""alpha0:6"""'], {}), "('alpha0:6')\n", (1194, 1206), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((1237, 1252), 'sympy.symbols', 'symbols', (['"""d1:7"""'], {}), "('d1:7')\n", (1244, 1252), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((1287, 1302), 'sympy.symbols', 'symbols', (['"""a0:6"""'], {}), "('a0:6')\n", (1294, 1302), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((2366, 2387), 'sympy.simplify', 'simplify', (['(T0_1 * T1_2)'], {}), '(T0_1 * T1_2)\n', (2374, 2387), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((2404, 2425), 'sympy.simplify', 'simplify', (['(T0_2 * T2_3)'], {}), '(T0_2 * T2_3)\n', (2412, 2425), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((2442, 2463), 'sympy.simplify', 'simplify', (['(T0_3 * T3_4)'], {}), '(T0_3 * T3_4)\n', (2450, 2463), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((2479, 2500), 'sympy.simplify', 'simplify', (['(T0_4 * T4_5)'], {}), '(T0_4 * T4_5)\n', (2487, 2500), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((2517, 2538), 'sympy.simplify', 'simplify', (['(T0_5 * T5_6)'], {}), '(T0_5 * T5_6)\n', (2525, 2538), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((2566, 2580), 'sympy.simplify', 'simplify', (['T0_G'], {}), '(T0_G)\n', (2574, 2580), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((3015, 3042), 'random.uniform', 'random.uniform', (['(-1.57)', '(1.57)'], {}), '(-1.57, 1.57)\n', (3029, 3042), False, 'import random\n'), ((3106, 3133), 'random.uniform', 'random.uniform', (['(-1.05)', '(1.05)'], {}), '(-1.05, 1.05)\n', (3120, 3133), False, 'import random\n'), ((3197, 3224), 'random.uniform', 'random.uniform', (['(-1.05)', '(1.05)'], {}), '(-1.05, 1.05)\n', (3211, 3224), False, 'import random\n'), ((3288, 3315), 'random.uniform', 'random.uniform', (['(-0.78)', '(0.78)'], {}), '(-0.78, 0.78)\n', (3302, 3315), False, 'import random\n'), ((3379, 3406), 'random.uniform', 'random.uniform', (['(-0.78)', '(0.78)'], {}), '(-0.78, 0.78)\n', (3393, 3406), False, 'import random\n'), ((3470, 3497), 'random.uniform', 'random.uniform', (['(-0.78)', '(0.78)'], {}), '(-0.78, 0.78)\n', (3484, 3497), False, 'import random\n'), ((4073, 4088), 'numpy.asarray', 'np.asarray', (['ang'], {}), '(ang)\n', (4083, 4088), True, 'import numpy as np\n'), ((4307, 4331), 'numpy.asarray', 
'np.asarray', (['position_xyz'], {}), '(position_xyz)\n', (4317, 4331), True, 'import numpy as np\n'), ((4600, 4629), 'numpy.concatenate', 'np.concatenate', (['(angles, ang)'], {}), '((angles, ang))\n', (4614, 4629), True, 'import numpy as np\n'), ((4646, 4687), 'numpy.concatenate', 'np.concatenate', (['(positions, position_xyz)'], {}), '((positions, position_xyz))\n', (4660, 4687), True, 'import numpy as np\n'), ((4696, 4722), 'numpy.concatenate', 'np.concatenate', (['(n, n_xyz)'], {}), '((n, n_xyz))\n', (4710, 4722), True, 'import numpy as np\n'), ((4732, 4758), 'numpy.concatenate', 'np.concatenate', (['(o, o_xyz)'], {}), '((o, o_xyz))\n', (4746, 4758), True, 'import numpy as np\n'), ((4768, 4794), 'numpy.concatenate', 'np.concatenate', (['(a, a_xyz)'], {}), '((a, a_xyz))\n', (4782, 4794), True, 'import numpy as np\n'), ((4816, 4868), 'numpy.concatenate', 'np.concatenate', (['(positions, n, o, a, angles)'], {'axis': '(1)'}), '((positions, n, o, a, angles), axis=1)\n', (4830, 4868), True, 'import numpy as np\n'), ((2763, 2779), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (2771, 2779), True, 'import numpy as np\n'), ((3891, 3914), 'math.degrees', 'math.degrees', (['angle0[i]'], {}), '(angle0[i])\n', (3903, 3914), False, 'import math\n'), ((3916, 3939), 'math.degrees', 'math.degrees', (['angle1[i]'], {}), '(angle1[i])\n', (3928, 3939), False, 'import math\n'), ((3941, 3964), 'math.degrees', 'math.degrees', (['angle2[i]'], {}), '(angle2[i])\n', (3953, 3964), False, 'import math\n'), ((3985, 4008), 'math.degrees', 'math.degrees', (['angle3[i]'], {}), '(angle3[i])\n', (3997, 4008), False, 'import math\n'), ((4010, 4033), 'math.degrees', 'math.degrees', (['angle4[i]'], {}), '(angle4[i])\n', (4022, 4033), False, 'import math\n'), ((4035, 4058), 'math.degrees', 'math.degrees', (['angle5[i]'], {}), '(angle5[i])\n', (4047, 4058), False, 'import math\n'), ((512, 522), 'sympy.cos', 'cos', (['theta'], {}), '(theta)\n', (515, 522), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((609, 619), 'sympy.sin', 'sin', (['theta'], {}), '(theta)\n', (612, 619), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((719, 729), 'sympy.sin', 'sin', (['alpha'], {}), '(alpha)\n', (722, 729), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((743, 753), 'sympy.cos', 'cos', (['alpha'], {}), '(alpha)\n', (746, 753), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((536, 546), 'sympy.sin', 'sin', (['theta'], {}), '(theta)\n', (539, 546), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((549, 559), 'sympy.sin', 'sin', (['alpha'], {}), '(alpha)\n', (552, 559), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((560, 570), 'sympy.sin', 'sin', (['theta'], {}), '(theta)\n', (563, 570), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((574, 584), 'sympy.cos', 'cos', (['theta'], {}), '(theta)\n', (577, 584), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((622, 632), 'sympy.cos', 'cos', (['alpha'], {}), '(alpha)\n', (625, 632), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((633, 643), 'sympy.cos', 'cos', (['theta'], {}), '(theta)\n', (636, 643), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((657, 667), 'sympy.cos', 'cos', (['theta'], {}), '(theta)\n', (660, 667), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((671, 681), 'sympy.sin', 'sin', (['theta'], {}), '(theta)\n', (674, 681), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), 
((525, 535), 'sympy.cos', 'cos', (['alpha'], {}), '(alpha)\n', (528, 535), False, 'from sympy import symbols, pi, sin, cos, simplify\n'), ((646, 656), 'sympy.sin', 'sin', (['alpha'], {}), '(alpha)\n', (649, 656), False, 'from sympy import symbols, pi, sin, cos, simplify\n')]
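A numeric sanity check of the DH transform layout used by build_mod_dh_matrix() above, rewritten with numpy instead of sympy so it runs on its own; with theta = alpha = 0 it reduces to a pure translation by (a, 0, d). The joint values passed in are illustrative only.

import numpy as np

def dh_numeric(theta, alpha, d, a):
    ct, st = np.cos(theta), np.sin(theta)
    ca, sa = np.cos(alpha), np.sin(alpha)
    return np.array([[ct, -ca * st,  sa * st, a * ct],
                     [st,  ca * ct, -sa * ct, a * st],
                     [0.,       sa,       ca,      d],
                     [0.,       0.,       0.,     1.]])

print(dh_numeric(0.0, 0.0, 0.675, 0.260))  # identity rotation, translation (0.260, 0, 0.675)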
import scipy.io import scipy.sparse import argparse import numpy as np import os import json import re import pandas import pdb def parse_args(): parser = argparse.ArgumentParser(description="Convert data from graphsage format to .mat format.") parser.add_argument('--prefix', default="example_data/douban/online/graphsage/online", help="Dataset prefix.") parser.add_argument('--prefix2', default="example_data/douban/offline/graphsage/offline", help="Dataset prefix.") parser.add_argument('--groundtruth', default="example_data/douban/dictionaries/groundtruth.dict", help="Ground truth file.") parser.add_argument('--out', default="example_data/douban/matlab/douban", help="Output prefix.") return parser.parse_args() def convert(args): G = json.load(open(args.prefix+"-G.json", "r")) id_map = json.load(open(args.prefix+"-id_map.json", "r")) # n1, offline, offline_node_label, groundtruth = pandas.read_csv(args.groundtruth, sep=" ").values.astype(np.uint16) n1 = np.array([[len(G["nodes"])]], dtype=np.uint16) # build links data = np.array([1.0, 1.0]*len(G["links"])) row = [] col = [] for link in G["links"]: row.append(id_map[str(link["source"])]) col.append(id_map[str(link["target"])]) row.append(id_map[str(link["target"])]) col.append(id_map[str(link["source"])]) links1 = scipy.sparse.csr_matrix((data, (row, col)), shape=(len(G["nodes"]), len(G["nodes"]))) # end build links # build offline_node_label data = [] row = [] col = [] for node in G["nodes"]: mask = np.array(node["feature"]) > 0.0 indexes = mask.nonzero()[0] row += [id_map[str(node["id"])]]*len(indexes) col += indexes.tolist() data += np.array(node["feature"])[indexes].tolist() node_label1 = scipy.sparse.csr_matrix((data, (row, col)), shape=(len(G["nodes"]), len(G["nodes"][0]["feature"]))) G = json.load(open(args.prefix2+"-G.json", "r")) n2 = np.array([[len(G["nodes"])]], dtype=np.uint16) # build links data = np.array([1.0, 1.0]*len(G["links"])) row = [] col = [] for link in G["links"]: row.append(id_map[str(link["source"])]) col.append(id_map[str(link["target"])]) row.append(id_map[str(link["target"])]) col.append(id_map[str(link["source"])]) links2 = scipy.sparse.csr_matrix((data, (row, col)), shape=(len(G["nodes"]), len(G["nodes"]))) # end build links # build offline_node_label data = [] row = [] col = [] for node in G["nodes"]: mask = np.array(node["feature"]) > 0.0 indexes = mask.nonzero()[0] row += [id_map[str(node["id"])]]*len(indexes) col += indexes.tolist() data += np.array(node["feature"])[indexes].tolist() node_label2 = scipy.sparse.csr_matrix((data, (row, col)), shape=(len(G["nodes"]), len(G["nodes"][0]["feature"]))) # # # TODO: Check code here # H = np.random.uniform(0, 1, size=(n2[0][0], n1[0][0])) # H = H / H.sum() # H1 = H.T H = np.zeros((n2[0][0], n1[0][0])) H1 = H.T # end TODO # # # build mat mat = { "n1": n1, "online": links1, "online_node_label": node_label1, "n2": n2, "offline": links2, "offline_node_label": node_label2, "ground_truth": groundtruth, "H": H, "H1": H1 } # mat1 = { # "n1": n1, # "edge": links1, # "node_label": node_label1, # "H": H, # "groundtruth": groundtruth # } # mat2 = { # "n2": n2, # "edge": links2, # "node_label": node_label2, # "H1": H1 # } output_dir = "/".join(re.split(r"[\\\/]", args.out)[:-1]) if not os.path.exists(output_dir): os.makedirs(output_dir) scipy.io.savemat(args.out+".mat", mat, do_compression=True) # scipy.io.savemat(args.out+"1.mat", mat1) # scipy.io.savemat(args.out+"2.mat", mat2) if __name__ == "__main__": args = parse_args() print(args) convert(args)
[ "re.split", "os.makedirs", "argparse.ArgumentParser", "pandas.read_csv", "numpy.zeros", "os.path.exists", "numpy.array" ]
[((160, 254), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert data from graphsage format to .mat format."""'}), "(description=\n 'Convert data from graphsage format to .mat format.')\n", (183, 254), False, 'import argparse\n'), ((3086, 3116), 'numpy.zeros', 'np.zeros', (['(n2[0][0], n1[0][0])'], {}), '((n2[0][0], n1[0][0]))\n', (3094, 3116), True, 'import numpy as np\n'), ((3796, 3822), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (3810, 3822), False, 'import os\n'), ((3832, 3855), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (3843, 3855), False, 'import os\n'), ((1609, 1634), 'numpy.array', 'np.array', (["node['feature']"], {}), "(node['feature'])\n", (1617, 1634), True, 'import numpy as np\n'), ((2605, 2630), 'numpy.array', 'np.array', (["node['feature']"], {}), "(node['feature'])\n", (2613, 2630), True, 'import numpy as np\n'), ((3749, 3780), 're.split', 're.split', (['"""[\\\\\\\\\\\\/]"""', 'args.out'], {}), "('[\\\\\\\\\\\\/]', args.out)\n", (3757, 3780), False, 'import re\n'), ((935, 977), 'pandas.read_csv', 'pandas.read_csv', (['args.groundtruth'], {'sep': '""" """'}), "(args.groundtruth, sep=' ')\n", (950, 977), False, 'import pandas\n'), ((1779, 1804), 'numpy.array', 'np.array', (["node['feature']"], {}), "(node['feature'])\n", (1787, 1804), True, 'import numpy as np\n'), ((2775, 2800), 'numpy.array', 'np.array', (["node['feature']"], {}), "(node['feature'])\n", (2783, 2800), True, 'import numpy as np\n')]
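# Illustrative aside (not part of the dataset record above): the graphsage-to-.mat
# converter builds a symmetric adjacency matrix by appending every undirected link
# twice (source->target and target->source) into parallel data/row/col lists, handing
# them to scipy.sparse.csr_matrix, and bundling the pieces into a dict for
# scipy.io.savemat. The sketch below reproduces that pattern on a made-up 3-node toy
# graph; the edge list, node count and "toy.mat" filename are assumptions chosen only
# for illustration, not values from the record above.
import numpy as np
import scipy.sparse
import scipy.io

edges = [(0, 1), (1, 2)]   # hypothetical undirected edge list
n = 3                      # hypothetical node count

data, row, col = [], [], []
for s, t in edges:
    # add both directions so the resulting matrix is symmetric, as in convert() above
    data += [1.0, 1.0]
    row += [s, t]
    col += [t, s]

adj = scipy.sparse.csr_matrix((data, (row, col)), shape=(n, n))
scipy.io.savemat("toy.mat", {"n1": np.array([[n]]), "edge": adj}, do_compression=True)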
import os import sys import cv2 import imutils import numpy as np from tqdm import tqdm from enum import Enum from math import sqrt from time import time # Define a custom ENUM to specify which text-extraction function should be executed # later on on the last step of the pipeline class FTYPE(Enum): ALL = 0 EXACT = 1 SMOOTH = 2 BINARY = 3 GRAYSCALE = 4 SINGLECHAR = 5 # If the user should choose SINGLECHAR as type function, he should also specify # from which kind of processed plate he'd like to extract the single character. class STYPE(Enum): BINARY = 1 EXACT = 2 # We now define the main class of this project, the PlateExtractor. # This class will hold inside the methods which compose the pipeline. class PlateExtractor: #### 1) PREPROCESSING # Function that, given a plate image, will resize it to our standard (80x240), # approximating the result with the best interpolation possibile. def optimal_resize(self, plate): # Check if we need to enlarge the image or shrink it, because in those different # scenario we'll need different interpolation methods. if plate.shape[0] < 80 or plate.shape[1] < 240: # If the image (row,col) is lesser than the maximum dimension, we shrink it using # the cubic interpolation, whihc we'll give us the best result (at cost of # some computational speed). plate = cv2.resize(plate, (240,80), cv2.INTER_CUBIC) # Otherwise, if the image (row,col) exceed the maximum dimension elif plate.shape[0] > 80 or plate.shape[1] > 240: # we'll use the INTER_AREA interpolation. plate = cv2.resize(plate, (240,80), cv2.INTER_AREA) return plate # End # Simple function to display a given image with a given name def display_pipeline(self, name, plate): cv2.imshow(name, plate) cv2.waitKey(0) # End # Function that, given a plate image, will do various preprocessing operations to # optimally binarize and clarify the plate text in the image. def adaptive_preprocessing(self, plate, adaptive_bands=True, display=False): # If the display flag is set to true, we'll going to display the ENTIRE PIPELINE # of our methods. This will be the only comments explaining that. if display: self.display_pipeline("Raw Plate", plate) # Before going into the preprocessing, we need to scale our image ot a fixed # size of (80, 240) to be consistent in our steps. plate = self.optimal_resize(plate) # First thing, we convert the plate from RGB to HSV to generate a precise # blue mask that will be later applied to the grayscale image to cut # unnecessary elements from the whole plate image. hsv_plate = cv2.cvtColor(plate, cv2.COLOR_BGR2HSV) # PIPELINE SHOW if display: self.display_pipeline("HSV Plate", hsv_plate) # define a custom range for lower and upper blue values in HSV space; this # was taken by analyze the specific type of image to preprocess, in our case # 240x80 matrixes that present the same scene, using a HSV Color Picker # made with OpenCV. That said, with the Saturation channel max range imposed # to 170, we're excluding most of the blues from the final image range. lower = np.array([0,0,0]) upper = np.array([179,170,255]) # We then use inRange, that checks if array elements lie between the elements # of two other arrays. In this case, our first array is the plate to threshold, # while the two others are the lower and upper blue ranges. # inRange check if a pixel (x,y) of the image is contained inside the range: # if so, it puts a 255 value inside a dst image to attest that the (x,y) pixel # result in the range of the lower/upper bound. 
If not, that means the pixel # (x,y) hasn't passed the test and so it contains a blue value we want to mask. mask_blue = cv2.inRange(hsv_plate, lower, upper) # PIPELINE SHOW if display: self.display_pipeline("Blue mask", mask_blue) # Using a simple and fairly lightweight median blur to suppress white noise into # the mask image mask_blue_filtered = cv2.medianBlur(mask_blue, 3) # PIPELINE SHOW if display: self.display_pipeline("Blue mask filtered", mask_blue_filtered) # Before moving on the pipeline, we're gonna use the blue mask to extract the outer # (x,y) coordinates of the left and right blue band, since those will be useful # for considering only the central white plate content excluding the blue bands. # First thing, we extrapolate the max height (rows) and width (cols) of the image. # There will be cases where the blue band will be present only at one side (i.e.: # GERMAN PLATES) or at both sides (i.e.: ITALIAN PLATES). max_height = mask_blue_filtered.shape[0] max_width = mask_blue_filtered.shape[1] # If we'd like to have the coordinates of the maximum extension and starting point # of the bands to be automatic: # Before entering in the adaptive_bands scope, we initialize the optimal left and right # coordinate relative to the band that will be modified in the if/else # Optimal left and right are equal to the most-inner left and most-outer right # coordinate (in our default case: 0 and 240) optimal_left_width = 0 optimal_right_width = plate.shape[1] # We also initialize an optimal upper height and optimal lower height: that because # an image COULD NOT BE centered on a plate, but taking the surrounding car elements too. # With optimal height (lower and upper) we aim to extract ONLY the region containing the plate # alongisde with optimal width. Note that, since the image could instead contain ONLY the plate area, # we initialize those value at 0/max height and try to compute them with the adaptive bands logic. optimal_lower_height = 0 optimal_upper_height = plate.shape[0] # If the user would like adaptive bands if adaptive_bands: # Here, we're going to use find contours to check where the blue bands lie # in terms of pixel coordinates of the image we're analyzing. # Before finding the contours, we'd like to superimpose a white frame on the blue # mask image to detach rectangle mask from the border, since we're going to use # findContours to fulfill our scope. We achieve this by using numpy slicing on the # rows as follow, to replace on the bottom and top row (0-max cols) white pixels. mask_blue_filtered[0][0:max_width-1] = 255 mask_blue_filtered[max_height-1][0:max_width-1] = 255 # We do the same on the leftmost and rightmost column, replacing their values # with blue pixels: we do this by a simple iteration for the sake of simplicity for i in range(max_height): mask_blue_filtered[i][0] = 255 mask_blue_filtered[i][max_width-1] = 255 # We now call findContours to find the contours of the mask element (that should be # composed by the two blue mask rectangle. Note that we could have a decomposed black # mask, so we could deal with more contours than the expected). mask_contours = cv2.findContours(mask_blue_filtered, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Using the grab_contours function we return instead of a multi-dimensional array # containing both contours and hierarchy only the contours of an image, correctly # organized by the nested relationship between them. 
mask_contours = imutils.grab_contours(mask_contours) # PIPELINE SHOW if display: self.display_pipeline("Framed blue mask", mask_blue_filtered) # For drawing purposes in the debug section later on mask_blue_copy = None if display: mask_blue_copy = cv2.cvtColor(mask_blue_filtered, cv2.COLOR_GRAY2BGR) # Now, we need to find the most inner width coordinate in which the blue boxes # extends. We do such thing to use those coordinate to exclude everything before # those coordinates, since they represent a blue band. # We initialize the midpoint and the leftmost and rightmost width coordinate. image_midpoint = round(plate.shape[1]/2) # For every contour in the found contours in the mask for contour in mask_contours: # extract the x,y,w,h coordinates [x,y,w,h] = cv2.boundingRect(contour) # Check for noise: after the blue mask post-processing (median and closing) # the contour should contains only the contour representing the blue bounding # boxes. That should not be true due to small noise areas remained. To skip those # areas, we calculate the area of the bounding box: if the area is lesser than # the 5% of the area (empirical), we're dealing with a potential noise or symbol: # we just skip that. if (w*h) < (5*(plate.shape[1]*plate.shape[0]))/100: continue # check if the x coordinate (width) is placed left or right the midpoint # and if the box isn't the entire image (w=image width) if x > image_midpoint and w != plate.shape[1]: # now check if the found coordinate is lesser than the right optimal width # and if so, assign the new found coordinate: x alone is enough because # it represents the starting point of the blue band. if x <= optimal_right_width: optimal_right_width = x optimal_lower_height = y optimal_upper_height = h # Else, x is lesser than the midpoint: it's then located left in the image; # assure, as above, that we're not dealing with the entire image contour box. elif x < image_midpoint and w != plate.shape[1]: # As above, we use the inverse logic: if x is bigger than the current coordinate, # we found a better approximation for the blue band coordinate. In this case, # since the x alone represent the starting point and doesn't give us the extension # information of the blue box, we need to sum x and w to obtain the real coordinate # in which the blue band ends. if x >= optimal_left_width: optimal_left_width = x + w optimal_lower_height = y optimal_upper_height = h # DEBUG PURPOSES: if display: print("CONTOUR BOX FOUND: {}".format(cv2.boundingRect(contour))) cv2.rectangle(mask_blue_copy, (x,y), ((x + w), (y+h)+5), (0,255,0), 3) cv2.imshow("contours", mask_blue_copy) cv2.waitKey(0) else: # If not, set the default values relative to the italian plates: 25 and 215 optimal_left_width = 25 optimal_right_width = 215 # OPTIONAL: Draw correct blue band mask rectangles: this has been proved to decrease # the quality of the output sometimes. We'll just use the adaptive coordinates found # in the extract contours to avoid taking elements before or past the found coordinates. 
if display: # DIsplay infos about coordinates print("Max extension of left band: {} - Starting point of right band: {}".format( optimal_left_width, optimal_right_width )) # Getting a copy copy_mask = mask_blue_filtered.copy() # Draw left blue band: We'd like to draw the rectangle starting at (0,0) and finishing # at the optimal left width found with the maximum height possible (80 by default) cv2.rectangle(copy_mask, (0, 0), ((0 + optimal_left_width), (0+plate.shape[0])), 0, -1) # Draw right blue band: same concept here, but using the rightmost values cv2.rectangle(copy_mask, (optimal_right_width, 0), (plate.shape[1], plate.shape[0]), 0, -1) # Display it self.display_pipeline("Final Mask", copy_mask) # Now we're going to apply the mask on the plate: convert the latter to grayscale gray_plate = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY) # PIPELINE SHOW if display: self.display_pipeline("Gray plate", gray_plate) # Normalize the grayscale image to enhance the dark and white areas cv2.normalize(gray_plate, gray_plate, 0, 255, cv2.NORM_MINMAX) # PIPELINE SHOW if display: self.display_pipeline("Gray plate normalized", gray_plate) # Apply the generated blue mask on the grayscale plate image gray_plate_masked = cv2.bitwise_and(gray_plate, gray_plate, mask=mask_blue_filtered) # PIPELINE SHOW if display: self.display_pipeline("Gray plate masked", gray_plate_masked) # Now we need to use an adaptive threshold to generate a good approssimation # of the binarized image, useful to binarize text in an optimal way: we then # use adaptiveThreshold using GAUSSIAN_C as the gaussian summation of the neighborhood, # with a blocksize of 15 (neighborhood size) and a C constant equal to the # square of the standard deviation of the normalized grayscale image / 2, because # this value give us an adaptive way to threshold the image based on the content # of the analyzed plate (darker, brigther, blurred etc). We then invert the # binarized image to retrieve the text in white (more precise) binarized = cv2.adaptiveThreshold(gray_plate_masked, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15, sqrt(gray_plate_masked.std())/2) # PIPELINE SHOW if display: self.display_pipeline("Gray plate binarized", binarized) # To suppress eventual noise in the binarization, we do use another media filter, # this time fairly weak to delete white noise in the binarization. binarized_filtered = cv2.medianBlur(binarized, 3) # PIPELINE SHOW if display: self.display_pipeline("Binarized filtered", binarized_filtered) # Let's now use the coordinates found with adaptive_bands to draw black rectangles representing # the part of the band we need to filter out. We decide to not draw upper and lower mask rectangle # because there are cases in which the plate is oblique: drawing those will cut character out. 
# Draw left blue band: We'd like to draw the rectangle starting at (0,0) and finishing # at the optimal left width found with the maximum height possible (80 by default) cv2.rectangle(binarized_filtered, (0, 0), ((0 + optimal_left_width), (0+plate.shape[0])), 0, -1) # Draw right blue band: same concept here, but using the rightmost values cv2.rectangle(binarized_filtered, (optimal_right_width, 0), (plate.shape[1], plate.shape[0]), 0, -1) # Draw upper and lower band: OPTIONAL, works really good with horizontal LP images but # struggle with oblique LP images #cv2.rectangle(binarized_filtered, (0, 0), (240, optimal_lower_height), 0, -1) #cv2.rectangle(binarized_filtered, (0, optimal_upper_height), (240, 80), 0, -1) # PIPELINE SHOW if display: self.display_pipeline("Binarized masked", binarized_filtered) # We then return both the image filtered, binarized and with the mask applied on, # and the grayscale image for further analysis. return gray_plate, binarized_filtered, (optimal_left_width, optimal_right_width, optimal_lower_height, optimal_upper_height) # End #### 2) CONTOURS EXTRACTION # Function that, given a correct preprocessed and binarized plate, will find the # contours of the text inside the image, generating a mask that will cover only # the plate characters identified. Note that, if precise_masking is set to true, # the contours of the final image will be, instead of the bounding box, the precise # contours of the characters into the image. def extract_contours(self, preprocessed_plate, precise_masking=False, band_coordinates=(25, 215), display=False): # Using the function findContours, we aim to extract contours and borders # from a given image. This function, since take in input the RETR_TREE flag, # will build a hierarchy of nested contours in form of an array, that will store # the relationship between bounds. Specifying only one returning elements we # store in contours both the contour of the input and the hierarchy, fundamental # for our next step. contours = cv2.findContours(preprocessed_plate, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Using the grab_contours function we return instead of a multi-dimensional array # containing both contours and hierarchy only the contours of an image, correctly # organized by the nested relationship between them. contours = imutils.grab_contours(contours) # Optional: sorting contours to extract the first 10 elements (most sensed contours) # and then take the first 10 elements that should contains text border: this can cause # a OUT OF BOUNDS exception since some images can present 8 or 9 total contours. # contours = (sorted(contours, key = cv2.contourArea)[::-1])[:10] # Create a black image with same size of the input image: this will be useful to # draw filled white boxes using the contour coordinate. Those boxes will be useful # in the next step when we'll use this generated image as a mask to extract characters. text_mask = np.zeros((preprocessed_plate.shape[0], preprocessed_plate.shape[1]), dtype=np.uint8) # To achieve better modularity we'll store the character bounding box inside # an array. bounding_contours = [] # Iterate through all the possible contour in the contours array for i,contour in enumerate(contours): # Given a set of coordinate points stored in a single contour object, # we tehn use boundingRect to calculates the up-right bounding rectangle of # a point set. This will return the x,y coordinate alongside with height and width # of a given contour point set. 
[x, y, w, h] = cv2.boundingRect(contour) # First thing, we check for meaningless contours: a meaningless contour is # just a contour that has nothing to do with the actual character contours: # those contours are the result of a not-too-much aggressive binarization, # that leave trails of white pixels on the left and right side of the plate. # To suppress those useless contours we just check the position found with # bounding rect of the contour analyzed. Remember that, in OpenCV, rows # and columns are inverted, so x = cols and y = rows. We then check if x # is lesser than a certain spatial position (left side) or exceed it from # the right. Since every plate got two blue band at it's side, we just # exclude a safe amount of pixel both from left and right (25 pixels is # a mean average found on some test images). Since our plate images are # of a fixed size of 80x240, we just add 25 + the first col and subtract # 25 - the last col, resulting in a min 25 and max 215 sensful bound. # Note that, if we override the band_coordinates with the adaptive blue band # coordinates found in the preprocessing, the result will be different based # on which extension of the blue bands found earlier. if x < band_coordinates[0] or x > band_coordinates[1]: # If the test not pass, the contour is meaningless and we can continue # to the next iteration continue # Now, since we got a bounding rectangle of a possible character contour, # we want to check if the size of this box is contained inside a specific # interval: this because, the plate extraction algorithm in this project # produces a costant image result in which characters of the plate got # similar size. With this knowledge we can then define a maximum and minimum # range in which a bounding box result in a potential character. # To assure that, we check the membership of widht and height to that interval. # NOTE: the interval was taken analyzing characters of various plate images. if 5 <= w <= 28 and 35 <= h <= 63: # Now, before performing whatever operation, we check which kind of contouring # we'd like to have: if precise_masking is set to True, that means we do not # want to draw bounding boxes around the characters but instead we want to draw # a precise mask of the character itself filled with a white solid color. # Instead, if precise_masking is set to False, we then use the given positions #`in terms of coordinate and width/height to draw a filled rectangle on the # text_mask black empty image, aimed to draw white boxes that later will be # used as a mask to extract characters from the plate. if precise_masking: # If precise_masking is True, then we just draw a filled character # contour in the mask destination # In the function, text_mask is the image where we'll going to draw # the contours, [contour] is the single occurrence of contours # passed as a matrix, 0 is the element we're going to analyze (since # countour is composed just by one element, the 0th) 255 is the color # we're going to draw the contour and -1 means "fill the shape". cv2.drawContours(text_mask, [contour], 0, 255, -1) # then append the current contours point into bounding_contours # alongside with his bounding coordinates x and y bounding_contours.append((x, y, contour)) # PIPELINE SHOW if display: print("{} CONTOUR PASSED: H = {} / W = {} / x = {} / y = {}".format(i, h, w, x, y)) self.display_pipeline("Text Mask", text_mask) else: # Instead, if precise_masking is False, we just compute the bounding # box based on x,y,w,h computed before. 
Note that, for precision # we give a 3 pixel room more in the height. # Here, text_mask is the image where we'll going to draw the rectangle, # (x,y) is the first vertex (uperr left) of the rectangle, (x+w) and # (y+h) is the opposite vertex (bottom right) 255 is the is the color # we're going to draw and -1 means "fill the rectangle". cv2.rectangle(text_mask, (x, y), ((x + w), (y + h) + 3), 255, -1) # then append the current bounding box points into bounding_contours # alongside with his bounding coordinates x and y bounding_contours.append((x, y, w, h)) # PIPELINE SHOW if display: print("{} CONTOUR PASSED: H = {} / W = {} / x = {} / y = {}".format(i, h, w, x, y)) self.display_pipeline("Text Mask", text_mask) else: # If here, that means the boundingRect coordinates did not pass the test; # we're then dealing with a FALSE POSITIVE that should be ignored. Display # information and skip the iteration cycle # PIPELINE SHOW if display: print("{} CONTOUR NOT PASSED: H = {} / W = {} / x = {} / y = {}".format(i, h, w, x, y)) continue # We want to remove unwanted contours. That is to say, contours nested one in another. # So we sort the bounding boxes by x position and we proceed to check a constraint # on the top left and bottom right corners of the squares. We assume external square # always antecede internal squares as their "x" is smaller. bounding_contours = sorted(bounding_contours, key = lambda x: x[0]) # Having the contours sorted by the x position (i.e.: columns) we now check # for every contour in bounding_contours if there are some nested contours # (i.e.: imagine a D where the first contour are the outer D points and the second # contour are the internal D points). # We start our enumeration at one because the first contour is represented by # the left-most character. for i, contour in enumerate(bounding_contours[1:], start=1): # Initialize empty x,y,w,h values needed later for nested contours check: # we do this both for the current contour and previous one [x_c, y_c, w_c, h_c] = [0,0,0,0] [x_p, y_p, w_p, h_p] = [0,0,0,0] # Check now if we're dealing with a precise masking or not: remember that, # a precise masking bounding_contours array is composed by 3 main elements, # (x,y,[contours_point]) while a not precise masking is composed by 4: (x,y,w,h) if precise_masking: # Extracting the boundingRect coordinate from the contous points, both # current and previous [x_c, y_c, w_c, h_c] = cv2.boundingRect(contour[2]) [x_p, y_p, w_p, h_p] = cv2.boundingRect(bounding_contours[i-1][2]) # Else, precise masking is set to false, and the len of contour is four: # x,y,w,h given by the non-precise masking bounding boxes. else: # Assigning the current bounding box coordinates to the existing one, # alongside with the previous one [x_c, y_c, w_c, h_c] = contour [x_p, y_p, w_p, h_p] = bounding_contours[i-1] # Extracted the points, we now check if the current contour box is inside # the previous one (i.e.: the D example). To do that, we check if the current # x is major than the previous x (upper left corner), and if the current x+w # is lesser than the previous x+w (upper right corner) if x_c > x_p and (x_c + w_c) < (x_p + w_p): # if so, remove the current element because it represent a nested # contour bounding_contours.remove(contour) # PIPELINE SHOW if display: self.display_pipeline("Final mask", text_mask) # We then return, after the character mask generation, both the masked text image # and the array containing the boxes that represent the characters. 
return (bounding_contours, text_mask) # END # Function that fiven an image name and its bounding contours, will create the .box # file associated with his associated syntax capable to be passed into the tesseract # train. def generate_boxfile(self, filename, bounding_contours): # If the OUTPUT_BOX file does not exists, create one to store the .box files if not os.path.isdir("OUTPUT_BOX"): os.mkdir("OUTPUT_BOX") # We use a try to catch an eventual error due to an inconsisten name of the plate # NOTE: for a correct .box file generation, the plate name should be like this: # ID-PLATECHARACTER.EXT -> 0-DZ234EW, where 0 is the numerical ID and the character # following are the plate characters contained in it. if the plate does not have 7 try: # Since the filename is in format ID-PLATECHARACTER.EXTENSION we need to split # the string to retrieve ONLY the ID and the characters of the plate. # Retrieving the ID ID = filename.split("-")[0] # Retrieving the entire plate characters name plate = (filename.split("-")[1]).split(".")[0] except: # If an error occurred, just return: the name isn't in the correct format. return # Initializing an empty x,y,w,h coordinates tuple; this will be modified later # based on the lenght of the bounding_contours: 3 for the precise masking, that # will require a boundingRect function on the third element to retrieve [x,y,w,h] # 4 for the not-precise masking (and so already in the [x,y,w,h] format) [x,y,w,h] = [0,0,0,0] # If the bounding_contours len is > 7 (characters of the plate) proceed with the # .box extraction (so if the bounding_contours failed to retrieve all the 7 boxes # it will be not created) if len(bounding_contours) >= len(plate): # We know that the height of the image is a fixed value of 80 height = 80 # We then open and create a file with write permission inside the output path # of the OUTPUT_BOX folder: it's name will be identical to the PLATE ID to # mantain consistence between box files and plates. with open(os.path.join("OUTPUT_BOX", "{}.box".format(ID)), 'w') as file: # Enumerating the plate lenght (7 characters) for ind,char in enumerate(plate): # We now check the bounding_contours lenght: if 3, this means the # array was obtained using precise_masking, and so we need to use # the function boundingRect to extract (x,y,w,h) if len(bounding_contours[ind]) == 3: # Extract the coordinates in format [x,y,w,h] using the contours # points [x,y,w,h] = cv2.boundingRect(bounding_contours[ind][2]) # Otherwise the non-precise masking was used and we can simply extract # the coordinates tuple by accessing the bounding_contours at that very index else: [x,y,w,h] = bounding_contours[ind] # We now get the coordinates: since tesseract uses the y axis inverted # (not like openCV that uses (y,x), tesseract do use (x,y)) we need to # invert coordinates in such manner. We use the height to invert them. left = x bottom = height - (y+h) right = left + w top = height - y # We then write in the file the char followed by it's coordinate just fetched. file.write("%s %d %d %d %d 0 \n" % (char,left,bottom,right,top)) # End ##### 3) TEXT EXTRACTION METHODS # Function that, given an image both binarized and grayscale, the mask, his bounding contours, # it's name and the type of function to be applied, will both write result on disk and # return the image if specified. Note that, by default, both return and write are set # to false; this flags will be activated by the appropriate function. 
# Also, a stype (singletype) flag is passed, to let the grayscale_sametext_single which # function has to be applied to retrieve the single characters. def extract_text(self, bin_plate, gray_plate, plate, adaptive_coord, contours_coordinates, mask, name, ftype, stype, write=False, ret=False, display=False): # Now check whatever funciton the user needs with some ifs if ftype == FTYPE.ALL: # if all was choosen, return should be set to false # because it's useless to return all the images; just write them on disk. # Same reason apply to display: we need to write down, not display them. ret = False display= False # Calling every text-extraction function: since the flag is ALL, we need # to write on the disk EVERY possible kind of text-extraction method. # We proceed to call every method passing the relative image. self.binarized_smooth(bin_plate, name, mask, write, ret, display) self.binarized_text(bin_plate, name, mask, write, ret, display) self.grayscale_text(plate, name, mask, write, ret, display, adaptive_coord) self.grayscale_sametext(plate, name, mask, write, ret, display) # For the single character extraction, we need to know which kind of final # output we need: if a single character extracted from the binary plate # or extracted from the enhanced grayscale one. # To do that we just check which kind of SINGLE CHARACTER TYPE we'd like, # binary or grayscale. We then pass the appropriate image to the function. if stype == STYPE.BINARY: # If the extraction needed is from binary, pass the binary plate self.grayscale_sametext_single(bin_plate, contours_coordinates, name, mask, stype) else: # Else, the extraction if from the grayscale image. self.grayscale_sametext_single(plate, contours_coordinates, name, mask, stype) elif ftype == FTYPE.SMOOTH: # return smooth (returns none in case ret is false) return self.binarized_smooth(bin_plate, name, mask, write, ret, display) elif ftype == FTYPE.BINARY: # return binary text (returns none in case ret is false) return self.binarized_text(bin_plate, name, mask, write, ret, display) elif ftype == FTYPE.GRAYSCALE: # return grayscale text binarized (returns none in case ret is false) return self.grayscale_text(plate, name, mask, write, ret, display, adaptive_coord) elif ftype == FTYPE.EXACT: # return exact text of grayscale (returns none in case ret is false) return self.grayscale_sametext(plate, name, mask, write, ret, display) elif ftype == FTYPE.SINGLECHAR: # return exact text of grayscale subdivided by a single character # (doesn't return anything; it just write single characters on disk) # so both write and ret aren't passed because this function ONLY writes. # As above, we must check which kind of plate is needed from the extraction. 
if stype == STYPE.BINARY: # If the extraction needed is from binary, pass the binary plate self.grayscale_sametext_single(bin_plate, contours_coordinates, name, mask, stype) else: # Else, the extraction is made on the rgb plate self.grayscale_sametext_single(plate, contours_coordinates, name, mask, stype) else: # if ftype was wrong return -1 as error return -1 # END def write_on_path(self, output_path, image, image_name): # Write output on disk: check if an output folder already exists if not os.path.isdir(output_path): # if not, create it and write the image on os.mkdir(output_path) cv2.imwrite(os.path.join(output_path, image_name), image) else: # If the path exists, just write the image inside cv2.imwrite(os.path.join(output_path, image_name), image) # END # Function that, given an RGB plate will enhance the color of the characters and # the background using the rgb planes for a accurate text extraction. def enhance_plate(self, plate): # Before going into the preprocessing, we need to scale our image ot a fixed # size of (80, 240) to be consistent in our steps. plate = self.optimal_resize(plate) # Splitting the RGB Planes into 3 images rgb_planes = cv2.split(plate) # Arrays containg the i-th step of the algorithm result_planes = [] result_norm_planes = [] # For each plane in the planes extracted for plane in rgb_planes: # Dilate the image to obtain a thicker version of the plate in that specific # plane dilated_img = cv2.dilate(plane, np.ones((5, 5), np.uint8)) # Applly a median blur to obtain the background for that very plane bg_img = cv2.medianBlur(dilated_img, 31) # Do the absolute difference between the current plane and the calculated image # to retrieve a clean image diff_img = 255 - cv2.absdiff(plane, bg_img) # Normalize the result to enhance differences norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) # Append the processed plane to the arrasy result_planes.append(diff_img) result_norm_planes.append(norm_img) # After the operations are done, merge back the planes plate = cv2.merge(result_norm_planes) # And then return the grayscale version of the enchanced plate return cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY) # Function that given the binarized optimal image and the mask, will produce a # clean image containing only the characters of the plate, smoothed and thickened # with some post-processing. It will also write the image with the name passed # on the specified directory. 
def binarized_smooth(self, binarized_plate, image_name, text_mask, write, ret, display): # Simply applying the text mask on the filtered image will give us # an image resulting in only characters of the plate masked_binarized = cv2.bitwise_and(binarized_plate, binarized_plate, mask=text_mask) # PIPELINE SHOW if display: self.display_pipeline("Masked binarized", masked_binarized) # Inverting the image to convert white text to black text text_image = cv2.threshold(masked_binarized, 127, 255, cv2.THRESH_BINARY_INV)[1] # PIPELINE SHOW if display: self.display_pipeline("Binarized not smoothed", text_image) # Blurring the image with a medium gaussian filter text_blur = cv2.GaussianBlur(text_image,(5,5),0) # Adding the weighted sum of the base image and the gaussian blur to retrieve # a smoother contour text_weighted = cv2.addWeighted(text_blur,1.5,text_image,-0.5,0) # Binarizing the resulting image to remove blurry areas text_binarized = cv2.threshold(text_weighted, 230, 255, cv2.THRESH_BINARY)[1] # thicken the image with a closing (dilation+erosion) kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) text_final = cv2.morphologyEx(text_binarized, cv2.MORPH_CLOSE, kernel, iterations=1) # PIPELINE SHOW if display: self.display_pipeline("Binarized Smoothed", text_final) # Write output on disk if write: self.write_on_path("OUTPUT_SMOOTH", text_final, image_name) # Return the text image if specified if ret: return text_final # END # Function that given the grayscale image and the mask, will produce a # clean image containing only the characters of the plate contained into the # binarized image. It will also write the image with the name passed # on the specified directory. def binarized_text(self, binarized_plate, image_name, text_mask, write, ret, display): # Since we got the binarized plate, we just apply the mask on it # to retrieve the plate characters masked_binarized = cv2.bitwise_and(binarized_plate, binarized_plate, mask=text_mask) # PIPELINE SHOW if display: self.display_pipeline("Mask binarized", masked_binarized) # Wr then invert the color to have the text in black instead of white text_final = cv2.threshold(masked_binarized, 127, 255, cv2.THRESH_BINARY_INV)[1] # PIPELINE SHOW if display: self.display_pipeline("Output", text_final) # Write output on disk if specified if write: self.write_on_path("OUTPUT_BINARIZED", text_final, image_name) # Return the text image if specified if ret: return text_final # END # Function that given the grayscale image and the mask, will produce a # clean and enhanced image. def grayscale_text(self, plate, image_name, text_mask, write, ret, display, adaptive_coord): # Apply the enhancement on the plate grayscale_plate = self.enhance_plate(plate) # PIPELINE SHOW if display: self.display_pipeline("Grayscale enhanced output", grayscale_plate) # Given the enhanced plate, now we'll cut the blue band with the coordinates found # in the preprocessing step: note that, the coordinates could be adaptive (found by # the band contour method) or static (Defaultly assigned). # Cut from the left band coordinate found to the end grayscale_plate_crop = grayscale_plate[adaptive_coord[2]:adaptive_coord[3], adaptive_coord[0]:grayscale_plate.shape[1]] # Now cut from 0 to the rightmost starting band coordinate: remember that, the image has # been cropped from the left so we need to subtract to the rightmost coordinate the # pixels that now are missing (from 0 to adaptive_coord[0]). 
grayscale_plate_crop = grayscale_plate_crop[:, 0:(adaptive_coord[1]-adaptive_coord[0])] # PIPELINE SHOW if display: self.display_pipeline("Grayscale cropped", grayscale_plate_crop) # Reshape to original size grayscale_plate = self.optimal_resize(grayscale_plate_crop) # PIPELINE SHOW if display: self.display_pipeline("Grayscale cropped resized", grayscale_plate) # Write output on disk if write: self.write_on_path("OUTPUT_GRAYSCALE_BIN", grayscale_plate, image_name) # Return the text image if specified if ret: return grayscale_plate # END # Function that given the grayscale image and the mask, will produce a # clean image containing only the characters of the plate returning # the exact same characters of the plate without any postprocessing. # It will also write the image with the name passed on the specified directory. def grayscale_sametext(self, plate, image_name, text_mask, write, ret, display): # Enhance plate grayscale_plate = self.enhance_plate(plate) # We first do an AND masking with the grayscale enhanced plate to extract the desired # zone containing the characters. masked_binarized_and = cv2.bitwise_and(grayscale_plate, grayscale_plate, mask=text_mask) # PIPELINE SHOW if display: self.display_pipeline("Masked binarized and", masked_binarized_and) # We then do the negation (not) of the and mask to retrieve the inverse mask masked_binarized_notand=cv2.bitwise_not(masked_binarized_and) # PIPELINE SHOW if display: self.display_pipeline("Masked binarized not and", masked_binarized_notand) # We then apply the text mask on the negation of the and mask retrieved before masked_binarized_andnotand = cv2.bitwise_and(masked_binarized_notand, masked_binarized_notand, mask=text_mask) # PIPELINE SHOW if display: self.display_pipeline("Masked binarized and not nad", masked_binarized_andnotand) # We now invert the image to retrieve the original and exact pixel values in their # right colors: we extracted ONLY the wanted masked areas into the grayscale image. masked_grayscale = ~masked_binarized_andnotand # Normalize final result for better color distinction cv2.normalize(masked_grayscale, masked_grayscale, 0, 255, cv2.NORM_MINMAX) # PIPELINE SHOW if display: self.display_pipeline("Output", masked_grayscale) # Write output on disk if specified if write: self.write_on_path("OUTPUT_GRAYSCALE_EXACT", masked_grayscale, image_name) # Return the text image if specified if ret: return masked_grayscale # END # Function that given the grayscale image, the mask and the spatial countour coord, # will produce a clean image containing only the characters of the plate, that will # further divide into seven images, each of them containing, in a sequential organization # (first spatial character will come first: i.e: having EX 4573A as plate, this function # will generate seven images, in the order: E(1), X(2), 4(3) etc..). # NOTE: this modality doesn't show return anything, it will ONLY write the single character # of a given plate on a folder. def grayscale_sametext_single(self, plate, contours_coordinates, image_name, text_mask, stype): # First thing we're going to do, is sort the contours_coordinate by the x coordinate # (the first element of both the precise masking and not precise masking result). # Doing this allow to organize the bounding box in terms of spatial relationship, # giving a 'sequence-like' order respecting the order of the plate string. # Remember that since we extracted the coordinates with OpenCV x means columns # and y means row. 
We gonna use a lambda function centered on the first element # to sort our array for the column position; lesser column value means precedent # position in the plate string. contours_coordinates = sorted(contours_coordinates, key=lambda x:x[0]) # We then check which type of character we'd like to have in output: extracted # from a binary plate or from the enhanced grayscale image. We then initialize # an empty image at first text_image = None # And then check which type of function we need for the character extraction if stype == STYPE.BINARY: # If binary, we need to get the optimal binary image from the plate in input text_image = self.binarized_text(plate, image_name, text_mask, write=False, ret=True, display=False) else: # Otherwise, we need to extract the single character wanted from a grayscale # image. text_image = self.grayscale_sametext(plate, image_name, text_mask, write=False, ret=True, display=False) # If not existing, make a folder for the single character output if not os.path.isdir("OUTPUT_SINGLE"): os.mkdir("OUTPUT_SINGLE") # Since we're going to extract the single character from the grayscale plate # applying a bounding box on it, we need to nest all the operation on a for loop # iterating seven times (size of contours_coordinate) extracting the single # characters into single images. for i, contour in enumerate(contours_coordinates): # Before assuring in which case we're on, we need to define two empty structure # that will save the current character image and the coordinates in which the # bounding box around this caracter is stored. We do this to avoid reduntant # operation in the code. cur_character = None [x,y,w,h] = [0,0,0,0] # Now, first thing we need to check, is the lenght of the contour passed: # we have two cases. 1) is when precise_masking was set to true, so the # contour is composed by (x,y, [contour_points]). In this case, the lenght # is equal to three. 2) is when precise_masking was set to true, so the # contour is composed by (x,y,w,h) because a rectangle was draw. # Different cases requires then different approached. # Case when the precise_masking is true (we got only character contours) if len(contour) == 3: # using the contour itself, we're going to generate a bounding rectangle # around our contour to define the area of the output image, since boundingRect # calculates the up-right bounding rectangle of a point set. We pass into # the function the third argument of contour, the contour points array. [x, y, w, h] = cv2.boundingRect(contour[2]) # if the lenght of the current contour isn't equal to three, that means we # got 4 elements (and we know that for sure since the extract_contours function # returns an array composed by [x,y,w,h] elements instead of [x,y,[contours_point]) # and we just need to write the single character as an image extracting the box. else: # Extract the [x,y,w,h] elements from the current contour element, saved # previously with boundingRect into the contour extraction function [x, y, w, h] = contour # We then need to extract the current character from the grayscale image: # to achieve this fast, we use the numpy slicing on the image: note that, # only OpenCV treats x as y and y as x, so in a numpy array x = row and # y = col. With that in mind, we invert the coordinates using y as row # and x as col since we got x,y,w,h with the OpenCV function boundingRect. 
# We use y and x as starting index, and then crop our ROI using the widht # and height to generate the bounding box of the text_image, already processed # and ready to be used for character extraction purposes. # Note that, Using the slicing we'll select ONLY the image part relative to # the current analyzed character. cur_character = text_image[y: y+h, x: x+w] # Make padding border on the image using copyMakeBorder and passing the # desired padding size in every direction bordersize = 10 cur_character = cv2.copyMakeBorder(cur_character, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType=cv2.BORDER_CONSTANT, value=255) # Having the current character box, we now need to write it into the # folder defined by the current image name passed in input: for that, # we'll simply use the function write_on_path to write the current # character as a single image. self.write_on_path(os.path.join("OUTPUT_SINGLE", image_name.split('.')[0]), cur_character, "{}.png".format(i)) # END #### END TEXT EXTRACTION METHODS #### DISPLAY FUNCTIONS # Function that, given a path, will write the corresponding image/s found in the path # (if the path is an image, it will write only the passed image) into an output folder. # NOTE: if the input path is a folder, it must contains only images and folders. # Defaultly, This function will apply the binarization function as default one, # with precise_masking=false (bounding boxes will be returned instead of precise contours) def apply_extraction_onpath(self, input_path=None, desired_ext='png', precise_masking=True, adaptive_bands=True, ftype=FTYPE.BINARY, stype=STYPE.BINARY, ret=False, write=True): # Check if path is none then exit if input_path is None: print("Path must be a folder containing images or a single plate image.") exit(1) # First thing, we check if the passed input path is a directory: if os.path.isdir(input_path): print("Going to extract {} images from: {}".format(len(os.listdir(input_path)), input_path)) # If so, we are going to extract image by image the files in that directory. for file in tqdm(sorted(os.listdir(input_path))): # We then check if the fhe file extracted is a folder if os.path.isdir(file): # if so, continue with the next for iteration continue # If not, we then extract the text from the file and write it into # it's relative folder # We then open the image plate = cv2.imread(os.path.join(input_path, file)) # extract the grayscale normalized and the binarized optimal plate gray_plate, bin_plate, adaptive_coord = self.adaptive_preprocessing(plate) # we then extract the contours mask contours_coordinate, contours_mask = self.extract_contours(bin_plate, precise_masking=precise_masking, band_coordinates=adaptive_coord) # Generating a .BOXFILE for an optional tesseract (OCR) training self.generate_boxfile(file, contours_coordinate) # Split the filename to extract name and extension and subsitute the # extracted extensione with the desired one in the latter function filename, _ = file.split('.') # and extract the text from the image: note that, this function will write # by default results on disk. result = self.extract_text(bin_plate, gray_plate, plate, adaptive_coord, contours_coordinate, contours_mask, "{}.{}".format(filename, desired_ext), ftype, stype, True, False, False) else: print("Going to extract characters from: {}".format(input_path)) # Else, the path is not a folder but just an image. Then, proceed # to apply preprocessing and text extraction. 
# Reading the file plate = cv2.imread(input_path) # extract the grayscale normalized and the binarized optimal plate gray_plate, bin_plate, adaptive_coord = self.adaptive_preprocessing(plate) # we then extract the contours mask contours_coordinate, contours_mask = self.extract_contours(bin_plate, precise_masking=precise_masking, band_coordinates=adaptive_coord) # Generating a .BOXFILE for an optional tesseract (OCR) training self.generate_boxfile(input_path, contours_coordinate) # Split the filename to extract name and extension and subsitute the # extracted extensione with the desired one in the latter function filename, _ = input_path.split('.') # and extract the text from the image: note that, this function will write # by default results on disk. result = self.extract_text(bin_plate, gray_plate, plate, adaptive_coord, contours_coordinate, contours_mask, "{}.{}".format(filename, desired_ext), ftype, stype, write, ret, False) # If ret was set to true, return the processed plate to the user if ret: return result print("All done!") # END # Function aimed to show a preprocessing cycle on a given image # Note that precise_masking is set to false; we'll then get the bounding box mask. # We also use SMOOTH as standard function and return set to true to retrieve # the image processed with the extract_text function. # NOTE: this function works only for FTYPE GRAYSCALE, FTYPE EXACT, FTYPE SMOOTH and FTYPE BINARY. def display_result(self, input_path=None, precise_masking=True, adaptive_bands=True, ftype=FTYPE.BINARY, display=True): # Exit if no path specified or is a folder if input_path is None or os.path.isdir(input_path) is True: print("Path missing OR path inserted is a folder.\nPlease usa a single image when using function 'display_result'.") exit(1) if ftype==FTYPE.SINGLECHAR: print("Single characters can not be displayed. Please, select another extraction method to display.") exit(1) # Reading the plate plate = cv2.imread(input_path) # Apply preprocessing on it to binarize information gray_plate, bin_plate, adaptive_coord = self.adaptive_preprocessing(plate, adaptive_bands, display) # We then extract the contours passing the specified precise_masking property # and taking only the second of the returned arguments (the image) contours_coordinate, contours_mask = self.extract_contours(bin_plate, precise_masking=precise_masking, band_coordinates=adaptive_coord, display=True) # we then process the final image containing only the clean text. text = self.extract_text(bin_plate, gray_plate, plate, adaptive_coord, contours_coordinate, contours_mask, "image.png", ftype, None, False, True, display) # End # Endclas
[ "cv2.GaussianBlur", "os.mkdir", "cv2.bitwise_and", "cv2.medianBlur", "numpy.ones", "cv2.rectangle", "cv2.normalize", "cv2.absdiff", "cv2.imshow", "cv2.inRange", "os.path.join", "cv2.cvtColor", "cv2.copyMakeBorder", "cv2.split", "cv2.drawContours", "cv2.boundingRect", "cv2.resize", "cv2.bitwise_not", "cv2.waitKey", "cv2.morphologyEx", "cv2.addWeighted", "imutils.grab_contours", "cv2.merge", "os.listdir", "os.path.isdir", "cv2.getStructuringElement", "cv2.threshold", "numpy.zeros", "cv2.imread", "numpy.array", "cv2.findContours" ]
[((1883, 1906), 'cv2.imshow', 'cv2.imshow', (['name', 'plate'], {}), '(name, plate)\n', (1893, 1906), False, 'import cv2\n'), ((1915, 1929), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1926, 1929), False, 'import cv2\n'), ((2836, 2874), 'cv2.cvtColor', 'cv2.cvtColor', (['plate', 'cv2.COLOR_BGR2HSV'], {}), '(plate, cv2.COLOR_BGR2HSV)\n', (2848, 2874), False, 'import cv2\n'), ((3396, 3415), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3404, 3415), True, 'import numpy as np\n'), ((3430, 3455), 'numpy.array', 'np.array', (['[179, 170, 255]'], {}), '([179, 170, 255])\n', (3438, 3455), True, 'import numpy as np\n'), ((4062, 4098), 'cv2.inRange', 'cv2.inRange', (['hsv_plate', 'lower', 'upper'], {}), '(hsv_plate, lower, upper)\n', (4073, 4098), False, 'import cv2\n'), ((4334, 4362), 'cv2.medianBlur', 'cv2.medianBlur', (['mask_blue', '(3)'], {}), '(mask_blue, 3)\n', (4348, 4362), False, 'import cv2\n'), ((12860, 12899), 'cv2.cvtColor', 'cv2.cvtColor', (['plate', 'cv2.COLOR_BGR2GRAY'], {}), '(plate, cv2.COLOR_BGR2GRAY)\n', (12872, 12899), False, 'import cv2\n'), ((13078, 13140), 'cv2.normalize', 'cv2.normalize', (['gray_plate', 'gray_plate', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(gray_plate, gray_plate, 0, 255, cv2.NORM_MINMAX)\n', (13091, 13140), False, 'import cv2\n'), ((13343, 13407), 'cv2.bitwise_and', 'cv2.bitwise_and', (['gray_plate', 'gray_plate'], {'mask': 'mask_blue_filtered'}), '(gray_plate, gray_plate, mask=mask_blue_filtered)\n', (13358, 13407), False, 'import cv2\n'), ((14865, 14893), 'cv2.medianBlur', 'cv2.medianBlur', (['binarized', '(3)'], {}), '(binarized, 3)\n', (14879, 14893), False, 'import cv2\n'), ((15512, 15610), 'cv2.rectangle', 'cv2.rectangle', (['binarized_filtered', '(0, 0)', '(0 + optimal_left_width, 0 + plate.shape[0])', '(0)', '(-1)'], {}), '(binarized_filtered, (0, 0), (0 + optimal_left_width, 0 +\n plate.shape[0]), 0, -1)\n', (15525, 15610), False, 'import cv2\n'), ((15700, 15804), 'cv2.rectangle', 'cv2.rectangle', (['binarized_filtered', '(optimal_right_width, 0)', '(plate.shape[1], plate.shape[0])', '(0)', '(-1)'], {}), '(binarized_filtered, (optimal_right_width, 0), (plate.shape[1],\n plate.shape[0]), 0, -1)\n', (15713, 15804), False, 'import cv2\n'), ((17533, 17609), 'cv2.findContours', 'cv2.findContours', (['preprocessed_plate', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(preprocessed_plate, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (17549, 17609), False, 'import cv2\n'), ((17871, 17902), 'imutils.grab_contours', 'imutils.grab_contours', (['contours'], {}), '(contours)\n', (17892, 17902), False, 'import imutils\n'), ((18552, 18641), 'numpy.zeros', 'np.zeros', (['(preprocessed_plate.shape[0], preprocessed_plate.shape[1])'], {'dtype': 'np.uint8'}), '((preprocessed_plate.shape[0], preprocessed_plate.shape[1]), dtype=\n np.uint8)\n', (18560, 18641), True, 'import numpy as np\n'), ((36959, 36975), 'cv2.split', 'cv2.split', (['plate'], {}), '(plate)\n', (36968, 36975), False, 'import cv2\n'), ((38084, 38113), 'cv2.merge', 'cv2.merge', (['result_norm_planes'], {}), '(result_norm_planes)\n', (38093, 38113), False, 'import cv2\n'), ((38201, 38240), 'cv2.cvtColor', 'cv2.cvtColor', (['plate', 'cv2.COLOR_BGR2GRAY'], {}), '(plate, cv2.COLOR_BGR2GRAY)\n', (38213, 38240), False, 'import cv2\n'), ((38786, 38851), 'cv2.bitwise_and', 'cv2.bitwise_and', (['binarized_plate', 'binarized_plate'], {'mask': 'text_mask'}), '(binarized_plate, binarized_plate, mask=text_mask)\n', (38801, 38851), False, 'import cv2\n'), ((39298, 39337), 
'cv2.GaussianBlur', 'cv2.GaussianBlur', (['text_image', '(5, 5)', '(0)'], {}), '(text_image, (5, 5), 0)\n', (39314, 39337), False, 'import cv2\n'), ((39475, 39527), 'cv2.addWeighted', 'cv2.addWeighted', (['text_blur', '(1.5)', 'text_image', '(-0.5)', '(0)'], {}), '(text_blur, 1.5, text_image, -0.5, 0)\n', (39490, 39527), False, 'import cv2\n'), ((39755, 39804), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(3, 3)'], {}), '(cv2.MORPH_RECT, (3, 3))\n', (39780, 39804), False, 'import cv2\n'), ((39826, 39897), 'cv2.morphologyEx', 'cv2.morphologyEx', (['text_binarized', 'cv2.MORPH_CLOSE', 'kernel'], {'iterations': '(1)'}), '(text_binarized, cv2.MORPH_CLOSE, kernel, iterations=1)\n', (39842, 39897), False, 'import cv2\n'), ((40699, 40764), 'cv2.bitwise_and', 'cv2.bitwise_and', (['binarized_plate', 'binarized_plate'], {'mask': 'text_mask'}), '(binarized_plate, binarized_plate, mask=text_mask)\n', (40714, 40764), False, 'import cv2\n'), ((43733, 43798), 'cv2.bitwise_and', 'cv2.bitwise_and', (['grayscale_plate', 'grayscale_plate'], {'mask': 'text_mask'}), '(grayscale_plate, grayscale_plate, mask=text_mask)\n', (43748, 43798), False, 'import cv2\n'), ((44030, 44067), 'cv2.bitwise_not', 'cv2.bitwise_not', (['masked_binarized_and'], {}), '(masked_binarized_and)\n', (44045, 44067), False, 'import cv2\n'), ((44313, 44399), 'cv2.bitwise_and', 'cv2.bitwise_and', (['masked_binarized_notand', 'masked_binarized_notand'], {'mask': 'text_mask'}), '(masked_binarized_notand, masked_binarized_notand, mask=\n text_mask)\n', (44328, 44399), False, 'import cv2\n'), ((44832, 44906), 'cv2.normalize', 'cv2.normalize', (['masked_grayscale', 'masked_grayscale', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(masked_grayscale, masked_grayscale, 0, 255, cv2.NORM_MINMAX)\n', (44845, 44906), False, 'import cv2\n'), ((52647, 52672), 'os.path.isdir', 'os.path.isdir', (['input_path'], {}), '(input_path)\n', (52660, 52672), False, 'import os\n'), ((56974, 56996), 'cv2.imread', 'cv2.imread', (['input_path'], {}), '(input_path)\n', (56984, 56996), False, 'import cv2\n'), ((1435, 1480), 'cv2.resize', 'cv2.resize', (['plate', '(240, 80)', 'cv2.INTER_CUBIC'], {}), '(plate, (240, 80), cv2.INTER_CUBIC)\n', (1445, 1480), False, 'import cv2\n'), ((7560, 7636), 'cv2.findContours', 'cv2.findContours', (['mask_blue_filtered', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask_blue_filtered, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (7576, 7636), False, 'import cv2\n'), ((7919, 7955), 'imutils.grab_contours', 'imutils.grab_contours', (['mask_contours'], {}), '(mask_contours)\n', (7940, 7955), False, 'import imutils\n'), ((12384, 12474), 'cv2.rectangle', 'cv2.rectangle', (['copy_mask', '(0, 0)', '(0 + optimal_left_width, 0 + plate.shape[0])', '(0)', '(-1)'], {}), '(copy_mask, (0, 0), (0 + optimal_left_width, 0 + plate.shape[0\n ]), 0, -1)\n', (12397, 12474), False, 'import cv2\n'), ((12571, 12667), 'cv2.rectangle', 'cv2.rectangle', (['copy_mask', '(optimal_right_width, 0)', '(plate.shape[1], plate.shape[0])', '(0)', '(-1)'], {}), '(copy_mask, (optimal_right_width, 0), (plate.shape[1], plate.\n shape[0]), 0, -1)\n', (12584, 12667), False, 'import cv2\n'), ((19231, 19256), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (19247, 19256), False, 'import cv2\n'), ((28385, 28412), 'os.path.isdir', 'os.path.isdir', (['"""OUTPUT_BOX"""'], {}), "('OUTPUT_BOX')\n", (28398, 28412), False, 'import os\n'), ((28426, 28448), 'os.mkdir', 'os.mkdir', (['"""OUTPUT_BOX"""'], {}), "('OUTPUT_BOX')\n", 
(28434, 28448), False, 'import os\n'), ((36159, 36185), 'os.path.isdir', 'os.path.isdir', (['output_path'], {}), '(output_path)\n', (36172, 36185), False, 'import os\n'), ((36254, 36275), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (36262, 36275), False, 'import os\n'), ((37459, 37490), 'cv2.medianBlur', 'cv2.medianBlur', (['dilated_img', '(31)'], {}), '(dilated_img, 31)\n', (37473, 37490), False, 'import cv2\n'), ((37762, 37860), 'cv2.normalize', 'cv2.normalize', (['diff_img', 'None'], {'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_8UC1'}), '(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_8UC1)\n', (37775, 37860), False, 'import cv2\n'), ((39045, 39109), 'cv2.threshold', 'cv2.threshold', (['masked_binarized', '(127)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(masked_binarized, 127, 255, cv2.THRESH_BINARY_INV)\n', (39058, 39109), False, 'import cv2\n'), ((39614, 39671), 'cv2.threshold', 'cv2.threshold', (['text_weighted', '(230)', '(255)', 'cv2.THRESH_BINARY'], {}), '(text_weighted, 230, 255, cv2.THRESH_BINARY)\n', (39627, 39671), False, 'import cv2\n'), ((40968, 41032), 'cv2.threshold', 'cv2.threshold', (['masked_binarized', '(127)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(masked_binarized, 127, 255, cv2.THRESH_BINARY_INV)\n', (40981, 41032), False, 'import cv2\n'), ((47535, 47565), 'os.path.isdir', 'os.path.isdir', (['"""OUTPUT_SINGLE"""'], {}), "('OUTPUT_SINGLE')\n", (47548, 47565), False, 'import os\n'), ((47579, 47604), 'os.mkdir', 'os.mkdir', (['"""OUTPUT_SINGLE"""'], {}), "('OUTPUT_SINGLE')\n", (47587, 47604), False, 'import os\n'), ((51026, 51177), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['cur_character'], {'top': 'bordersize', 'bottom': 'bordersize', 'left': 'bordersize', 'right': 'bordersize', 'borderType': 'cv2.BORDER_CONSTANT', 'value': '(255)'}), '(cur_character, top=bordersize, bottom=bordersize, left=\n bordersize, right=bordersize, borderType=cv2.BORDER_CONSTANT, value=255)\n', (51044, 51177), False, 'import cv2\n'), ((54736, 54758), 'cv2.imread', 'cv2.imread', (['input_path'], {}), '(input_path)\n', (54746, 54758), False, 'import cv2\n'), ((1687, 1731), 'cv2.resize', 'cv2.resize', (['plate', '(240, 80)', 'cv2.INTER_AREA'], {}), '(plate, (240, 80), cv2.INTER_AREA)\n', (1697, 1731), False, 'import cv2\n'), ((8212, 8264), 'cv2.cvtColor', 'cv2.cvtColor', (['mask_blue_filtered', 'cv2.COLOR_GRAY2BGR'], {}), '(mask_blue_filtered, cv2.COLOR_GRAY2BGR)\n', (8224, 8264), False, 'import cv2\n'), ((8848, 8873), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (8864, 8873), False, 'import cv2\n'), ((26635, 26663), 'cv2.boundingRect', 'cv2.boundingRect', (['contour[2]'], {}), '(contour[2])\n', (26651, 26663), False, 'import cv2\n'), ((26703, 26748), 'cv2.boundingRect', 'cv2.boundingRect', (['bounding_contours[i - 1][2]'], {}), '(bounding_contours[i - 1][2])\n', (26719, 26748), False, 'import cv2\n'), ((36300, 36337), 'os.path.join', 'os.path.join', (['output_path', 'image_name'], {}), '(output_path, image_name)\n', (36312, 36337), False, 'import os\n'), ((36447, 36484), 'os.path.join', 'os.path.join', (['output_path', 'image_name'], {}), '(output_path, image_name)\n', (36459, 36484), False, 'import os\n'), ((37330, 37355), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (37337, 37355), True, 'import numpy as np\n'), ((37653, 37679), 'cv2.absdiff', 'cv2.absdiff', (['plane', 'bg_img'], {}), '(plane, bg_img)\n', (37664, 37679), False, 'import 
cv2\n'), ((49339, 49367), 'cv2.boundingRect', 'cv2.boundingRect', (['contour[2]'], {}), '(contour[2])\n', (49355, 49367), False, 'import cv2\n'), ((53022, 53041), 'os.path.isdir', 'os.path.isdir', (['file'], {}), '(file)\n', (53035, 53041), False, 'import os\n'), ((56574, 56599), 'os.path.isdir', 'os.path.isdir', (['input_path'], {}), '(input_path)\n', (56587, 56599), False, 'import os\n'), ((11243, 11316), 'cv2.rectangle', 'cv2.rectangle', (['mask_blue_copy', '(x, y)', '(x + w, y + h + 5)', '(0, 255, 0)', '(3)'], {}), '(mask_blue_copy, (x, y), (x + w, y + h + 5), (0, 255, 0), 3)\n', (11256, 11316), False, 'import cv2\n'), ((11334, 11372), 'cv2.imshow', 'cv2.imshow', (['"""contours"""', 'mask_blue_copy'], {}), "('contours', mask_blue_copy)\n", (11344, 11372), False, 'import cv2\n'), ((11393, 11407), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (11404, 11407), False, 'import cv2\n'), ((22892, 22942), 'cv2.drawContours', 'cv2.drawContours', (['text_mask', '[contour]', '(0)', '(255)', '(-1)'], {}), '(text_mask, [contour], 0, 255, -1)\n', (22908, 22942), False, 'import cv2\n'), ((24037, 24098), 'cv2.rectangle', 'cv2.rectangle', (['text_mask', '(x, y)', '(x + w, y + h + 3)', '(255)', '(-1)'], {}), '(text_mask, (x, y), (x + w, y + h + 3), 255, -1)\n', (24050, 24098), False, 'import cv2\n'), ((52906, 52928), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (52916, 52928), False, 'import os\n'), ((53337, 53367), 'os.path.join', 'os.path.join', (['input_path', 'file'], {}), '(input_path, file)\n', (53349, 53367), False, 'import os\n'), ((30950, 30993), 'cv2.boundingRect', 'cv2.boundingRect', (['bounding_contours[ind][2]'], {}), '(bounding_contours[ind][2])\n', (30966, 30993), False, 'import cv2\n'), ((52742, 52764), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (52752, 52764), False, 'import os\n'), ((11195, 11220), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (11211, 11220), False, 'import cv2\n')]
import os import time import numpy as np import random import torch import math from behavenet.data.utils import build_data_generator from behavenet.fitting.eval import export_train_plots from behavenet.fitting.hyperparam_utils import get_all_params from behavenet.fitting.hyperparam_utils import get_slurm_params from behavenet.fitting.training import fit from behavenet.fitting.utils import _clean_tt_dir from behavenet.fitting.utils import _print_hparams from behavenet.fitting.utils import create_tt_experiment from behavenet.fitting.utils import export_hparams from behavenet.models.aes import load_pretrained_ae def main(hparams, *args): if not isinstance(hparams, dict): hparams = vars(hparams) if hparams['model_type'] == 'conv': # blend outer hparams with architecture hparams hparams = {**hparams['architecture_params'], **hparams} # print hparams to console _print_hparams(hparams) if hparams['model_type'] == 'conv' and hparams['n_ae_latents'] > hparams['max_latents']: raise ValueError('Number of latents higher than max latents, architecture will not work') # Start at random times (so test tube creates separate folders) np.random.seed(random.randint(0, 1000)) time.sleep(np.random.uniform(3)) # create test-tube experiment hparams, sess_ids, exp = create_tt_experiment(hparams) if hparams is None: print('Experiment exists! Aborting fit') return # build data generator data_generator = build_data_generator(hparams, sess_ids) # #################### # ### CREATE MODEL ### # #################### def set_n_labels(data_generator, hparams): data, _ = data_generator.next_batch('train') sh = data['labels'].shape hparams['n_labels'] = sh[2] # [1, n_t, n_labels] print('constructing model...', end='') torch.manual_seed(hparams['rng_seed_model']) torch_rng_seed = torch.get_rng_state() hparams['model_build_rng_seed'] = torch_rng_seed hparams['n_datasets'] = len(sess_ids) if hparams['model_class'] == 'ae': from behavenet.models import AE as Model elif hparams['model_class'] == 'vae': from behavenet.models import VAE as Model elif hparams['model_class'] == 'beta-tcvae': from behavenet.models import BetaTCVAE as Model elif hparams['model_class'] == 'ps-vae': from behavenet.models import PSVAE as Model set_n_labels(data_generator, hparams) elif hparams['model_class'] == 'cond-vae': from behavenet.models import ConditionalVAE as Model set_n_labels(data_generator, hparams) elif hparams['model_class'] == 'cond-ae': from behavenet.models import ConditionalAE as Model set_n_labels(data_generator, hparams) elif hparams['model_class'] == 'cond-ae-msp': from behavenet.models import AEMSP as Model set_n_labels(data_generator, hparams) else: raise NotImplementedError( 'The model class "%s" is not currently implemented' % hparams['model_class']) model = Model(hparams) model.to(hparams['device']) # load pretrained weights if specified model = load_pretrained_ae(model, hparams) # Parallelize over gpus if desired if hparams['n_parallel_gpus'] > 1: from behavenet.models import CustomDataParallel model = CustomDataParallel(model) model.version = exp.version torch_rng_seed = torch.get_rng_state() hparams['training_rng_seed'] = torch_rng_seed # save out hparams as csv and dict hparams['training_completed'] = False export_hparams(hparams, exp) print('done') # ################### # ### TRAIN MODEL ### # ################### print(model) fit(hparams, model, data_generator, exp, method='ae') # export training plots if hparams['export_train_plots']: print('creating training plots...', end='') version_dir = 
os.path.join(hparams['expt_dir'], 'version_%i' % hparams['version']) save_file = os.path.join(version_dir, 'loss_training') export_train_plots(hparams, 'train', save_file=save_file) save_file = os.path.join(version_dir, 'loss_validation') export_train_plots(hparams, 'val', save_file=save_file) print('done') # update hparams upon successful training hparams['training_completed'] = True export_hparams(hparams, exp) # get rid of unneeded logging info _clean_tt_dir(hparams) if __name__ == '__main__': hyperparams = get_all_params('grid_search') if 'slurm' in hyperparams and hyperparams.slurm: cluster = get_slurm_params(hyperparams) if hyperparams.device == 'cuda' or hyperparams.device == 'gpu': cluster.optimize_parallel_cluster_gpu( main, hyperparams.tt_n_cpu_trials, hyperparams.experiment_name, job_display_name=None) elif hyperparams.device == 'cpu': cluster.optimize_parallel_cluster_cpu( main, hyperparams.tt_n_cpu_trials, hyperparams.experiment_name, job_display_name=None) else: if hyperparams.device == 'cuda' or hyperparams.device == 'gpu': if hyperparams.device == 'gpu': hyperparams.device = 'cuda' gpu_ids = hyperparams.gpus_viz.split(';') # Set up gpu ids for parallel gpus parallel_gpu_ids = [] for instance in range(math.ceil(len(gpu_ids) / hyperparams.n_parallel_gpus)): idx_beg = instance * hyperparams.n_parallel_gpus idx_end = (instance + 1) * hyperparams.n_parallel_gpus parallel_gpu_ids.append(','.join(gpu_ids[idx_beg:idx_end])) hyperparams.optimize_parallel_gpu(main, gpu_ids=parallel_gpu_ids) elif hyperparams.device == 'cpu': hyperparams.optimize_parallel_cpu( main, nb_trials=hyperparams.tt_n_cpu_trials, nb_workers=hyperparams.tt_n_cpu_workers)
[ "numpy.random.uniform", "behavenet.models.aes.load_pretrained_ae", "random.randint", "os.path.join", "torch.manual_seed", "behavenet.fitting.training.fit", "behavenet.fitting.hyperparam_utils.get_all_params", "behavenet.fitting.eval.export_train_plots", "torch.get_rng_state", "behavenet.models.CustomDataParallel", "behavenet.fitting.hyperparam_utils.get_slurm_params", "behavenet.fitting.utils._print_hparams", "behavenet.fitting.utils._clean_tt_dir", "behavenet.fitting.utils.export_hparams", "behavenet.data.utils.build_data_generator", "behavenet.models.AEMSP", "behavenet.fitting.utils.create_tt_experiment" ]
[((915, 938), 'behavenet.fitting.utils._print_hparams', '_print_hparams', (['hparams'], {}), '(hparams)\n', (929, 938), False, 'from behavenet.fitting.utils import _print_hparams\n'), ((1345, 1374), 'behavenet.fitting.utils.create_tt_experiment', 'create_tt_experiment', (['hparams'], {}), '(hparams)\n', (1365, 1374), False, 'from behavenet.fitting.utils import create_tt_experiment\n'), ((1512, 1551), 'behavenet.data.utils.build_data_generator', 'build_data_generator', (['hparams', 'sess_ids'], {}), '(hparams, sess_ids)\n', (1532, 1551), False, 'from behavenet.data.utils import build_data_generator\n'), ((1875, 1919), 'torch.manual_seed', 'torch.manual_seed', (["hparams['rng_seed_model']"], {}), "(hparams['rng_seed_model'])\n", (1892, 1919), False, 'import torch\n'), ((1941, 1962), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (1960, 1962), False, 'import torch\n'), ((3087, 3101), 'behavenet.models.AEMSP', 'Model', (['hparams'], {}), '(hparams)\n', (3092, 3101), True, 'from behavenet.models import AEMSP as Model\n'), ((3190, 3224), 'behavenet.models.aes.load_pretrained_ae', 'load_pretrained_ae', (['model', 'hparams'], {}), '(model, hparams)\n', (3208, 3224), False, 'from behavenet.models.aes import load_pretrained_ae\n'), ((3456, 3477), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (3475, 3477), False, 'import torch\n'), ((3614, 3642), 'behavenet.fitting.utils.export_hparams', 'export_hparams', (['hparams', 'exp'], {}), '(hparams, exp)\n', (3628, 3642), False, 'from behavenet.fitting.utils import export_hparams\n'), ((3763, 3816), 'behavenet.fitting.training.fit', 'fit', (['hparams', 'model', 'data_generator', 'exp'], {'method': '"""ae"""'}), "(hparams, model, data_generator, exp, method='ae')\n", (3766, 3816), False, 'from behavenet.fitting.training import fit\n'), ((4399, 4427), 'behavenet.fitting.utils.export_hparams', 'export_hparams', (['hparams', 'exp'], {}), '(hparams, exp)\n', (4413, 4427), False, 'from behavenet.fitting.utils import export_hparams\n'), ((4472, 4494), 'behavenet.fitting.utils._clean_tt_dir', '_clean_tt_dir', (['hparams'], {}), '(hparams)\n', (4485, 4494), False, 'from behavenet.fitting.utils import _clean_tt_dir\n'), ((4543, 4572), 'behavenet.fitting.hyperparam_utils.get_all_params', 'get_all_params', (['"""grid_search"""'], {}), "('grid_search')\n", (4557, 4572), False, 'from behavenet.fitting.hyperparam_utils import get_all_params\n'), ((1219, 1242), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (1233, 1242), False, 'import random\n'), ((1259, 1279), 'numpy.random.uniform', 'np.random.uniform', (['(3)'], {}), '(3)\n', (1276, 1279), True, 'import numpy as np\n'), ((3376, 3401), 'behavenet.models.CustomDataParallel', 'CustomDataParallel', (['model'], {}), '(model)\n', (3394, 3401), False, 'from behavenet.models import CustomDataParallel\n'), ((3958, 4026), 'os.path.join', 'os.path.join', (["hparams['expt_dir']", "('version_%i' % hparams['version'])"], {}), "(hparams['expt_dir'], 'version_%i' % hparams['version'])\n", (3970, 4026), False, 'import os\n'), ((4047, 4089), 'os.path.join', 'os.path.join', (['version_dir', '"""loss_training"""'], {}), "(version_dir, 'loss_training')\n", (4059, 4089), False, 'import os\n'), ((4098, 4155), 'behavenet.fitting.eval.export_train_plots', 'export_train_plots', (['hparams', '"""train"""'], {'save_file': 'save_file'}), "(hparams, 'train', save_file=save_file)\n", (4116, 4155), False, 'from behavenet.fitting.eval import export_train_plots\n'), ((4176, 4220), 
'os.path.join', 'os.path.join', (['version_dir', '"""loss_validation"""'], {}), "(version_dir, 'loss_validation')\n", (4188, 4220), False, 'import os\n'), ((4229, 4284), 'behavenet.fitting.eval.export_train_plots', 'export_train_plots', (['hparams', '"""val"""'], {'save_file': 'save_file'}), "(hparams, 'val', save_file=save_file)\n", (4247, 4284), False, 'from behavenet.fitting.eval import export_train_plots\n'), ((4646, 4675), 'behavenet.fitting.hyperparam_utils.get_slurm_params', 'get_slurm_params', (['hyperparams'], {}), '(hyperparams)\n', (4662, 4675), False, 'from behavenet.fitting.hyperparam_utils import get_slurm_params\n')]
import numpy as np import pytest @pytest.fixture def simulate(): np.random.seed(0) l = np.random.normal(size=(100, 3)) f = np.random.normal(size=(3, 200)) eta = l.dot(f) eta *= 5 / eta.max() x = np.random.poisson(lam=np.exp(eta)) return x, eta @pytest.fixture def simulate_lam_low_rank(): np.random.seed(0) l = np.exp(np.random.normal(size=(100, 1))) f = np.exp(np.random.normal(size=(1, 200))) lam = l.dot(f) x = np.random.poisson(lam=lam) return x, lam
[ "numpy.exp", "numpy.random.seed", "numpy.random.poisson", "numpy.random.normal" ]
[((68, 85), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (82, 85), True, 'import numpy as np\n'), ((92, 123), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100, 3)'}), '(size=(100, 3))\n', (108, 123), True, 'import numpy as np\n'), ((130, 161), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 200)'}), '(size=(3, 200))\n', (146, 161), True, 'import numpy as np\n'), ((307, 324), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (321, 324), True, 'import numpy as np\n'), ((440, 466), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': 'lam'}), '(lam=lam)\n', (457, 466), True, 'import numpy as np\n'), ((338, 369), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100, 1)'}), '(size=(100, 1))\n', (354, 369), True, 'import numpy as np\n'), ((384, 415), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 200)'}), '(size=(1, 200))\n', (400, 415), True, 'import numpy as np\n'), ((230, 241), 'numpy.exp', 'np.exp', (['eta'], {}), '(eta)\n', (236, 241), True, 'import numpy as np\n')]
""" sklearn extend のテストコード """ import os import numpy as np import pytest from lightgbm import LGBMClassifier from sklearn.linear_model import Ridge, Lasso, LassoCV, RidgeClassifierCV from sklearn.utils.validation import NotFittedError from xgboost import XGBClassifier from vivid.sklearn_extend import UtilityTransform, PrePostProcessModel from .utils import is_close_to_zero @pytest.mark.parametrize('scaling', [None, 'standard', 'minmax']) @pytest.mark.parametrize('log', [True, False]) def test_basic(log, scaling): transformer = UtilityTransform() x = np.random.uniform(size=100) x_edited = transformer.fit_transform(x) assert is_close_to_zero(x, x_edited) transformer = UtilityTransform(log=log, scaling=scaling) x_trans = transformer.fit_transform(x) x_inv = transformer.inverse_transform(x_trans) assert is_close_to_zero(x, x_inv) def test_out_of_scaling_string(regression_Xy): transformer = UtilityTransform(scaling='hogehoge') assert transformer.scaling == 'hogehoge' with pytest.raises(ValueError): transformer.fit(*regression_Xy) def test_raise_value_error_logscale(): transformer = UtilityTransform(log=True) X = np.random.uniform(-2, -1, size=100) with pytest.raises(ValueError): transformer.fit(X, y=None) def test_not_fitteed_error(): transformer = UtilityTransform() x = np.random.uniform(size=10) with pytest.raises(NotFittedError): transformer.transform(x) with pytest.raises(NotFittedError): transformer.inverse_transform(x) def test_logscaling(): tf = UtilityTransform(log=True) x = np.array([10, 100, -1]) with pytest.raises(ValueError): tf.fit_transform(x) tf.fit_transform(x[:-1]) @pytest.fixture(params=[Ridge, LassoCV, Lasso, RidgeClassifierCV, XGBClassifier, LGBMClassifier]) def classifier(request): return request.param() def test_simple(classifier, binary_Xy): model = PrePostProcessModel(classifier) model.fit(*binary_Xy) def test_raise_not_fitting(classifier, binary_Xy): model = PrePostProcessModel(classifier) with pytest.raises(NotFittedError): model.predict(*binary_Xy) def test_serializable(classifier, binary_Xy, tmpdir): model = PrePostProcessModel(classifier, input_scaling='standard') model.fit(*binary_Xy) y_pred = model.predict(binary_Xy[0]) import joblib path = os.path.join(tmpdir, 'clf.joblib') joblib.dump(model, path) del model model_loaded = joblib.load(path) y_loaded = model_loaded.predict(binary_Xy[0]) assert np.array_equal(y_pred, y_loaded)
[ "numpy.random.uniform", "os.path.join", "pytest.fixture", "joblib.dump", "pytest.raises", "numpy.array", "numpy.array_equal", "pytest.mark.parametrize", "joblib.load", "vivid.sklearn_extend.PrePostProcessModel", "vivid.sklearn_extend.UtilityTransform" ]
[((382, 446), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""scaling"""', "[None, 'standard', 'minmax']"], {}), "('scaling', [None, 'standard', 'minmax'])\n", (405, 446), False, 'import pytest\n'), ((448, 493), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""log"""', '[True, False]'], {}), "('log', [True, False])\n", (471, 493), False, 'import pytest\n'), ((1754, 1854), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[Ridge, LassoCV, Lasso, RidgeClassifierCV, XGBClassifier, LGBMClassifier]'}), '(params=[Ridge, LassoCV, Lasso, RidgeClassifierCV,\n XGBClassifier, LGBMClassifier])\n', (1768, 1854), False, 'import pytest\n'), ((542, 560), 'vivid.sklearn_extend.UtilityTransform', 'UtilityTransform', ([], {}), '()\n', (558, 560), False, 'from vivid.sklearn_extend import UtilityTransform, PrePostProcessModel\n'), ((570, 597), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100)'}), '(size=100)\n', (587, 597), True, 'import numpy as np\n'), ((702, 744), 'vivid.sklearn_extend.UtilityTransform', 'UtilityTransform', ([], {'log': 'log', 'scaling': 'scaling'}), '(log=log, scaling=scaling)\n', (718, 744), False, 'from vivid.sklearn_extend import UtilityTransform, PrePostProcessModel\n'), ((944, 980), 'vivid.sklearn_extend.UtilityTransform', 'UtilityTransform', ([], {'scaling': '"""hogehoge"""'}), "(scaling='hogehoge')\n", (960, 980), False, 'from vivid.sklearn_extend import UtilityTransform, PrePostProcessModel\n'), ((1162, 1188), 'vivid.sklearn_extend.UtilityTransform', 'UtilityTransform', ([], {'log': '(True)'}), '(log=True)\n', (1178, 1188), False, 'from vivid.sklearn_extend import UtilityTransform, PrePostProcessModel\n'), ((1197, 1232), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(-1)'], {'size': '(100)'}), '(-2, -1, size=100)\n', (1214, 1232), True, 'import numpy as np\n'), ((1354, 1372), 'vivid.sklearn_extend.UtilityTransform', 'UtilityTransform', ([], {}), '()\n', (1370, 1372), False, 'from vivid.sklearn_extend import UtilityTransform, PrePostProcessModel\n'), ((1381, 1407), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(10)'}), '(size=10)\n', (1398, 1407), True, 'import numpy as np\n'), ((1597, 1623), 'vivid.sklearn_extend.UtilityTransform', 'UtilityTransform', ([], {'log': '(True)'}), '(log=True)\n', (1613, 1623), False, 'from vivid.sklearn_extend import UtilityTransform, PrePostProcessModel\n'), ((1633, 1656), 'numpy.array', 'np.array', (['[10, 100, -1]'], {}), '([10, 100, -1])\n', (1641, 1656), True, 'import numpy as np\n'), ((1957, 1988), 'vivid.sklearn_extend.PrePostProcessModel', 'PrePostProcessModel', (['classifier'], {}), '(classifier)\n', (1976, 1988), False, 'from vivid.sklearn_extend import UtilityTransform, PrePostProcessModel\n'), ((2080, 2111), 'vivid.sklearn_extend.PrePostProcessModel', 'PrePostProcessModel', (['classifier'], {}), '(classifier)\n', (2099, 2111), False, 'from vivid.sklearn_extend import UtilityTransform, PrePostProcessModel\n'), ((2254, 2311), 'vivid.sklearn_extend.PrePostProcessModel', 'PrePostProcessModel', (['classifier'], {'input_scaling': '"""standard"""'}), "(classifier, input_scaling='standard')\n", (2273, 2311), False, 'from vivid.sklearn_extend import UtilityTransform, PrePostProcessModel\n'), ((2408, 2442), 'os.path.join', 'os.path.join', (['tmpdir', '"""clf.joblib"""'], {}), "(tmpdir, 'clf.joblib')\n", (2420, 2442), False, 'import os\n'), ((2447, 2471), 'joblib.dump', 'joblib.dump', (['model', 'path'], {}), '(model, path)\n', (2458, 2471), False, 'import joblib\n'), ((2506, 2523), 
'joblib.load', 'joblib.load', (['path'], {}), '(path)\n', (2517, 2523), False, 'import joblib\n'), ((2585, 2617), 'numpy.array_equal', 'np.array_equal', (['y_pred', 'y_loaded'], {}), '(y_pred, y_loaded)\n', (2599, 2617), True, 'import numpy as np\n'), ((1036, 1061), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1049, 1061), False, 'import pytest\n'), ((1242, 1267), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1255, 1267), False, 'import pytest\n'), ((1417, 1446), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (1430, 1446), False, 'import pytest\n'), ((1491, 1520), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (1504, 1520), False, 'import pytest\n'), ((1666, 1691), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1679, 1691), False, 'import pytest\n'), ((2121, 2150), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (2134, 2150), False, 'import pytest\n')]
import matplotlib.cm as cm import matplotlib.pyplot as plt import numpy as np def gen_color_map(keys): colors = cm.rainbow(np.linspace(0, 1, len(keys))) return dict(zip(keys, colors)) def visualize_dataset_2d(x1, x2, ys, alpha=0.5, x1_label='', x2_label='', loc='upper left', figsize=(16, 8), xlim=None, ylim=None, unique_ys=None, save_path=None, label_text_lookup=None): """ Args: x1 - data's first dimention x2 - data's second dimention """ # To avoid type 3 fonts. ACM Digital library complain about this # based on the recomendations here http://phyletica.org/matplotlib-fonts/ plt.rcParams['pdf.fonttype'] = 42 plt.rcParams['ps.fonttype'] = 42 if unique_ys is not None: c_lookup = gen_color_map(unique_ys) else: c_lookup = gen_color_map(set(ys)) # c_sequence = [''] * len(ys) # for i in xrange(len(ys)): # c_sequence[i] = c_lookup[ys[i]] plt.figure(figsize=figsize) for label in set(ys): color = c_lookup[label] mask = ys == label plt.scatter(x1[mask], x2[mask], c=color, label=label if label_text_lookup is None else label_text_lookup[label], alpha=alpha) #plt.scatter(x1, x2, c=c_sequence, alpha=alpha) plt.xlabel(x1_label) plt.ylabel(x2_label) if xlim: plt.xlim(xlim) if ylim: plt.ylim(ylim) lgd=plt.legend(loc=loc) plt.grid(True) if save_path: plt.savefig(save_path, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.show() def visualize_dataset_nd(X, ys, grid_shape=(2,2), alpha=0.5, xlim=None, ylim=None, loc='upper left', bbox_to_anchor=(1.04,1), figsize=(16, 8), unique_ys=None, save_path=None, label_text_lookup=None): """ Args: X: 2d np.array ys: 1d n.array """ import matplotlib.pyplot as plt from matplotlib import gridspec # To avoid type 3 fonts. ACM Digital library complain about this # based on the recomendations here http://phyletica.org/matplotlib-fonts/ plt.rcParams['pdf.fonttype'] = 42 plt.rcParams['ps.fonttype'] = 42 if unique_ys is not None: c_lookup = gen_color_map(unique_ys) else: c_lookup = gen_color_map(set(ys)) fig = plt.figure(figsize=figsize) gs = gridspec.GridSpec(grid_shape[0], grid_shape[1]) n_dim = X.shape[1] dim_1 = 0 dim_2 = 1 for i in range(grid_shape[0]): for j in range(grid_shape[1]): ax = fig.add_subplot(gs[i, j]) for label in set(ys): color = c_lookup[label] mask = ys == label ax.scatter(X[mask, dim_1], X[mask, dim_2], c=color, label=label if label_text_lookup is None else label_text_lookup[label], alpha=alpha) ax.set_xlabel('Z{0}'.format(dim_1)) ax.set_ylabel('Z{0}'.format(dim_2)) ax.grid(True) if xlim: ax.set_xlim(xlim) if ylim: ax.set_ylim(ylim) dim_2 += 1 if dim_2 == n_dim: dim_1 += 1 dim_2 = dim_1 + 1 plt.tight_layout() lgd = plt.legend(bbox_to_anchor=bbox_to_anchor, loc=loc) if save_path: plt.savefig(save_path, bbox_extra_artists=(lgd,), bbox_inches='tight') plt.show() def visualize_z_separate(z, ts_y, ts_known_mask, n_scatter=1000, unique_ys=range(7), xlim=None, ylim=None, grid_shape=(1,3), figsize=(12, 4),font_size=13, markersize=None, save_path_known=None, save_path_unknown=None, label_text_lookup=None): import matplotlib as mpl font = {'family' : 'normal', # 'weight' : 'bold', 'size' : font_size} mpl.rc('font', **font) def plot(z, ys, path): if z.shape[1] == 2: visualize_dataset_2d(z[:, 0], z[:, 1], ys, xlim=xlim, ylim=ylim, alpha=0.5, figsize=(8, 6), unique_ys=unique_ys, save_path=path, label_text_lookup=label_text_lookup) elif z.shape[1] == 3: visualize_dataset_nd(z, ys, grid_shape=(1,3), alpha=0.5, xlim=xlim, ylim=ylim, loc='upper left', bbox_to_anchor=(1.04,1), figsize=(12, 4), unique_ys=unique_ys, save_path=path, label_text_lookup=label_text_lookup) else: 
visualize_dataset_nd(z, ys, grid_shape=grid_shape, alpha=0.5, xlim=xlim, ylim=ylim, loc='upper left', bbox_to_anchor=(1.04,1), figsize=figsize, unique_ys=unique_ys, save_path=path, label_text_lookup=label_text_lookup) z = z[:n_scatter] y = np.argmax(ts_y[:n_scatter], axis=1) known_mask = ts_known_mask[:n_scatter] unknown_mask = np.logical_not(known_mask) #plot known # plot(z[known_mask], y[known_mask], save_path_known) #plot unknown # plot(z[unknown_mask], y[unknown_mask], save_path_unknown) mpl.rcdefaults()
[ "matplotlib.pyplot.tight_layout", "matplotlib.rc", "matplotlib.pyplot.xlim", "matplotlib.pyplot.show", "numpy.argmax", "matplotlib.pyplot.ylim", "matplotlib.pyplot.scatter", "matplotlib.pyplot.legend", "numpy.logical_not", "matplotlib.rcdefaults", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.savefig" ]
[((1000, 1027), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1010, 1027), True, 'import matplotlib.pyplot as plt\n'), ((1344, 1364), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x1_label'], {}), '(x1_label)\n', (1354, 1364), True, 'import matplotlib.pyplot as plt\n'), ((1369, 1389), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['x2_label'], {}), '(x2_label)\n', (1379, 1389), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1490), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'loc'}), '(loc=loc)\n', (1481, 1490), True, 'import matplotlib.pyplot as plt\n'), ((1495, 1509), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1503, 1509), True, 'import matplotlib.pyplot as plt\n'), ((1611, 1621), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1619, 1621), True, 'import matplotlib.pyplot as plt\n'), ((2372, 2399), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2382, 2399), True, 'import matplotlib.pyplot as plt\n'), ((2409, 2456), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['grid_shape[0]', 'grid_shape[1]'], {}), '(grid_shape[0], grid_shape[1])\n', (2426, 2456), False, 'from matplotlib import gridspec\n'), ((3325, 3343), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3341, 3343), True, 'import matplotlib.pyplot as plt\n'), ((3354, 3404), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': 'bbox_to_anchor', 'loc': 'loc'}), '(bbox_to_anchor=bbox_to_anchor, loc=loc)\n', (3364, 3404), True, 'import matplotlib.pyplot as plt\n'), ((3506, 3516), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3514, 3516), True, 'import matplotlib.pyplot as plt\n'), ((4006, 4028), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **font)\n", (4012, 4028), True, 'import matplotlib as mpl\n'), ((5057, 5092), 'numpy.argmax', 'np.argmax', (['ts_y[:n_scatter]'], {'axis': '(1)'}), '(ts_y[:n_scatter], axis=1)\n', (5066, 5092), True, 'import numpy as np\n'), ((5155, 5181), 'numpy.logical_not', 'np.logical_not', (['known_mask'], {}), '(known_mask)\n', (5169, 5181), True, 'import numpy as np\n'), ((5341, 5357), 'matplotlib.rcdefaults', 'mpl.rcdefaults', ([], {}), '()\n', (5355, 5357), True, 'import matplotlib as mpl\n'), ((1121, 1250), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x1[mask]', 'x2[mask]'], {'c': 'color', 'label': '(label if label_text_lookup is None else label_text_lookup[label])', 'alpha': 'alpha'}), '(x1[mask], x2[mask], c=color, label=label if label_text_lookup is\n None else label_text_lookup[label], alpha=alpha)\n', (1132, 1250), True, 'import matplotlib.pyplot as plt\n'), ((1411, 1425), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (1419, 1425), True, 'import matplotlib.pyplot as plt\n'), ((1447, 1461), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (1455, 1461), True, 'import matplotlib.pyplot as plt\n'), ((1536, 1606), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'bbox_extra_artists': '(lgd,)', 'bbox_inches': '"""tight"""'}), "(save_path, bbox_extra_artists=(lgd,), bbox_inches='tight')\n", (1547, 1606), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3501), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'bbox_extra_artists': '(lgd,)', 'bbox_inches': '"""tight"""'}), "(save_path, bbox_extra_artists=(lgd,), bbox_inches='tight')\n", (3442, 3501), True, 'import matplotlib.pyplot as plt\n')]
from tetris import Tetris import gym from gym import error, spaces, utils from gym.utils import seeding from gym import spaces from gym.envs.toy_text import discrete import numpy as np class TetrisEnv(discrete.DiscreteEnv): metadata = {'render.modes': ['human']} def __init__(self): self.t = Tetris() state_num = 2**(4*8)*4*4*9*5 # 4x8 board [filled or not], 4*9 active-shape locations, 4 rotation positions, 5 shape types action_num = 4 # rotate, left, right, step #P = {s : {a : [] for a in range(action_num)} for s in range(state_num)} init_state_dist = [] for x in range(4): for rot in range(4): for shape_type in range(5): init_state_dist.append( encode([0, 0, x, rot, shape_type]) ) init_state_dist = np.array(init_state_dist) #init_state_dist /= init_state_dist.sum() #super(TetrisEnv, self).__init__(state_num, action_num, P, init_state_dist) self.action_space = spaces.Discrete(5) self.observation_space = spaces.Tuple((spaces.Discrete(2**(4*8)), spaces.Discrete(4*9), spaces.Discrete(4), spaces.Discrete(5))) size = 2**(4*8)*4*4*9*5 #-int(-np.log(2**(4*8)*4*4*9*5)/np.log(2)) self.observation_space.shape = np.zeros(size, dtype=int) #spaces.Discrete((2**(4*8))*4*4*9*5) # 4x8 board [filled or not], 4*9 active-shape locations, 4 rotation positions, 5 shape types # #(np.zeros(2**(4*8)), np.zeros(4*9), np.zeros(4), np.zeros(5)) def step(self, action): og_score = self.t.score self.t.take_action(action) shape_y, shape_x = t.shape_loc obs = (self.t.ground, shape_y, shape_x, t) reward = self.t.score - og_score done = (self.t.score is 0) return self.encode(obs), reward, done, {} def reset(self): self.t.__init__() return 0 def render(self, mode='human', close=False): self.t.print_board() def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] # encode/decode style based on gym's taxi example def encode(obs): board, shape_y, shape_x, shape_rot, shape_type = obs i = shape_type i *= 5 i += shape_rot i *= 4 i += shape_x i *= 4 i += shape_y i *= 9 i += board return i def decode(i): out = [] out.append(i % 9) i = i // 9 out.append(i % 4) i = i // 4 out.append(i % 4) i = i // 4 out.append(i % 5) i = i // 5 out.append(i) out.reverse() return out
[ "tetris.Tetris", "gym.spaces.Discrete", "numpy.zeros", "numpy.array", "gym.utils.seeding.np_random" ]
[((301, 309), 'tetris.Tetris', 'Tetris', ([], {}), '()\n', (307, 309), False, 'from tetris import Tetris\n'), ((744, 769), 'numpy.array', 'np.array', (['init_state_dist'], {}), '(init_state_dist)\n', (752, 769), True, 'import numpy as np\n'), ((917, 935), 'gym.spaces.Discrete', 'spaces.Discrete', (['(5)'], {}), '(5)\n', (932, 935), False, 'from gym import spaces\n'), ((1173, 1198), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'int'}), '(size, dtype=int)\n', (1181, 1198), True, 'import numpy as np\n'), ((1850, 1873), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (1867, 1873), False, 'from gym.utils import seeding\n'), ((979, 1008), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2 ** (4 * 8))'], {}), '(2 ** (4 * 8))\n', (994, 1008), False, 'from gym import spaces\n'), ((1006, 1028), 'gym.spaces.Discrete', 'spaces.Discrete', (['(4 * 9)'], {}), '(4 * 9)\n', (1021, 1028), False, 'from gym import spaces\n'), ((1028, 1046), 'gym.spaces.Discrete', 'spaces.Discrete', (['(4)'], {}), '(4)\n', (1043, 1046), False, 'from gym import spaces\n'), ((1048, 1066), 'gym.spaces.Discrete', 'spaces.Discrete', (['(5)'], {}), '(5)\n', (1063, 1066), False, 'from gym import spaces\n')]
import matplotlib.pyplot as plt import numpy as np import math n = np.linspace(2, 7) plt.plot(n, n**3, label="n**3") plt.plot(n, n**0.3, label="n**0.3") plt.plot(n, n, label="n") plt.plot(n, np.sqrt(n), label="sqrtn") plt.plot(n, (n ** 2) / np.sqrt(n), label="(n ** 2) / np.sqrt(n)") plt.plot(n, n ** 2, label="n **2") plt.plot(n, 3**n, label="3**n") plt.plot(n, n * np.log2(n), label="n * np.log2(n)") plt.plot(n, np.log(n) / np.log(4), label="log4n") plt.plot(n, 5 ** (np.log2(n)), label="5 ** (np.log2(n))") plt.plot(n, n, label="n") plt.plot(n, np.sqrt(n), label="sqrtn") plt.plot(n, 2 ** (2*n), label="2 ** (2*n)") plt.plot(n, n ** 2, label="n ** 2") plt.legend(loc='upper left') plt.show()
[ "matplotlib.pyplot.show", "numpy.log", "matplotlib.pyplot.plot", "numpy.log2", "matplotlib.pyplot.legend", "numpy.linspace", "numpy.sqrt" ]
[((68, 85), 'numpy.linspace', 'np.linspace', (['(2)', '(7)'], {}), '(2, 7)\n', (79, 85), True, 'import numpy as np\n'), ((86, 119), 'matplotlib.pyplot.plot', 'plt.plot', (['n', '(n ** 3)'], {'label': '"""n**3"""'}), "(n, n ** 3, label='n**3')\n", (94, 119), True, 'import matplotlib.pyplot as plt\n'), ((118, 155), 'matplotlib.pyplot.plot', 'plt.plot', (['n', '(n ** 0.3)'], {'label': '"""n**0.3"""'}), "(n, n ** 0.3, label='n**0.3')\n", (126, 155), True, 'import matplotlib.pyplot as plt\n'), ((154, 179), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'n'], {'label': '"""n"""'}), "(n, n, label='n')\n", (162, 179), True, 'import matplotlib.pyplot as plt\n'), ((285, 319), 'matplotlib.pyplot.plot', 'plt.plot', (['n', '(n ** 2)'], {'label': '"""n **2"""'}), "(n, n ** 2, label='n **2')\n", (293, 319), True, 'import matplotlib.pyplot as plt\n'), ((322, 355), 'matplotlib.pyplot.plot', 'plt.plot', (['n', '(3 ** n)'], {'label': '"""3**n"""'}), "(n, 3 ** n, label='3**n')\n", (330, 355), True, 'import matplotlib.pyplot as plt\n'), ((514, 539), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'n'], {'label': '"""n"""'}), "(n, n, label='n')\n", (522, 539), True, 'import matplotlib.pyplot as plt\n'), ((579, 624), 'matplotlib.pyplot.plot', 'plt.plot', (['n', '(2 ** (2 * n))'], {'label': '"""2 ** (2*n)"""'}), "(n, 2 ** (2 * n), label='2 ** (2*n)')\n", (587, 624), True, 'import matplotlib.pyplot as plt\n'), ((623, 658), 'matplotlib.pyplot.plot', 'plt.plot', (['n', '(n ** 2)'], {'label': '"""n ** 2"""'}), "(n, n ** 2, label='n ** 2')\n", (631, 658), True, 'import matplotlib.pyplot as plt\n'), ((659, 687), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (669, 687), True, 'import matplotlib.pyplot as plt\n'), ((688, 698), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (696, 698), True, 'import matplotlib.pyplot as plt\n'), ((192, 202), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (199, 202), True, 'import numpy as np\n'), ((552, 562), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (559, 562), True, 'import numpy as np\n'), ((242, 252), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (249, 252), True, 'import numpy as np\n'), ((370, 380), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (377, 380), True, 'import numpy as np\n'), ((418, 427), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (424, 427), True, 'import numpy as np\n'), ((430, 439), 'numpy.log', 'np.log', (['(4)'], {}), '(4)\n', (436, 439), True, 'import numpy as np\n'), ((474, 484), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (481, 484), True, 'import numpy as np\n')]
import h5py import pickle import numpy as np from torch.utils.data import Dataset from sklearn.model_selection import train_test_split class ExpressionDataset(Dataset): ''' Gene expression dataset capable of using subsets of inputs. Args: data: array of inputs with size (samples, dim). labels: array of labels with size (samples,) or (samples, dim). ''' def __init__(self, data, labels): self.input_size = data.shape[1] self._data = data.astype(np.float32) if len(labels.shape) == 1: # Classification labels. self.output_size = len(np.unique(labels)) self._output = labels.astype(np.long) else: # Regression labels. self.output_size = labels.shape[1] self._output = labels.astype(np.float32) self.set_inds(None) self.set_output_inds(None) def set_inds(self, inds, delete_remaining=False): ''' Set input indices to be returned. Args: inds: list/array of selected indices. delete_remaining: whether to permanently delete the indices that are not selected. ''' if inds is None: assert not delete_remaining inds = np.arange(self._data.shape[1]) # Set input and inds. self.inds = inds self.data = self._data self.input_size = len(inds) else: # Verify inds. inds = np.sort(inds) assert len(inds) > 0 assert (inds[0] >= 0) and (inds[-1] < self._data.shape[1]) assert np.all(np.unique(inds, return_counts=True)[1] == 1) if len(inds) == self.input_size: assert not delete_remaining self.input_size = len(inds) if delete_remaining: # Reset data and input size. self._data = self._data[:, inds] self.data = self._data self.inds = np.arange(len(inds)) else: # Set input and inds. self.inds = inds self.data = self._data[:, inds] def set_output_inds(self, inds, delete_remaining=False): ''' Set output inds to be returned. Only for use with multivariate outputs. Args: inds: list/array of selected indices. delete_remaining: whether to permanently delete the indices that are not selected. ''' if inds is None: assert not delete_remaining # Set output and inds. self.output = self._output if len(self._output.shape) == 1: # Classification labels. self.output_size = len(np.unique(self._output)) self.output_inds = None else: # Regression labels. self.output_size = self._output.shape[1] self.output_inds = np.arange(self.output_size) else: # Verify that there are multiple output inds. assert len(self._output.shape) == 2 # Verify inds. inds = np.sort(inds) assert len(inds) > 0 assert (inds[0] >= 0) and (inds[-1] < self._output.shape[1]) assert np.all(np.unique(inds, return_counts=True)[1] == 1) if len(inds) == self.output_size: assert not delete_remaining self.output_size = len(inds) if delete_remaining: # Reset data and input size. self._output = self._output[:, inds] self.output = self._output self.output_inds = np.arange(len(inds)) else: # Set output and inds. self.output_inds = inds self.output = self._output[:, inds] @property def max_input_size(self): return self._data.shape[1] def __len__(self): return len(self._data) def __getitem__(self, index): return self.data[index], self.output[index] class HDF5ExpressionDataset(Dataset): ''' Dataset wrapper, capable of using subsets of inputs. Args: filename: HDF5 filename. data_name: key for data array. label_name: key for labels. sample_inds: list of indices for rows to be sampled. initialize: whether to initialize by opening the HDF5 file. This should only be done when using a data loader with no worker threads. ''' def __init__(self, filename, data_name, label_name, sample_inds=None, initialize=False): # Set up data variables. 
self.filename = filename self.data_name = data_name self.label_name = label_name # Set sample inds. hf = h5py.File(filename, 'r') data = hf[self.data_name] labels = hf[self.label_name] if sample_inds is None: sample_inds = np.arange(len(data)) self.sample_inds = sample_inds # Set input, output size. self.input_size = data.shape[1] if labels.ndim == 1: # Classification labels. self.output_size = len(np.unique(labels)) self.multiple_outputs = False else: # Regression labels. self.output_size = labels.shape[1] self.multiple_outputs = True hf.close() # Set input inds. self.all_inds = np.arange(self.input_size) self.set_inds(None) # Set output inds. if self.multiple_outputs: self.all_output_inds = np.arange(self.output_size) else: self.all_output_inds = None self.set_output_inds(None) # Initialize. if initialize: self.init_worker(0) def set_inds(self, inds, delete_remaining=False): ''' Set input indices to be returned. Args: inds: list/array of selected indices. delete_remaining: whether to permanently delete the indices that are not selected. ''' if inds is None: assert not delete_remaining self.inds = np.arange(len(self.all_inds)) self.relative_inds = self.all_inds self.input_size = len(self.all_inds) else: # Verify inds. inds = np.sort(inds) assert len(inds) > 0 assert (inds[0] >= 0) and (inds[-1] < len(self.all_inds)) assert np.all(np.unique(inds, return_counts=True)[1] == 1) self.input_size = len(inds) if delete_remaining: self.inds = np.arange(len(inds)) self.all_inds = self.all_inds[inds] self.relative_inds = self.all_inds else: self.inds = inds self.relative_inds = self.all_inds[inds] def set_output_inds(self, inds, delete_remaining=False): ''' Set output inds to be returned. Only for use with multivariate outputs. Args: inds: list/array of selected indices. delete_remaining: whether to permanently delete the indices that are not selected. ''' if inds is None: assert not delete_remaining if self.multiple_outputs: self.output_inds = np.arange(len(self.all_output_inds)) self.output_relative_inds = self.all_output_inds self.output_size = len(self.all_output_inds) else: self.output_inds = None self.output_relative_inds = None else: # Verify that there are multiple output inds. assert self.multiple_outputs # Verify inds. inds = np.sort(inds) assert len(inds) > 0 assert (inds[0] >= 0) and (inds[-1] < len(self.all_output_inds)) assert np.all(np.unique(inds, return_counts=True)[1] == 1) if len(inds) == self.output_size: assert not delete_remaining self.output_size = len(inds) if delete_remaining: self.output_inds = np.arange(len(inds)) self.all_output_inds = self.all_output_inds[inds] self.output_relative_inds = self.all_output_inds else: self.output_inds = inds self.output_relative_inds = self.all_output_inds[inds] def init_worker(self, worker_id): '''Initialize worker in data loader thread.''' self.h5 = h5py.File(self.filename, 'r', swmr=True) @property def max_input_size(self): return len(self.all_inds) def __len__(self): return len(self.sample_inds) def __getitem__(self, index): # Possibly initialize worker to open HDF5 file. 
if not hasattr(self, 'h5'): self.init_worker(0) index = self.sample_inds[index] data = self.h5[self.data_name][index][self.relative_inds] labels = self.h5[self.label_name][index] if self.output_relative_inds is not None: labels = labels[self.output_relative_inds] return data, labels def split_data(data, seed=123, val_portion=0.1, test_portion=0.1): '''Split data into train, val, test.''' N = data.shape[0] N_val = int(val_portion * N) N_test = int(test_portion * N) train, test = train_test_split(data, test_size=N_test, random_state=seed) train, val = train_test_split(train, test_size=N_val, random_state=seed+1) return train, val, test def bootstrapped_dataset(dataset, seed=None): '''Sample a bootstrapped dataset.''' if isinstance(dataset, ExpressionDataset): data = dataset.data labels = dataset.output if seed: np.random.seed(seed) N = len(data) inds = np.random.choice(N, size=N, replace=True) return ExpressionDataset(data[inds], labels[inds]) elif isinstance(dataset, HDF5ExpressionDataset): inds = dataset.sample_inds inds = np.random.choice(inds, size=len(inds), replace=True) return HDF5ExpressionDataset(dataset.filename, dataset.data_name, dataset.label_name, sample_inds=inds) else: raise ValueError('dataset must be ExpressionDataset or ' 'HDF5ExpressionDataset') class GeneSet: ''' Set of genes, represented by their indices and names. Args: inds: gene indices. names: gene names. ''' def __init__(self, inds, names): self.inds = inds self.names = names def save(self, filename): '''Save object by pickling.''' with open(filename, 'wb') as f: pickle.dump(self, f) def load_set(filename): '''Load GeneSet object.''' with open(filename, 'rb') as f: subset = pickle.load(f) if isinstance(subset, GeneSet): return subset else: raise ValueError('object is not GeneSet')
[ "h5py.File", "pickle.dump", "numpy.random.seed", "sklearn.model_selection.train_test_split", "numpy.sort", "pickle.load", "numpy.arange", "numpy.random.choice", "numpy.unique" ]
[((9452, 9511), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': 'N_test', 'random_state': 'seed'}), '(data, test_size=N_test, random_state=seed)\n', (9468, 9511), False, 'from sklearn.model_selection import train_test_split\n'), ((9529, 9592), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train'], {'test_size': 'N_val', 'random_state': '(seed + 1)'}), '(train, test_size=N_val, random_state=seed + 1)\n', (9545, 9592), False, 'from sklearn.model_selection import train_test_split\n'), ((4823, 4847), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (4832, 4847), False, 'import h5py\n'), ((5479, 5505), 'numpy.arange', 'np.arange', (['self.input_size'], {}), '(self.input_size)\n', (5488, 5505), True, 'import numpy as np\n'), ((8602, 8642), 'h5py.File', 'h5py.File', (['self.filename', '"""r"""'], {'swmr': '(True)'}), "(self.filename, 'r', swmr=True)\n", (8611, 8642), False, 'import h5py\n'), ((9902, 9943), 'numpy.random.choice', 'np.random.choice', (['N'], {'size': 'N', 'replace': '(True)'}), '(N, size=N, replace=True)\n', (9918, 9943), True, 'import numpy as np\n'), ((10929, 10943), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10940, 10943), False, 'import pickle\n'), ((1269, 1299), 'numpy.arange', 'np.arange', (['self._data.shape[1]'], {}), '(self._data.shape[1])\n', (1278, 1299), True, 'import numpy as np\n'), ((1499, 1512), 'numpy.sort', 'np.sort', (['inds'], {}), '(inds)\n', (1506, 1512), True, 'import numpy as np\n'), ((3176, 3189), 'numpy.sort', 'np.sort', (['inds'], {}), '(inds)\n', (3183, 3189), True, 'import numpy as np\n'), ((5631, 5658), 'numpy.arange', 'np.arange', (['self.output_size'], {}), '(self.output_size)\n', (5640, 5658), True, 'import numpy as np\n'), ((6390, 6403), 'numpy.sort', 'np.sort', (['inds'], {}), '(inds)\n', (6397, 6403), True, 'import numpy as np\n'), ((7814, 7827), 'numpy.sort', 'np.sort', (['inds'], {}), '(inds)\n', (7821, 7827), True, 'import numpy as np\n'), ((9844, 9864), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (9858, 9864), True, 'import numpy as np\n'), ((10798, 10818), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (10809, 10818), False, 'import pickle\n'), ((617, 634), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (626, 634), True, 'import numpy as np\n'), ((2981, 3008), 'numpy.arange', 'np.arange', (['self.output_size'], {}), '(self.output_size)\n', (2990, 3008), True, 'import numpy as np\n'), ((5213, 5230), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (5222, 5230), True, 'import numpy as np\n'), ((2769, 2792), 'numpy.unique', 'np.unique', (['self._output'], {}), '(self._output)\n', (2778, 2792), True, 'import numpy as np\n'), ((1643, 1678), 'numpy.unique', 'np.unique', (['inds'], {'return_counts': '(True)'}), '(inds, return_counts=True)\n', (1652, 1678), True, 'import numpy as np\n'), ((3322, 3357), 'numpy.unique', 'np.unique', (['inds'], {'return_counts': '(True)'}), '(inds, return_counts=True)\n', (3331, 3357), True, 'import numpy as np\n'), ((6533, 6568), 'numpy.unique', 'np.unique', (['inds'], {'return_counts': '(True)'}), '(inds, return_counts=True)\n', (6542, 6568), True, 'import numpy as np\n'), ((7964, 7999), 'numpy.unique', 'np.unique', (['inds'], {'return_counts': '(True)'}), '(inds, return_counts=True)\n', (7973, 7999), True, 'import numpy as np\n')]
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn import torchvision import torchvision.transforms as transforms from weights_init import weight_init import os import argparse from datetime import datetime import matplotlib.pyplot as plt import numpy as np import pickle import random cfg = { 'VGG11': [64, 'M', 128, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M'], 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'] } class CNN_ete(nn.Module): def __init__(self, vgg_name): super(CNN_ete, self).__init__() self.features = self._make_layers(cfg[vgg_name]) self.broad_classifier = nn.Sequential( nn.Dropout(0.5), nn.Linear(512, 4096), nn.ReLU(inplace=True), nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(inplace=True), nn.Linear(4096, Nct) ) self.bn1 = nn.BatchNorm1d(Nct) self.dense_layers = nn.Sequential( nn.Dropout(0.5), nn.Linear(512*Nct, 4096), nn.ReLU(inplace=True), nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(inplace=True), nn.Linear(4096, K) ) # self.broad_layer = nn.Linear(Nct, K) def forward(self, x): out = self.features(x) out = out.view(out.size(0), -1) broad_out = F.softmax(self.broad_classifier(out), dim=1) broad_out1 = self.bn1(broad_out) out = torch.bmm(out.unsqueeze(2), broad_out1.unsqueeze(1)) # Outer product for batches out = out.view(out.size(0),-1) out = self.dense_layers(out) return (out, broad_out) def _make_layers(self, cfg): layers = [] in_channels = 3 for x in cfg: if x == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1), nn.BatchNorm2d(x), nn.ReLU(inplace=True)] in_channels = x layers += [nn.AvgPool2d(kernel_size=1, stride=1)] return nn.Sequential(*layers) # Categories K = 100 Nct = 10 filename = 'ete' print('Number of Broad categories:', Nct) # Model device = 'cuda' if torch.cuda.is_available() else 'cpu' print('==> Setting up the model..') net = CNN_ete('VGG16').to(device) if device == 'cuda': net = torch.nn.DataParallel(net) cudnn.benchmark = True # Data print('==> Preparing data..') transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2009, 0.1984, 0.2023)), ]) testset = torchvision.datasets.CIFAR100(root='~/projectdata', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False, num_workers=2) # Optimization criterion = nn.CrossEntropyLoss() def validation(epoch, validation=True): global best_acc net.eval() val_loss = 0 correct = 0 total = 0 broad_outs = torch.Tensor().to(device) total1 = torch.zeros(K, dtype=torch.long).to(device) correct1 = torch.zeros(K, dtype=torch.long).to(device) dloader = testloader if validation: dloader = valloader with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(dloader): inputs, targets = inputs.to(device), targets.to(device) outputs, broad_out = net(inputs) loss = criterion(outputs, targets) broad_outs = torch.cat((broad_outs, broad_out)) B = targets.size()[0] val_loss += loss.item()*B _, predicted = outputs.max(1) total += targets.size(0) corr = predicted.eq(targets) correct += corr.sum().item() total1 += targets.bincount(minlength=K) for i in range(targets.size(0)): if corr[i] == 1: 
correct1[targets[i]] += 1 torch.set_printoptions(precision=3) np.set_printoptions(precision=3) acc = 100.*correct/total acc1 = 100.*correct1.float()/total1.float() val_loss *= 1./total print('Validation loss:', val_loss) print("Validation accuracy: %.2f %%" % acc) print("Individual Validation accuracy:", acc1.cpu().numpy().flatten()) return val_loss, broad_outs best_acc = 0 # best test accuracy start_epoch = 0 # start from epoch 0 or last checkpoint epoch chkp_address = "./categorizations/checkpoint/" + 'full_' + filename + "_early" + '.pth' # Load checkpoint. print('==> Resuming from checkpoint..') assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' checkpoint = torch.load(chkp_address) net.load_state_dict(checkpoint['net']) best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] + 1 print(start_epoch) test_loss, test_broad_outs = validation(start_epoch, validation=False) test_broad_target = torch.argmax(test_broad_outs, dim=1).cpu().tolist() categs = list(set(test_broad_target)) print(categs) Nct = len(categs) cat_matrix = np.zeros((Nct, K), dtype=int) for i in range(len(testset.targets)): broad_categ_num = categs.index(test_broad_target[i]) cat_matrix[broad_categ_num, testset.targets[i]] += 1 for i in range(Nct): print(cat_matrix[i]) print(np.sum(cat_matrix, axis=1))
[ "torch.nn.Dropout", "numpy.sum", "torch.argmax", "torch.cat", "torchvision.transforms.Normalize", "torch.no_grad", "numpy.set_printoptions", "torch.utils.data.DataLoader", "torch.load", "torch.set_printoptions", "torch.Tensor", "torch.nn.Linear", "torch.zeros", "torch.nn.AvgPool2d", "torch.nn.BatchNorm1d", "torchvision.datasets.CIFAR100", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.cuda.is_available", "torch.nn.MaxPool2d", "torch.nn.ReLU", "torch.nn.Sequential", "os.path.isdir", "torch.nn.CrossEntropyLoss", "numpy.zeros", "torch.nn.DataParallel", "torchvision.transforms.ToTensor" ]
[((3061, 3171), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""~/projectdata"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='~/projectdata', train=False, download=\n True, transform=transform_test)\n", (3090, 3171), False, 'import torchvision\n'), ((3180, 3265), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(32)', 'shuffle': '(False)', 'num_workers': '(2)'}), '(testset, batch_size=32, shuffle=False,\n num_workers=2)\n', (3207, 3265), False, 'import torch\n'), ((3375, 3396), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3394, 3396), True, 'import torch.nn as nn\n'), ((5112, 5139), 'os.path.isdir', 'os.path.isdir', (['"""checkpoint"""'], {}), "('checkpoint')\n", (5125, 5139), False, 'import os\n'), ((5194, 5218), 'torch.load', 'torch.load', (['chkp_address'], {}), '(chkp_address)\n', (5204, 5218), False, 'import torch\n'), ((5574, 5603), 'numpy.zeros', 'np.zeros', (['(Nct, K)'], {'dtype': 'int'}), '((Nct, K), dtype=int)\n', (5582, 5603), True, 'import numpy as np\n'), ((2667, 2692), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2690, 2692), False, 'import torch\n'), ((2808, 2834), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (2829, 2834), False, 'import torch\n'), ((4485, 4520), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (4507, 4520), False, 'import torch\n'), ((4525, 4557), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (4544, 4557), True, 'import numpy as np\n'), ((5810, 5836), 'numpy.sum', 'np.sum', (['cat_matrix'], {'axis': '(1)'}), '(cat_matrix, axis=1)\n', (5816, 5836), True, 'import numpy as np\n'), ((1269, 1288), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['Nct'], {}), '(Nct)\n', (1283, 1288), True, 'import torch.nn as nn\n'), ((2521, 2543), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (2534, 2543), True, 'import torch.nn as nn\n'), ((2945, 2966), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2964, 2966), True, 'import torchvision.transforms as transforms\n'), ((2972, 3044), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5071, 0.4865, 0.4409)', '(0.2009, 0.1984, 0.2023)'], {}), '((0.5071, 0.4865, 0.4409), (0.2009, 0.1984, 0.2023))\n', (2992, 3044), True, 'import torchvision.transforms as transforms\n'), ((3768, 3783), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3781, 3783), False, 'import torch\n'), ((1022, 1037), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1032, 1037), True, 'import torch.nn as nn\n'), ((1051, 1071), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(4096)'], {}), '(512, 4096)\n', (1060, 1071), True, 'import torch.nn as nn\n'), ((1085, 1106), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1092, 1106), True, 'import torch.nn as nn\n'), ((1120, 1135), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1130, 1135), True, 'import torch.nn as nn\n'), ((1149, 1170), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(4096)'], {}), '(4096, 4096)\n', (1158, 1170), True, 'import torch.nn as nn\n'), ((1184, 1205), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1191, 1205), True, 'import torch.nn as nn\n'), ((1219, 1239), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'Nct'], {}), '(4096, Nct)\n', (1228, 
1239), True, 'import torch.nn as nn\n'), ((1344, 1359), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1354, 1359), True, 'import torch.nn as nn\n'), ((1373, 1399), 'torch.nn.Linear', 'nn.Linear', (['(512 * Nct)', '(4096)'], {}), '(512 * Nct, 4096)\n', (1382, 1399), True, 'import torch.nn as nn\n'), ((1411, 1432), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1418, 1432), True, 'import torch.nn as nn\n'), ((1446, 1461), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1456, 1461), True, 'import torch.nn as nn\n'), ((1475, 1496), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(4096)'], {}), '(4096, 4096)\n', (1484, 1496), True, 'import torch.nn as nn\n'), ((1510, 1531), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1517, 1531), True, 'import torch.nn as nn\n'), ((1545, 1563), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'K'], {}), '(4096, K)\n', (1554, 1563), True, 'import torch.nn as nn\n'), ((2467, 2504), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(1)', 'stride': '(1)'}), '(kernel_size=1, stride=1)\n', (2479, 2504), True, 'import torch.nn as nn\n'), ((3539, 3553), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (3551, 3553), False, 'import torch\n'), ((3584, 3616), 'torch.zeros', 'torch.zeros', (['K'], {'dtype': 'torch.long'}), '(K, dtype=torch.long)\n', (3595, 3616), False, 'import torch\n'), ((3643, 3675), 'torch.zeros', 'torch.zeros', (['K'], {'dtype': 'torch.long'}), '(K, dtype=torch.long)\n', (3654, 3675), False, 'import torch\n'), ((4034, 4068), 'torch.cat', 'torch.cat', (['(broad_outs, broad_out)'], {}), '((broad_outs, broad_out))\n', (4043, 4068), False, 'import torch\n'), ((5438, 5474), 'torch.argmax', 'torch.argmax', (['test_broad_outs'], {'dim': '(1)'}), '(test_broad_outs, dim=1)\n', (5450, 5474), False, 'import torch\n'), ((2183, 2220), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (2195, 2220), True, 'import torch.nn as nn\n'), ((2267, 2318), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'x'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels, x, kernel_size=3, padding=1)\n', (2276, 2318), True, 'import torch.nn as nn\n'), ((2347, 2364), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['x'], {}), '(x)\n', (2361, 2364), True, 'import torch.nn as nn\n'), ((2393, 2414), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2400, 2414), True, 'import torch.nn as nn\n')]
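Each entry in the extract_api field above appears to pair a character span in the code field with the fully qualified API name, the locally used call name, the parsed positional and keyword arguments, the matched call text, and the import line that introduced the library; this layout is inferred from the rows themselves rather than from any documentation. As a rough illustration of how such call records can be reproduced from a source string, here is a minimal, self-contained sketch built on the standard-library ast module (it is not the extractor that produced this dataset):

import ast

def list_calls(source):
    """Return (line, dotted_name, call_text) for each call whose callee is a plain dotted name."""
    tree = ast.parse(source)

    def dotted(node):
        # Rebuild a dotted name such as np.random.rand from nested Attribute/Name nodes.
        if isinstance(node, ast.Attribute):
            base = dotted(node.value)
            return f"{base}.{node.attr}" if base else None
        if isinstance(node, ast.Name):
            return node.id
        return None

    records = []
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            name = dotted(node.func)
            if name:
                records.append((node.lineno, name, ast.get_source_segment(source, node)))
    return records

sample = "import numpy as np\nperm = np.random.rand(3, 4).argsort(axis=-1)\n"
print(list_calls(sample))  # [(2, 'np.random.rand', 'np.random.rand(3, 4)')]

Resolving the local alias np back to the imported package name (numpy.random.rand, as the dataset records it) would additionally require tracking the import statements, which this sketch leaves out.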
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import unittest import shutil import numpy as np import google.protobuf.text_format as text_format import oneflow as flow import oneflow.core.serving.saved_model_pb2 as saved_model_pb from alexnet import load_data, alexnet from ofrecord_dataset import ImageNetRecordDataset DEFAULT_BATCH_SIZE = 8 DEFAULT_TRAIN_DATA_PATH = "/dataset/imagenet_227/train/32/" DEFAULT_TRAIN_DATA_PART_NUM = 32 DEFAULT_INFER_DATA_PATH = "/dataset/imagenet_227/train/32/" DEFAULT_INFER_DATA_PART_NUM = 32 DEFAULT_CHECKPOINT_DIR = "/dataset/PNGS/cnns_model_for_test/alexnet/models/of_model_bk" DEFAULT_IMAGE_SIZE = 227 def init_env(): flow.env.init() flow.config.machine_num(1) flow.config.cpu_device_num(1) flow.config.gpu_device_num(1) flow.config.enable_debug_mode(True) def make_alexnet_train_func(batch_size, data_dir, data_part_num): @flow.global_function(type="train") def alexnet_train() -> flow.typing.Numpy: image, label = load_data(batch_size, data_dir, data_part_num) loss = alexnet(image, label) flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [0.00001]), momentum=0 ).minimize(loss) return loss return alexnet_train def make_alexnet_infer_func(batch_size, image_size): input_lbns = {} output_lbns = {} image_shape = (batch_size,) + tuple(image_size) label_shape = (batch_size,) @flow.global_function(type="predict") def alexnet_inference( image: flow.typing.Numpy.Placeholder(image_shape, dtype=flow.float32), label: flow.typing.Numpy.Placeholder(label_shape, dtype=flow.int32), ) -> flow.typing.Numpy: input_lbns["image"] = image.logical_blob_name input_lbns["label"] = label.logical_blob_name image = flow.transpose(image, perm=(0, 3, 1, 2)) loss = alexnet(image, label, trainable=False) # reduce_mean calculate reduce_count in python api, we should only set attribute for op in python, # so reduce_count is out of date when we have loaded model and set new batch_size. 
# We will modify implementation of reduce_mean # output = flow.math.reduce_mean(loss) output = loss output_lbns["output"] = output.logical_blob_name return output return alexnet_inference, input_lbns, output_lbns def load_saved_model(model_meta_file_path): saved_model_proto = saved_model_pb.SavedModel() with open(model_meta_file_path, "rb") as f: text_format.Merge(f.read(), saved_model_proto) return saved_model_proto @flow.unittest.skip_unless_1n1d() class TestSaveAndLoadModel(flow.unittest.TestCase): def test_alexnet(test_case, batch_size=DEFAULT_BATCH_SIZE, num_batchs=6): init_env() alexnet_infer, input_lbns, output_lbns = make_alexnet_infer_func( batch_size, (DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE, 3) ) flow.load_variables(flow.checkpoint.get(DEFAULT_CHECKPOINT_DIR)) # save model saved_model_path = "alexnet_models" model_name = "alexnet" model_version = 1 model_version_path = os.path.join(saved_model_path, str(model_version)) if os.path.exists(saved_model_path) and os.path.isdir(saved_model_path): print( "WARNING: The model version path '{}' already exist" ", old version directory will be removed".format(model_version_path) ) shutil.rmtree(saved_model_path) saved_model_builder = flow.saved_model.ModelBuilder(saved_model_path) signature_builder = ( saved_model_builder.ModelName(model_name) .Version(model_version) .AddFunction(alexnet_infer) .AddSignature("regress") ) for input_name, lbn in input_lbns.items(): signature_builder.Input(input_name, lbn) for output_name, lbn in output_lbns.items(): signature_builder.Output(output_name, lbn) saved_model_builder.Save() # test data new_batch_size = int(batch_size / 2) dataset = ImageNetRecordDataset( batch_size=new_batch_size, image_resize_size=DEFAULT_IMAGE_SIZE, data_format="NHWC", ) image_list, label_list = dataset.load_batchs(num_batchs) assert image_list[0].shape[0] == new_batch_size image_size = tuple(image_list[0].shape[1:]) flow.clear_default_session() alexnet_infer, _, _ = make_alexnet_infer_func(new_batch_size, image_size) flow.load_variables(flow.checkpoint.get(DEFAULT_CHECKPOINT_DIR)) print("alexnet inference result:") origin_outputs = [] for i, (image, label) in enumerate(zip(image_list, label_list)): output = alexnet_infer(image, label) # origin_outputs.append(output.item()) # print("iter#{:<6} output:".format(i), output.item()) origin_outputs.append(output) print("iter#{:<6} output:".format(i), output) origin_outputs = np.array(origin_outputs, dtype=np.float32) # load model and run flow.clear_default_session() model_meta_file_path = os.path.join( saved_model_path, str(model_version), "saved_model.prototxt" ) saved_model_proto = load_saved_model(model_meta_file_path) sess = flow.serving.InferenceSession() checkpoint_path = os.path.join( saved_model_path, str(model_version), saved_model_proto.checkpoint_dir ) sess.set_checkpoint_path(checkpoint_path) graph_name = saved_model_proto.default_graph_name graph_def = saved_model_proto.graphs[graph_name] signature_def = graph_def.signatures[graph_def.default_signature_name] with sess.open(graph_name, signature_def, new_batch_size): sess.compile(graph_def.op_list) # sess.print_job_set() sess.launch() job_name = sess.list_jobs()[0] input_names = sess.list_inputs() print("input names:", input_names) for input_name in input_names: print( 'input "{}" info: {}'.format( input_name, sess.input_info(input_name, job_name) ) ) output_names = sess.list_outputs() print("output names:", output_names) for output_name in output_names: print( 'output "{}" info: {}'.format( output_name, 
sess.output_info(output_name, job_name) ) ) print("load saved alexnet and inference result:") print_input_info = False cmp_outputs = [] for i, (image, label) in enumerate(zip(image_list, label_list)): if print_input_info: print("image shape: {}, dtype: {}".format(image.shape, image.dtype)) print( "label shape: {}, dtype: {}, data: {}".format( label.shape, label.dtype, label ) ) if i > 1: print((image - image_list[i - 1]).mean()) outputs = sess.run(alexnet_infer.__name__, image=image, label=label) # cmp_outputs.append(outputs[0].item()) # print("iter#{:<6} output:".format(i), outputs[0].item()) cmp_outputs.append(outputs[0]) print("iter#{:<6} output:".format(i), outputs[0]) cmp_outputs = np.array(cmp_outputs, dtype=np.float32) test_case.assertTrue(np.allclose(origin_outputs, cmp_outputs)) sess.close() if __name__ == "__main__": unittest.main()
[ "oneflow.config.machine_num", "oneflow.serving.InferenceSession", "oneflow.typing.Numpy.Placeholder", "numpy.allclose", "oneflow.clear_default_session", "oneflow.config.cpu_device_num", "oneflow.unittest.skip_unless_1n1d", "shutil.rmtree", "unittest.main", "oneflow.transpose", "os.path.exists", "oneflow.config.gpu_device_num", "oneflow.saved_model.ModelBuilder", "alexnet.alexnet", "oneflow.optimizer.PiecewiseConstantScheduler", "alexnet.load_data", "ofrecord_dataset.ImageNetRecordDataset", "oneflow.checkpoint.get", "oneflow.env.init", "oneflow.global_function", "oneflow.core.serving.saved_model_pb2.SavedModel", "os.path.isdir", "numpy.array", "oneflow.config.enable_debug_mode" ]
[((3159, 3191), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3189, 3191), True, 'import oneflow as flow\n'), ((1221, 1236), 'oneflow.env.init', 'flow.env.init', ([], {}), '()\n', (1234, 1236), True, 'import oneflow as flow\n'), ((1241, 1267), 'oneflow.config.machine_num', 'flow.config.machine_num', (['(1)'], {}), '(1)\n', (1264, 1267), True, 'import oneflow as flow\n'), ((1272, 1301), 'oneflow.config.cpu_device_num', 'flow.config.cpu_device_num', (['(1)'], {}), '(1)\n', (1298, 1301), True, 'import oneflow as flow\n'), ((1306, 1335), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (1332, 1335), True, 'import oneflow as flow\n'), ((1340, 1375), 'oneflow.config.enable_debug_mode', 'flow.config.enable_debug_mode', (['(True)'], {}), '(True)\n', (1369, 1375), True, 'import oneflow as flow\n'), ((1449, 1483), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""'}), "(type='train')\n", (1469, 1483), True, 'import oneflow as flow\n'), ((2003, 2039), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""'}), "(type='predict')\n", (2023, 2039), True, 'import oneflow as flow\n'), ((2996, 3023), 'oneflow.core.serving.saved_model_pb2.SavedModel', 'saved_model_pb.SavedModel', ([], {}), '()\n', (3021, 3023), True, 'import oneflow.core.serving.saved_model_pb2 as saved_model_pb\n'), ((8262, 8277), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8275, 8277), False, 'import unittest\n'), ((1553, 1599), 'alexnet.load_data', 'load_data', (['batch_size', 'data_dir', 'data_part_num'], {}), '(batch_size, data_dir, data_part_num)\n', (1562, 1599), False, 'from alexnet import load_data, alexnet\n'), ((1615, 1636), 'alexnet.alexnet', 'alexnet', (['image', 'label'], {}), '(image, label)\n', (1622, 1636), False, 'from alexnet import load_data, alexnet\n'), ((2375, 2415), 'oneflow.transpose', 'flow.transpose', (['image'], {'perm': '(0, 3, 1, 2)'}), '(image, perm=(0, 3, 1, 2))\n', (2389, 2415), True, 'import oneflow as flow\n'), ((2431, 2469), 'alexnet.alexnet', 'alexnet', (['image', 'label'], {'trainable': '(False)'}), '(image, label, trainable=False)\n', (2438, 2469), False, 'from alexnet import load_data, alexnet\n'), ((4113, 4160), 'oneflow.saved_model.ModelBuilder', 'flow.saved_model.ModelBuilder', (['saved_model_path'], {}), '(saved_model_path)\n', (4142, 4160), True, 'import oneflow as flow\n'), ((4699, 4810), 'ofrecord_dataset.ImageNetRecordDataset', 'ImageNetRecordDataset', ([], {'batch_size': 'new_batch_size', 'image_resize_size': 'DEFAULT_IMAGE_SIZE', 'data_format': '"""NHWC"""'}), "(batch_size=new_batch_size, image_resize_size=\n DEFAULT_IMAGE_SIZE, data_format='NHWC')\n", (4720, 4810), False, 'from ofrecord_dataset import ImageNetRecordDataset\n'), ((5035, 5063), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (5061, 5063), True, 'import oneflow as flow\n'), ((5656, 5698), 'numpy.array', 'np.array', (['origin_outputs'], {'dtype': 'np.float32'}), '(origin_outputs, dtype=np.float32)\n', (5664, 5698), True, 'import numpy as np\n'), ((5737, 5765), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (5763, 5765), True, 'import oneflow as flow\n'), ((5976, 6007), 'oneflow.serving.InferenceSession', 'flow.serving.InferenceSession', ([], {}), '()\n', (6005, 6007), True, 'import oneflow as flow\n'), ((8097, 8136), 'numpy.array', 'np.array', (['cmp_outputs'], {'dtype': 'np.float32'}), '(cmp_outputs, dtype=np.float32)\n', 
(8105, 8136), True, 'import numpy as np\n'), ((2082, 2144), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['image_shape'], {'dtype': 'flow.float32'}), '(image_shape, dtype=flow.float32)\n', (2111, 2144), True, 'import oneflow as flow\n'), ((2161, 2221), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['label_shape'], {'dtype': 'flow.int32'}), '(label_shape, dtype=flow.int32)\n', (2190, 2221), True, 'import oneflow as flow\n'), ((3521, 3564), 'oneflow.checkpoint.get', 'flow.checkpoint.get', (['DEFAULT_CHECKPOINT_DIR'], {}), '(DEFAULT_CHECKPOINT_DIR)\n', (3540, 3564), True, 'import oneflow as flow\n'), ((3781, 3813), 'os.path.exists', 'os.path.exists', (['saved_model_path'], {}), '(saved_model_path)\n', (3795, 3813), False, 'import os\n'), ((3818, 3849), 'os.path.isdir', 'os.path.isdir', (['saved_model_path'], {}), '(saved_model_path)\n', (3831, 3849), False, 'import os\n'), ((4050, 4081), 'shutil.rmtree', 'shutil.rmtree', (['saved_model_path'], {}), '(saved_model_path)\n', (4063, 4081), False, 'import shutil\n'), ((5174, 5217), 'oneflow.checkpoint.get', 'flow.checkpoint.get', (['DEFAULT_CHECKPOINT_DIR'], {}), '(DEFAULT_CHECKPOINT_DIR)\n', (5193, 5217), True, 'import oneflow as flow\n'), ((8166, 8206), 'numpy.allclose', 'np.allclose', (['origin_outputs', 'cmp_outputs'], {}), '(origin_outputs, cmp_outputs)\n', (8177, 8206), True, 'import numpy as np\n'), ((1677, 1731), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[1e-05]'], {}), '([], [1e-05])\n', (1718, 1731), True, 'import oneflow as flow\n')]
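The substance of the test above is a round-trip check: run inference with the in-memory network, reload the saved model through the serving session, and require np.allclose between the two output sequences. The same pattern, reduced to a library-agnostic miniature with numpy and pickle purely for illustration (it does not touch oneflow and stands in for no part of its API):

import pickle
import numpy as np

weights = np.array([0.5, -1.0, 2.0])

def infer(x):
    # Stand-in for the original in-memory model.
    return x @ weights

blob = pickle.dumps(weights)       # "save" the model
reloaded = pickle.loads(blob)     # "load" it back

def infer_reloaded(x):
    # Stand-in for inference through the reloaded model.
    return x @ reloaded

batch = np.random.rand(4, 3).astype(np.float32)
assert np.allclose(infer(batch), infer_reloaded(batch))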
# # This file is part of seirmo (https://github.com/SABS-R3-Epidemiology/seirmo/) # which is released under the BSD 3-clause license. See accompanying LICENSE.md # for copyright notice and full license details. # import numpy as np from scipy.integrate import solve_ivp import seirmo class DeterministicSEIRModel(seirmo.SEIRForwardModel): r""" ODE model: deterministic SEIR The SEIR Model has four compartments: susceptible individuals (:math:`S`), exposed but not yet infectious (:math:`E`), infectious (:math:`I`) and recovered (:math:`R`): .. math:: \frac{dS(t)}{dt} = -\beta S(t)I(t), .. math:: \frac{dE(t)}{dt} = \beta S(t)I(t) - \kappa E(t), .. math:: \frac{dI(t)}{dt} = \kappa E(t) - \gamma I(t), .. math:: \frac{dR(t)}{dt} = \gamma I(t), where :math:`S(0) = S_0, E(0) = E_0, I(O) = I_0, R(0) = R_0` are also parameters of the model. Extends :class:`SEIRForwardModel`. """ def __init__(self): super(DeterministicSEIRModel, self).__init__() # Assign default values self._output_collector = seirmo.SEIROutputCollector( ['S', 'E', 'I', 'R', 'Incidence']) self._parameters = seirmo.SEIRParameters( ['S0', 'E0', 'I0', 'R0', 'alpha', 'beta', 'gamma']) def _right_hand_side(self, t, y, c): # Assuming y = [S, E, I, R] (the dependent variables in the model) # Assuming the parameters are ordered like # parameters = [S0, E0, I0, R0, beta, kappa, gamma] # Let c = [beta, kappa, gamma] # = [parameters[0], parameters[1], parameters[2]], # then beta = c[0], kappa = c[1], gamma = c[2] # Construct the derivative functions of the system of ODEs s, e, i, _ = y beta, kappa, gamma = c dydt = [-beta * s * i, beta * s * i - kappa * e, kappa * e - gamma * i, gamma * i] return dydt def simulate(self, parameters, times): self._parameters.configure_parameters(parameters) # Define time spans, initial conditions, and constants #y_init = parameters[:4] #c = parameters[4:] # Solve the system of ODEs sol = solve_ivp( lambda t, y: self._right_hand_side(t, y, self._parameters[4:]), [times[0], times[-1]], self._parameters[:4], t_eval=times) output = sol['y'] # Total infected is infectious 'i' plus recovered 'r' total_infected = output[2, :] + output[3, :] # Number of incidences is the increase in total_infected # between the time points (add a 0 at the front to # make the length consistent with the solution) n_incidence = np.zeros(len(times)) n_incidence[1:] = total_infected[1:] - total_infected[:-1] # Append n_incidence to output # Output is a matrix with rows being S, E, I, R and Incidence output = np.vstack(tup=(output, n_incidence)) # Get the selected outputs self._output_collector.report_all(output.transpose()) #output = output[self._output_indices, :] return self._output_collector.retrieve()
[ "seirmo.SEIROutputCollector", "seirmo.SEIRParameters", "numpy.vstack" ]
[((1121, 1182), 'seirmo.SEIROutputCollector', 'seirmo.SEIROutputCollector', (["['S', 'E', 'I', 'R', 'Incidence']"], {}), "(['S', 'E', 'I', 'R', 'Incidence'])\n", (1147, 1182), False, 'import seirmo\n'), ((1223, 1296), 'seirmo.SEIRParameters', 'seirmo.SEIRParameters', (["['S0', 'E0', 'I0', 'R0', 'alpha', 'beta', 'gamma']"], {}), "(['S0', 'E0', 'I0', 'R0', 'alpha', 'beta', 'gamma'])\n", (1244, 1296), False, 'import seirmo\n'), ((2938, 2974), 'numpy.vstack', 'np.vstack', ([], {'tup': '(output, n_incidence)'}), '(tup=(output, n_incidence))\n', (2947, 2974), True, 'import numpy as np\n')]
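The DeterministicSEIRModel docstring above writes out the four compartment equations, and simulate() hands them to scipy's solve_ivp. For reference, here is a self-contained sketch of the same system using only numpy and scipy, independent of the seirmo package; the values chosen for beta, kappa, gamma and the initial compartments are illustrative, not taken from the package:

import numpy as np
from scipy.integrate import solve_ivp

def seir_rhs(t, y, beta, kappa, gamma):
    # Same right-hand side as DeterministicSEIRModel._right_hand_side above.
    s, e, i, _ = y
    return [-beta * s * i,
            beta * s * i - kappa * e,
            kappa * e - gamma * i,
            gamma * i]

times = np.linspace(0, 50, 101)
y0 = [0.99, 0.0, 0.01, 0.0]  # illustrative S0, E0, I0, R0 as population fractions
sol = solve_ivp(seir_rhs, [times[0], times[-1]], y0,
                t_eval=times, args=(0.5, 0.2, 0.1))  # illustrative beta, kappa, gamma
print(sol.y.shape)  # (4, 101): rows are S, E, I, R at each requested time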
# Dev Note: # Log Finished. # early stop Finished. # Autosave Finished. # Learning rate decay # Warm start # Parameters regularization import io import os import logging from datetime import datetime import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms class PyTorchBaseModel(object): def __init__(self, model=None, loss_function=None, optimizer=None, log_dir=None, checkpoint_dir=None): # Tic: self.tic = datetime.now().strftime('%Y-%m-%d_%H-%M-%S') # Initialize log: self.__init_logging(log_dir=log_dir) # Define model checkpoints: self.checkpoint_dir = checkpoint_dir # Define model: self.model = model if self.model is not None: assert hasattr(self.model, "forward"), "Input PyTorch model should be consistent to have 'forward' method." # Define loss function: self.loss = loss_function logging.info("Initial loss function: {}".format(loss_function)) # Define optimizer: self.optimizer = optimizer logging.info("Initial optimizer: {}".format(optimizer)) def __init_logging(self, log_dir=None): if log_dir is None: self.__log_stream = io.StringIO() logging.basicConfig( stream=self.__log_stream, level=logging.INFO, format="[%(asctime)s (Local Time)] %(levelname)s : %(message)s", # Local time may vary for cloud services. datefmt="%m/%d/%Y %I:%M:%S %p" ) else: self.__log_stream = None if not os.path.isdir(log_dir): os.makedirs(log_dir) # date_str = datetime.now().strftime('%Y-%m-%d_%H-%M') log_file = 'PyTorch_log_{}.txt'.format(self.tic) # reload(logging) # bad logging.basicConfig( filename=os.path.join(log_dir, log_file), level=logging.INFO, format="[%(asctime)s (Local Time)] %(levelname)s : %(message)s", datefmt='%m/%d/%Y %I:%M:%S %p' ) # logging.getLogger().addHandler(logging.StreamHandler()) logging.info("Created log at {}.".format(self.tic)) def get_log(self): if self.__log_stream is not None: return self.__log_stream.get_value() else: print("Log stream does not exist.") raise def pytorch_default_loss_functions(self): pass # Need to finish the loss function list: def set_loss_function(self, loss_function=None): if loss_function is None: logging.error("Error: No loss function was provided.") raise NotImplementedError("Need to define loss function.") else: if isinstance(loss_function, str): assert loss_function in ( "binary_cross_entropy", "nll_loss", "mse_loss", "cross_entropy", "binary_cross_entropy_with_logits" ) logging.info("Use loss function: {}".format(loss_function)) if loss_function.upper() == "NLL_LOSS": return F.nll_loss if loss_function.upper() == "BINARY_CROSS_ENTROPY": return F.binary_cross_entropy if loss_function.upper() == "BINARY_CROSS_ENTROPY_WITH_LOGITS": return F.binary_cross_entropy_with_logits if loss_function.upper() == "CROSS_ENTROPY": return F.cross_entropy # print("Use loss function: {}".format(loss_function)) # Need to finish the optimizer list: def set_optimizer(self, optimizer=None): # assert isinstance(learning_rate, float) and learning_rate>0, TypeError("Input paramter 'learning_rate' can only be positive float.") if optimizer is None: raise NotImplementedError('Need to define optimizer.') else: if isinstance(optimizer, str): assert optimizer.upper() in ( "ADAM", "RMSPROP", "SGD" ) logging.info("Use optimizer: {}".format(optimizer)) if optimizer.upper() == "ADAM": return optim.Adam elif optimizer.upper() == "SGD": return optim.SGD def __update_optimizer_learning_rate(self): pass def fit(self, train_loader, test_loader, epoch, loss_function=None, optimizer=None, 
learning_rate=0.001, \ learning_rate_decay_rate=0.8, lr_decay_tolerance=3, clip=-1, use_cuda=True, verbose=1, random_seed=7, **kwargs): ''' :param train_loader: :param epoch: :param use_cuda: :param early_stopping: :param verbose: :param kwargs: Used for adjusting the parameters of loss functions and optimizers. :return: ''' logging.info("Training PyTorch model.") logging.info("Neural network architecture: {}".format(self.model.eval())) if isinstance(random_seed, int) and random_seed > 0: torch.manual_seed(random_seed) else: raise TypeError("Input parameter random_seed can only be positive integer.") if use_cuda: if torch.cuda.is_available(): self.device = torch.device("cuda") else: print("CUDA is not available!") raise else: print("Warning: CUDA is not in use. CUDA availability: {}".format(torch.cuda.is_available())) self.device = torch.device("cpu") try: # Define placeholders for recording training processes: train_loss_by_ep = dict() # Leave for later. # ep_train_loss = 0.0 # Define model checkpoints setup: if self.checkpoint_dir is not None: save_checkpoints = True if not os.path.isdir(self.checkpoint_dir): os.makedirs(self.checkpoint_dir) checkpoint_path = os.path.join(self.checkpoint_dir, "PyTorch_training_checkpoint_{}".format(self.tic)) else: save_checkpoints = False # Define placeholders for counting early stopping: _tolerance = 0 _best_epoch = 1 _best_loss = 999999.99 _best_model = None # May cause larger memory usage? _train_loss = 0.0 adjustable_learning_rate = learning_rate # Training model: self.model = self.model.to(self.device) self.model.train() # Adjust trainable parameters: if hasattr(self.model, "trainable_params"): trainable_params = self.model.trainable_params() logging.info("Will only train partial parameters under tranable_params method.") else: trainable_params = self.model.parameters() logging.info("Will train all the parameters inherited from nn.Module.parameters method.") _loss_function = self.set_loss_function(loss_function=loss_function) # Initialize optimizer: _optimizer = self.set_optimizer(optimizer=optimizer) _optimizer = _optimizer(trainable_params, lr=adjustable_learning_rate) # Adjustment for images augmentation: if not isinstance(train_loader, list): train_loader_list = [train_loader] else: train_loader_list = train_loader # Epoch loop: for ep in range(1, epoch+1): # if ep == 1: # # Define optimizer: (May be integrated into set parameter function.) # _optimizer = self.set_optimizer(optimizer=optimizer) # _optimizer = _optimizer(trainable_params, lr=adjustable_learning_rate) # if ep == 2: # # Define optimizer: (May be integrated into set parameter function.) # adjustable_learning_rate *= 0.1 # for param_group_ in _optimizer.param_groups: # param_group_["lr"] = adjustable_learning_rate # logging.info("Epoch {}, updated learning rate to {}".format(ep, adjustable_learning_rate)) # elif _tolerance >= lr_decay_tolerance: # self.model = _best_model # Restore the best model. # _tolerance = 0 # Reset tolerance # adjustable_learning_rate *= learning_rate_decay_rate # for param_group_ in _optimizer.param_groups: # param_group_["lr"] = adjustable_learning_rate # _msg = "Loss did not improve for {} epochs. 
Adjusted learning rate to {}".format(lr_decay_tolerance, \ # adjustable_learning_rate) # print(_msg) # logging.info(_msg) # Loop through data loaders: adjustable_learning_rate *= learning_rate_decay_rate ** (ep-1) for _train_loader_idx, _train_loader in enumerate(train_loader_list): # Session started with train loader: _separator = '='*35 + '\tSession of data loader {}\t'.format(_train_loader_idx+1) + '='*35 print(_separator) # Loop through batches: for batch_idx, (data, target) in enumerate(_train_loader): data, target = data.to(self.device), target.to(self.device) _optimizer.zero_grad() _output = self.model(data) _loss = _loss_function(_output, target) _loss.backward() # Clip the grad norm: if clip > 0: torch.nn.utils.clip_grad_norm(trainable_params, clip) _optimizer.step() _train_loss += _loss if batch_idx % verbose == 0: print('Epoch: {} Data Loader {} [{}/{} ({:.0f}%)]\t Avg Train Loss per Batch: {:.6f}'.format(\ ep, _train_loader_idx+1, \ batch_idx * len(data), \ len(_train_loader.dataset), \ 100. * batch_idx / len(_train_loader), \ _train_loss.data[0]/verbose)) # Reset placeholder of batch training loss: _train_loss = 0.0 # This part needs second thoughts... # avg_epoch_loss = ep_train_loss.data[0]/(batch_idx+1) avg_epoch_loss = _loss.item() # / batch_idx # Debug. # Logging and output epoch loss: epoch_loss_info = "Epoch {}, Average Loss: {:.6f}".format(ep, avg_epoch_loss) logging.info(epoch_loss_info) print(epoch_loss_info) # Calculate score from evaluation data set: test_log_loss = self.eval(self.model, test_loader) # logging.info("Epoch {}, Test Loss: {:.6f}, Test Accuracy: {:.2f}".format(ep, test_loss, test_accuracy)) # Show error per epoch: # if ep % verbose == 0: # print("Training Epoch: {}\tAvg Loss: {:.6f}".format(ep, avg_epoch_loss)) if test_log_loss <= _best_loss: _best_loss = test_log_loss _best_epoch = ep _best_model = self.model _tolerance = 1 else: if _tolerance < lr_decay_tolerance: _tolerance += 1 # Truncated early stopping, updated it to learning rate decay: else: print("Early stopped.") logging.info("Early stopped.") break # Reset epoch loss: ep_train_loss = 0.0 # Log best scores: logging.info("Best training epoch: {}, best training loss score: {:.6f}".format(_best_epoch, _best_loss)) # Replace class model with best trained model: self.model = _best_model # Save model checkpoint: if save_checkpoints: try: torch.save(_best_model, checkpoint_path) except Exception as e: print("Failed in saving trained model as checkpoint. Error: {}".format(e)) logging.error("Failed in saving trained model as checkpoint. Error: {}".format(e)) raise return _best_model except Exception as e: print("Error during training PyTorch model: {}".format(e)) raise def eval(self, model, test_loaders): # raise NotImplementError("Sub-class needs to define the eval function.") # Debug. 
from sklearn.metrics import accuracy_score from sklearn.metrics import log_loss try: model.eval() test_loss = 0.0 correct = 0.0 # Placeholders for calculating accuracy: truth = [] proba = [] if not isinstance(test_loaders, list): test_loader_list = [test_loaders] else: test_loader_list = test_loaders # Loop through test data loaders: for test_loader_idx, test_loader in enumerate(test_loader_list): # Loop through data batch per loader: for batch_idx, (data, target) in enumerate(test_loader): data, target = data.to(self.device), target.to(self.device) output = model(data) test_loss += F.cross_entropy(output, target).data[0] # Binary classification, calculate accuracy: # proba_, bin_pred_ = output.max(dim=1) proba_ = output[:, 1] proba += proba_.cpu().detach().numpy().tolist() truth += target.cpu().numpy().tolist() # Calculate total metrics: test_loss /= len(test_loader) eval_logloss = log_loss(truth, np.exp(proba)) print("\nTest set: Average loss: {:.6f}\tLog Loss: {:.6f}\n".format(test_loss, eval_logloss)) return eval_logloss except Exception as e: print("Failed in evaluate the model. Error: {}".format(e))
[ "io.StringIO", "logging.error", "os.makedirs", "logging.basicConfig", "os.path.join", "os.path.isdir", "torch.manual_seed", "torch.nn.utils.clip_grad_norm", "torch.nn.functional.cross_entropy", "torch.save", "logging.info", "torch.cuda.is_available", "numpy.exp", "torch.device", "datetime.datetime.now" ]
[((5015, 5054), 'logging.info', 'logging.info', (['"""Training PyTorch model."""'], {}), "('Training PyTorch model.')\n", (5027, 5054), False, 'import logging\n'), ((1317, 1330), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1328, 1330), False, 'import io\n'), ((1343, 1515), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'self.__log_stream', 'level': 'logging.INFO', 'format': '"""[%(asctime)s (Local Time)] %(levelname)s : %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S %p"""'}), "(stream=self.__log_stream, level=logging.INFO, format=\n '[%(asctime)s (Local Time)] %(levelname)s : %(message)s', datefmt=\n '%m/%d/%Y %I:%M:%S %p')\n", (1362, 1515), False, 'import logging\n'), ((2739, 2793), 'logging.error', 'logging.error', (['"""Error: No loss function was provided."""'], {}), "('Error: No loss function was provided.')\n", (2752, 2793), False, 'import logging\n'), ((5211, 5241), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (5228, 5241), False, 'import torch\n'), ((5382, 5407), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5405, 5407), False, 'import torch\n'), ((5694, 5713), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5706, 5713), False, 'import torch\n'), ((538, 552), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (550, 552), False, 'from datetime import datetime\n'), ((1712, 1734), 'os.path.isdir', 'os.path.isdir', (['log_dir'], {}), '(log_dir)\n', (1725, 1734), False, 'import os\n'), ((1752, 1772), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (1763, 1772), False, 'import os\n'), ((5439, 5459), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5451, 5459), False, 'import torch\n'), ((6908, 6993), 'logging.info', 'logging.info', (['"""Will only train partial parameters under tranable_params method."""'], {}), "('Will only train partial parameters under tranable_params method.'\n )\n", (6920, 6993), False, 'import logging\n'), ((7082, 7181), 'logging.info', 'logging.info', (['"""Will train all the parameters inherited from nn.Module.parameters method."""'], {}), "(\n 'Will train all the parameters inherited from nn.Module.parameters method.'\n )\n", (7094, 7181), False, 'import logging\n'), ((11437, 11466), 'logging.info', 'logging.info', (['epoch_loss_info'], {}), '(epoch_loss_info)\n', (11449, 11466), False, 'import logging\n'), ((14850, 14863), 'numpy.exp', 'np.exp', (['proba'], {}), '(proba)\n', (14856, 14863), True, 'import numpy as np\n'), ((1998, 2029), 'os.path.join', 'os.path.join', (['log_dir', 'log_file'], {}), '(log_dir, log_file)\n', (2010, 2029), False, 'import os\n'), ((5640, 5665), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5663, 5665), False, 'import torch\n'), ((6045, 6079), 'os.path.isdir', 'os.path.isdir', (['self.checkpoint_dir'], {}), '(self.checkpoint_dir)\n', (6058, 6079), False, 'import os\n'), ((6101, 6133), 'os.makedirs', 'os.makedirs', (['self.checkpoint_dir'], {}), '(self.checkpoint_dir)\n', (6112, 6133), False, 'import os\n'), ((12937, 12977), 'torch.save', 'torch.save', (['_best_model', 'checkpoint_path'], {}), '(_best_model, checkpoint_path)\n', (12947, 12977), False, 'import torch\n'), ((12444, 12474), 'logging.info', 'logging.info', (['"""Early stopped."""'], {}), "('Early stopped.')\n", (12456, 12474), False, 'import logging\n'), ((10115, 10168), 'torch.nn.utils.clip_grad_norm', 'torch.nn.utils.clip_grad_norm', (['trainable_params', 'clip'], {}), '(trainable_params, 
clip)\n', (10144, 10168), False, 'import torch\n'), ((14389, 14420), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'target'], {}), '(output, target)\n', (14404, 14420), True, 'import torch.nn.functional as F\n')]
''' Helper functions to work with matrices ''' import numpy as np def random_unique_permutations(num_permutations, permutation_size): '''Returns a set of randomly chosen unique permutations Example: array([[3, 1, 2, 0], [1, 2, 0, 3], [2, 0, 3, 1]]) References: * https://stackoverflow.com/questions/45437988/numpy-random-choice-to-produce-a-2d-array-with-all-unique-values/45438143#45438143 ''' return np.random.rand(num_permutations, permutation_size).argsort(axis=-1)
[ "numpy.random.rand" ]
[((468, 518), 'numpy.random.rand', 'np.random.rand', (['num_permutations', 'permutation_size'], {}), '(num_permutations, permutation_size)\n', (482, 518), True, 'import numpy as np\n')]
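The one-liner above leans on a small trick: taking argsort of a row of independent uniform random numbers yields a uniformly distributed permutation, since every ordering of the row's values is equally likely. A quick usage check (the printed values depend on the RNG; only the shape and per-row contents are asserted):

import numpy as np

perms = np.random.rand(3, 5).argsort(axis=-1)  # three random permutations of range(5)
print(perms.shape)  # (3, 5)
assert np.all(np.sort(perms, axis=-1) == np.arange(5))  # every row holds 0..4 exactly once

Note that "unique" in the function name refers to the values within each row (as in the linked Stack Overflow answer); the rows themselves are not guaranteed to differ from one another, although collisions are unlikely for long permutations.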
import collections import numpy as np import pandas as pd from sklearn.neighbors import KDTree from sklearn.externals.joblib import Parallel, delayed train = pd.read_csv('../input/training.txt', header=None, names=['id', 'type']) test = pd.read_csv('../input/testing.txt', header=None, names=['id']) type_to_idx = {t: i for i, t in enumerate(['trawler', 'longliner', 'seiner', 'other', 'support'])} train.type = train.type.transform(lambda x: type_to_idx[x]) def read_track(idx): return pd.read_csv('../input/VesselTracks/{}.csv'.format(idx)) def calc_distances(idx, kdtrees): data = read_track(idx)[['Latitude', 'Longitude']] columns = [] for tree in kdtrees.values(): distances, indices = query = tree.query(data, k=1) columns.append(distances) columns_np = np.hstack(columns) df = pd.DataFrame(columns_np, columns=kdtrees.keys()) df['TrackNumber'] = idx df.to_csv('../input/TrackDistances/{}.csv'.format(idx, kdtrees), index=None) print('DONE', idx) kdtrees = collections.OrderedDict() for idx in train.id: data = read_track(idx) data = data[['Latitude', 'Longitude']] data_mid = data data_left = data_mid[data_mid.Longitude >= 0].copy() data_left.Longitude -= 360 data_right = data_mid[data_mid.Longitude < 0].copy() data_right.Longitude += 360 data = pd.concat([data_left, data_mid, data_right]) kdtrees[idx] = KDTree(data, leaf_size=100) print('FINISHED BUILDING TREES') Parallel(n_jobs=-1)(delayed(calc_distances)(idx, kdtrees) for idx in train.id) Parallel(n_jobs=-1)(delayed(calc_distances)(idx, kdtrees) for idx in test.id)
[ "sklearn.externals.joblib.Parallel", "sklearn.externals.joblib.delayed", "pandas.read_csv", "numpy.hstack", "collections.OrderedDict", "sklearn.neighbors.KDTree", "pandas.concat" ]
[((160, 231), 'pandas.read_csv', 'pd.read_csv', (['"""../input/training.txt"""'], {'header': 'None', 'names': "['id', 'type']"}), "('../input/training.txt', header=None, names=['id', 'type'])\n", (171, 231), True, 'import pandas as pd\n'), ((239, 301), 'pandas.read_csv', 'pd.read_csv', (['"""../input/testing.txt"""'], {'header': 'None', 'names': "['id']"}), "('../input/testing.txt', header=None, names=['id'])\n", (250, 301), True, 'import pandas as pd\n'), ((1024, 1049), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (1047, 1049), False, 'import collections\n'), ((803, 821), 'numpy.hstack', 'np.hstack', (['columns'], {}), '(columns)\n', (812, 821), True, 'import numpy as np\n'), ((1350, 1394), 'pandas.concat', 'pd.concat', (['[data_left, data_mid, data_right]'], {}), '([data_left, data_mid, data_right])\n', (1359, 1394), True, 'import pandas as pd\n'), ((1414, 1441), 'sklearn.neighbors.KDTree', 'KDTree', (['data'], {'leaf_size': '(100)'}), '(data, leaf_size=100)\n', (1420, 1441), False, 'from sklearn.neighbors import KDTree\n'), ((1477, 1496), 'sklearn.externals.joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1485, 1496), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((1556, 1575), 'sklearn.externals.joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1564, 1575), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((1497, 1520), 'sklearn.externals.joblib.delayed', 'delayed', (['calc_distances'], {}), '(calc_distances)\n', (1504, 1520), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((1576, 1599), 'sklearn.externals.joblib.delayed', 'delayed', (['calc_distances'], {}), '(calc_distances)\n', (1583, 1599), False, 'from sklearn.externals.joblib import Parallel, delayed\n')]
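In the track-distance script above, every training track is duplicated at longitude -360 and +360 degrees before its KDTree is built. That guards against the antimeridian: the tree uses a plain Euclidean metric on (Latitude, Longitude), so without the shifted copies two positions on opposite sides of the 180-degree line would look almost 360 degrees apart. A small illustration of the effect, with made-up coordinates:

import numpy as np
from sklearn.neighbors import KDTree

track = np.array([[0.0, 179.9]])   # one (lat, lon) point just west of the dateline
query = np.array([[0.0, -179.9]])  # a point just east of it, about 0.2 degrees away

print(KDTree(track).query(query, k=1)[0])    # ~359.8: the naive tree thinks it is far away

wrapped = np.vstack([track, track + [0.0, -360.0], track + [0.0, 360.0]])
print(KDTree(wrapped).query(query, k=1)[0])  # ~0.2: the shifted copy restores the true gap

Separately, sklearn.externals.joblib was removed in scikit-learn 0.23, so on current installations the Parallel and delayed imports in this script would come from the standalone joblib package instead.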
#!/usr/bin/env python # coding: utf-8 # this kernel was based https://www.kaggle.com/corochann/ashrae-training-lgbm-by-meter-type import argparse import gc import os from pathlib import Path import random import sys from tqdm import tqdm_notebook as tqdm import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn import preprocessing from sklearn.model_selection import KFold import lightgbm as lgb from sklearn.metrics import mean_squared_error from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer from pathlib import Path import pickle # dirs LEAK_DIR = Path('../input') DATA_DIR = Path('../processed') OUTPUT_DIR = Path('../output') MODEL_DIR = Path('../models') parser = argparse.ArgumentParser(description='') parser.add_argument('--debug', action='store_true', help='debug mode') args = parser.parse_args() category_cols = ['building_id', 'site_id', 'primary_use', 'IsHoliday'] # , 'meter' feature_cols = ['square_feet', 'year_built'] + [ 'hour', 'weekend', 'day', # 'month' , # 'dayofweek', # 'building_median' ] + [ 'air_temperature', 'cloud_coverage', 'dew_temperature', 'precip_depth_1_hr', 'sea_level_pressure', #'wind_direction', 'wind_speed', 'air_temperature_mean_lag72', 'air_temperature_max_lag72', 'air_temperature_min_lag72', 'air_temperature_std_lag72', 'cloud_coverage_mean_lag72', 'dew_temperature_mean_lag72', 'precip_depth_1_hr_mean_lag72', 'sea_level_pressure_mean_lag72', #'wind_direction_mean_lag72', 'wind_speed_mean_lag72', 'air_temperature_mean_lag3', 'air_temperature_max_lag3', 'air_temperature_min_lag3', 'cloud_coverage_mean_lag3', 'dew_temperature_mean_lag3', 'precip_depth_1_hr_mean_lag3', 'sea_level_pressure_mean_lag3', # 'wind_direction_mean_lag3', 'wind_speed_mean_lag3', # 'floor_area', 'year_cnt', 'bid_cnt', 'dew_smooth', 'air_smooth', 'dew_diff', 'air_diff', 'dew_diff2', 'air_diff2', ] def create_X(test_df, building_meta_df, weather_test_df, target_meter): target_test_df = test_df[test_df['meter'] == target_meter] target_test_df = target_test_df.merge(building_meta_df, on='building_id', how='left') target_test_df = target_test_df.merge(weather_test_df, on=['site_id', 'timestamp'], how='left') #X_test = target_test_df[feature_cols + category_cols + ['month']] X_test = target_test_df[feature_cols + category_cols ] return X_test def pred(X_test, models, batch_size=1000000): iterations = (X_test.shape[0] + batch_size -1) // batch_size print('iterations', iterations) y_test_pred_total = np.zeros(X_test.shape[0]) for i, (mindex, model) in enumerate(models): print(f'predicting {i}-th model') for k in tqdm(range(iterations)): y_pred_test = model.predict(X_test[k*batch_size:(k+1)*batch_size], num_iteration=model.best_iteration) y_test_pred_total[k*batch_size:(k+1)*batch_size] += y_pred_test y_test_pred_total /= len(models) return y_test_pred_total def predict(deubg=True): # replace leak before submission replace_leak = True # some tuning parameters of models black_day = 10 # threshold of removing continuos zero values # # Prediction on test data #with open(model_dir/'meter_split.pickle', mode='rb') as f: with open(MODEL_DIR/'meter_split_model.pickle', mode='rb') as f: [models0, models1, models2, models3, bid_map] = pickle.load(f) with timer("Preprocessing"): # categorize primary_use column to reduce memory on merge... 
building_meta_df = pd.read_feather(DATA_DIR/'building_metadata.feather') primary_use_list = building_meta_df['primary_use'].unique() primary_use_dict = {key: value for value, key in enumerate(primary_use_list)} print('primary_use_dict: ', primary_use_dict) building_meta_df['primary_use'] = building_meta_df['primary_use'].map(primary_use_dict) year_map = building_meta_df.year_built.value_counts() building_meta_df['year_cnt'] = building_meta_df.year_built.map(year_map) building_meta_df = reduce_mem_usage(building_meta_df, use_float16=True) gc.collect() print('loading...') test_df = pd.read_feather(DATA_DIR/'test.feather') weather_test_df = pd.read_feather(DATA_DIR/'weather_test.feather') weather_test_df = weather_test_df.drop_duplicates(['timestamp', 'site_id']) correct_localtime(weather_test_df) add_holiyday(weather_test_df) print('preprocessing building...') test_df['date'] = test_df['timestamp'].dt.date preprocess(test_df) print('preprocessing weather...') weather_test_df = weather_test_df.groupby('site_id').apply(lambda group: group.interpolate(limit_direction='both')) weather_test_df.groupby('site_id').apply(lambda group: group.isna().sum()) add_sg(weather_test_df) with timer("Feature engineering"): add_lag_feature(weather_test_df, window=3) add_lag_feature(weather_test_df, window=72) test_df['bid_cnt'] = test_df.building_id.map(bid_map) print('reduce mem usage...') test_df = reduce_mem_usage(test_df, use_float16=True) weather_test_df = reduce_mem_usage(weather_test_df, use_float16=True) gc.collect() print (test_df.shape) sample_submission = pd.read_feather(os.path.join(DATA_DIR, 'sample_submission.feather')) sample_submission = reduce_mem_usage(sample_submission) # meter 0 X_test = create_X(test_df, building_meta_df, weather_test_df, target_meter=0) gc.collect() X_test.info() with timer("Predicting meter# 0"): y_test0 = pred(X_test, models0) #sns.distplot(y_test0) print(X_test.shape, y_test0.shape) del X_test gc.collect() # meter 1 X_test = create_X(test_df, building_meta_df, weather_test_df, target_meter=1) gc.collect() with timer("Predicting meter# 1"): y_test1 = pred(X_test, models1) #sns.distplot(y_test1) print(X_test.shape, y_test1.shape) del X_test gc.collect() # meter 2 X_test = create_X(test_df, building_meta_df, weather_test_df, target_meter=2) gc.collect() with timer("Predicting meter# 2"): y_test2 = pred(X_test, models2) #sns.distplot(y_test2) print(X_test.shape, y_test2.shape) del X_test gc.collect() # meter 3 X_test = create_X(test_df, building_meta_df, weather_test_df, target_meter=3) gc.collect() with timer("Predicting meter# 3"): y_test3 = pred(X_test, models3) #sns.distplot(y_test3) print(X_test.shape, y_test3.shape) del X_test gc.collect() # check print(sample_submission.loc[test_df['meter'] == 0, 'meter_reading'].shape,np.expm1(y_test0).shape) print(sample_submission.loc[test_df['meter'] == 1, 'meter_reading'].shape,np.expm1(y_test1).shape) print(sample_submission.loc[test_df['meter'] == 2, 'meter_reading'].shape,np.expm1(y_test2).shape) print(sample_submission.loc[test_df['meter'] == 3, 'meter_reading'].shape,np.expm1(y_test3).shape) sample_submission.loc[test_df['meter'] == 0, 'meter_reading'] = np.expm1(y_test0) sample_submission.loc[test_df['meter'] == 1, 'meter_reading'] = np.expm1(y_test1) sample_submission.loc[test_df['meter'] == 2, 'meter_reading'] = np.expm1(y_test2) sample_submission.loc[test_df['meter'] == 3, 'meter_reading'] = np.expm1(y_test3) # # site-0 correction # https://www.kaggle.com/c/ashrae-energy-prediction/discussion/119261#latest-684102 
site_0_bids = building_meta_df[building_meta_df.site_id == 0].building_id.unique() sample_submission.loc[(test_df.building_id.isin(site_0_bids)) & (test_df.meter==0), 'meter_reading'] = sample_submission[(test_df.building_id.isin(site_0_bids)) & (test_df.meter==0)]['meter_reading'] * 3.4118 if not debug: sample_submission.to_csv(OUTPUT_DIR/'submission_meter.csv', index=False, float_format='%.4f') #np.log1p(sample_submission['meter_reading']).hist(bins=100) # # replace leak data with timer("Post-processing"): if replace_leak: leak_df = pd.read_feather(LEAK_DIR/'leak.feather') print(leak_df.duplicated().sum()) print(leak_df.meter.value_counts()) leak_df.fillna(0, inplace=True) leak_df = leak_df[(leak_df.timestamp.dt.year > 2016) & (leak_df.timestamp.dt.year < 2019)] leak_df.loc[leak_df.meter_reading < 0, 'meter_reading'] = 0 # remove large negative values leak_df = leak_df[leak_df.building_id!=245] sample_submission.loc[sample_submission.meter_reading < 0, 'meter_reading'] = 0 test_df['pred'] = sample_submission.meter_reading leak_df = leak_df.merge(test_df[['building_id', 'meter', 'timestamp', 'pred', 'row_id']], left_on = ['building_id', 'meter', 'timestamp'], right_on = ['building_id', 'meter', 'timestamp'], how = "left") leak_df = leak_df.merge(building_meta_df[['building_id', 'site_id']], on='building_id', how='left') if replace_leak: leak_df.site_id.unique() if replace_leak: leak_df['pred_l1p'] = np.log1p(leak_df.pred) leak_df['meter_reading_l1p'] = np.log1p(leak_df.meter_reading) sns.distplot(leak_df.pred_l1p) sns.distplot(leak_df.meter_reading_l1p) leak_score = np.sqrt(mean_squared_error(leak_df.pred_l1p, leak_df.meter_reading_l1p)) if replace_leak: leak_df = leak_df[['meter_reading', 'row_id']].set_index('row_id').dropna() sample_submission.loc[leak_df.index, 'meter_reading'] = leak_df['meter_reading'] if not debug and replace_leak: sample_submission.to_csv(OUTPUT_DIR/'submission_replaced_meter.csv', index=False, float_format='%.4f') # # Scores #LV score= 0.9743280741946935 print('LV score=', leak_score) if __name__ == '__main__': debug = args.debug print ('debug=', debug) predict(debug)
[ "utils.timer", "argparse.ArgumentParser", "utils.add_lag_feature", "sklearn.metrics.mean_squared_error", "numpy.zeros", "pandas.read_feather", "utils.add_holiyday", "utils.reduce_mem_usage", "gc.collect", "pathlib.Path", "numpy.expm1", "pickle.load", "seaborn.distplot", "utils.add_sg", "utils.preprocess", "os.path.join", "numpy.log1p", "utils.correct_localtime" ]
[((672, 688), 'pathlib.Path', 'Path', (['"""../input"""'], {}), "('../input')\n", (676, 688), False, 'from pathlib import Path\n'), ((700, 720), 'pathlib.Path', 'Path', (['"""../processed"""'], {}), "('../processed')\n", (704, 720), False, 'from pathlib import Path\n'), ((735, 752), 'pathlib.Path', 'Path', (['"""../output"""'], {}), "('../output')\n", (739, 752), False, 'from pathlib import Path\n'), ((766, 783), 'pathlib.Path', 'Path', (['"""../models"""'], {}), "('../models')\n", (770, 783), False, 'from pathlib import Path\n'), ((794, 833), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (817, 833), False, 'import argparse\n'), ((2702, 2727), 'numpy.zeros', 'np.zeros', (['X_test.shape[0]'], {}), '(X_test.shape[0])\n', (2710, 2727), True, 'import numpy as np\n'), ((5310, 5353), 'utils.reduce_mem_usage', 'reduce_mem_usage', (['test_df'], {'use_float16': '(True)'}), '(test_df, use_float16=True)\n', (5326, 5353), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((5376, 5427), 'utils.reduce_mem_usage', 'reduce_mem_usage', (['weather_test_df'], {'use_float16': '(True)'}), '(weather_test_df, use_float16=True)\n', (5392, 5427), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((5433, 5445), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5443, 5445), False, 'import gc\n'), ((5591, 5626), 'utils.reduce_mem_usage', 'reduce_mem_usage', (['sample_submission'], {}), '(sample_submission)\n', (5607, 5626), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((5733, 5745), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5743, 5745), False, 'import gc\n'), ((5932, 5944), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5942, 5944), False, 'import gc\n'), ((6047, 6059), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6057, 6059), False, 'import gc\n'), ((6227, 6239), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6237, 6239), False, 'import gc\n'), ((6341, 6353), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6351, 6353), False, 'import gc\n'), ((6521, 6533), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6531, 6533), False, 'import gc\n'), ((6636, 6648), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6646, 6648), False, 'import gc\n'), ((6815, 6827), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6825, 6827), False, 'import gc\n'), ((7322, 7339), 'numpy.expm1', 'np.expm1', (['y_test0'], {}), '(y_test0)\n', (7330, 7339), True, 'import numpy as np\n'), ((7408, 7425), 'numpy.expm1', 'np.expm1', (['y_test1'], {}), '(y_test1)\n', (7416, 7425), True, 'import numpy as np\n'), ((7494, 7511), 'numpy.expm1', 'np.expm1', (['y_test2'], {}), '(y_test2)\n', (7502, 7511), True, 'import numpy as np\n'), ((7580, 7597), 'numpy.expm1', 'np.expm1', (['y_test3'], {}), '(y_test3)\n', (7588, 7597), True, 'import numpy as np\n'), ((3537, 3551), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3548, 3551), False, 'import pickle\n'), ((3563, 3585), 'utils.timer', 'timer', (['"""Preprocessing"""'], {}), "('Preprocessing')\n", (3568, 3585), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((3691, 3746), 'pandas.read_feather', 'pd.read_feather', (["(DATA_DIR / 'building_metadata.feather')"], {}), "(DATA_DIR / 'building_metadata.feather')\n", (3706, 3746), True, 'import pandas 
as pd\n'), ((4223, 4275), 'utils.reduce_mem_usage', 'reduce_mem_usage', (['building_meta_df'], {'use_float16': '(True)'}), '(building_meta_df, use_float16=True)\n', (4239, 4275), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((4285, 4297), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4295, 4297), False, 'import gc\n'), ((4346, 4388), 'pandas.read_feather', 'pd.read_feather', (["(DATA_DIR / 'test.feather')"], {}), "(DATA_DIR / 'test.feather')\n", (4361, 4388), True, 'import pandas as pd\n'), ((4413, 4463), 'pandas.read_feather', 'pd.read_feather', (["(DATA_DIR / 'weather_test.feather')"], {}), "(DATA_DIR / 'weather_test.feather')\n", (4428, 4463), True, 'import pandas as pd\n'), ((4556, 4590), 'utils.correct_localtime', 'correct_localtime', (['weather_test_df'], {}), '(weather_test_df)\n', (4573, 4590), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((4599, 4628), 'utils.add_holiyday', 'add_holiyday', (['weather_test_df'], {}), '(weather_test_df)\n', (4611, 4628), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((4736, 4755), 'utils.preprocess', 'preprocess', (['test_df'], {}), '(test_df)\n', (4746, 4755), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((5015, 5038), 'utils.add_sg', 'add_sg', (['weather_test_df'], {}), '(weather_test_df)\n', (5021, 5038), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((5057, 5085), 'utils.timer', 'timer', (['"""Feature engineering"""'], {}), "('Feature engineering')\n", (5062, 5085), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((5104, 5146), 'utils.add_lag_feature', 'add_lag_feature', (['weather_test_df'], {'window': '(3)'}), '(weather_test_df, window=3)\n', (5119, 5146), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((5155, 5198), 'utils.add_lag_feature', 'add_lag_feature', (['weather_test_df'], {'window': '(72)'}), '(weather_test_df, window=72)\n', (5170, 5198), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((5514, 5565), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""sample_submission.feather"""'], {}), "(DATA_DIR, 'sample_submission.feather')\n", (5526, 5565), False, 'import os\n'), ((5774, 5802), 'utils.timer', 'timer', (['"""Predicting meter# 0"""'], {}), "('Predicting meter# 0')\n", (5779, 5802), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((6070, 6098), 'utils.timer', 'timer', (['"""Predicting meter# 1"""'], {}), "('Predicting meter# 1')\n", (6075, 6098), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((6364, 6392), 'utils.timer', 'timer', (['"""Predicting meter# 2"""'], {}), "('Predicting meter# 2')\n", (6369, 6392), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((6659, 6687), 'utils.timer', 'timer', (['"""Predicting meter# 3"""'], {}), "('Predicting meter# 3')\n", (6664, 6687), False, 'from 
utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((8245, 8269), 'utils.timer', 'timer', (['"""Post-processing"""'], {}), "('Post-processing')\n", (8250, 8269), False, 'from utils import reduce_mem_usage, add_holiyday, correct_localtime, add_lag_feature, add_sg, preprocess, timer\n'), ((6919, 6936), 'numpy.expm1', 'np.expm1', (['y_test0'], {}), '(y_test0)\n', (6927, 6936), True, 'import numpy as np\n'), ((7022, 7039), 'numpy.expm1', 'np.expm1', (['y_test1'], {}), '(y_test1)\n', (7030, 7039), True, 'import numpy as np\n'), ((7125, 7142), 'numpy.expm1', 'np.expm1', (['y_test2'], {}), '(y_test2)\n', (7133, 7142), True, 'import numpy as np\n'), ((7228, 7245), 'numpy.expm1', 'np.expm1', (['y_test3'], {}), '(y_test3)\n', (7236, 7245), True, 'import numpy as np\n'), ((8319, 8361), 'pandas.read_feather', 'pd.read_feather', (["(LEAK_DIR / 'leak.feather')"], {}), "(LEAK_DIR / 'leak.feather')\n", (8334, 8361), True, 'import pandas as pd\n'), ((9369, 9391), 'numpy.log1p', 'np.log1p', (['leak_df.pred'], {}), '(leak_df.pred)\n', (9377, 9391), True, 'import numpy as np\n'), ((9435, 9466), 'numpy.log1p', 'np.log1p', (['leak_df.meter_reading'], {}), '(leak_df.meter_reading)\n', (9443, 9466), True, 'import numpy as np\n'), ((9480, 9510), 'seaborn.distplot', 'sns.distplot', (['leak_df.pred_l1p'], {}), '(leak_df.pred_l1p)\n', (9492, 9510), True, 'import seaborn as sns\n'), ((9523, 9562), 'seaborn.distplot', 'sns.distplot', (['leak_df.meter_reading_l1p'], {}), '(leak_df.meter_reading_l1p)\n', (9535, 9562), True, 'import seaborn as sns\n'), ((9597, 9660), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['leak_df.pred_l1p', 'leak_df.meter_reading_l1p'], {}), '(leak_df.pred_l1p, leak_df.meter_reading_l1p)\n', (9615, 9660), False, 'from sklearn.metrics import mean_squared_error\n')]
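Two details of the post-processing above are worth spelling out. The site-0 predictions are multiplied by 3.4118, which matches the kWh-to-kBTU conversion (about 3.412 kBTU per kWh) discussed in the linked competition thread, so predictions made in kWh line up with the units of the site-0 meters. And the leak-validation "LV score" is the root mean squared error of log1p-transformed readings, i.e. RMSLE; a tiny numpy-only restatement of that metric, with arbitrary sample numbers:

import numpy as np

def rmsle(y_pred, y_true):
    """Root mean squared log error, the quantity reported as the LV score above."""
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2))

print(rmsle(np.array([10.0, 100.0, 0.0]), np.array([12.0, 90.0, 0.0])))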
# Project: AttractionRepulsionModel # Filename: arm_exp.py # Authors: <NAME> (<EMAIL>). """ arm_exp: A framework for defining and running experiments for the Attraction-Repulsion Model. """ import argparse from arm import arm from itertools import product import pickle import math from matplotlib.animation import FFMpegWriter, FuncAnimation import matplotlib.cm as cm from matplotlib.colors import LogNorm import matplotlib.pyplot as plt import numpy as np from tqdm import tqdm class Experiment(object): """ A flexible, unifying framework for experiments. """ def __init__(self, id, params={}, iters=1, savehist=True, seed=None): """ Inputs: id (str): identifier for the experiment, e.g., 'A' or 'baseline' params (dict): the full parameter set for the simulation runs { 'N' : [int: > 1], 'D' : [int: > 0], 'E' : [[float: > 0]], 'T' : [float: > 0 and < sqrt(D)], 'R' : [float: > 0 and <= 1], 'K' : [float: > 1], 'S' : [int: > 0], 'P' : [float: >= 0 and <= 1], 'shock' : [(int: >= 0, float: >= 0 and <= 1)], 'init' : ['norm', 'emp'] } iters (int): the number of iterated runs for each parameter setting savehist (bool): True if a run's history should be saved seed (int): random seed """ # Unpack singular parameters. self.id, self.iters, self.savehist, self.seed, = id, iters, savehist, seed # Unpack ARM parameters. defaults = {'N' : [100], 'D' : [1], 'E' : [[0.1]], 'T' : [0.25], \ 'R' : [0.25], 'K' : [math.inf], 'S' : [500000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['norm']} plist = [params[p] if p in params else defaults[p] for p in defaults] self.params = list(product(*plist)) # Set up data and results filenames. self.fname = 'exp{}_{}'.format(self.id, self.seed) # Instantiate a list to hold runs data. This data will have shape # A x B x [N x D, N x D, S x (D + 2)] where A is the number of runs # (i.e., unique parameter combinations); B is the number of iterations # per run; and N, D, S are as they are in the polarization framework. self.runs_data = [[] for p in self.params] def run(self): tqdm.write('Running Experiment ' + self.id + '...') # Set up random seeds for iterated runs. rng = np.random.default_rng(self.seed) run_seeds = rng.integers(0, 2**32, size=self.iters) # For each parameter combination, do iterated runs of polarization. silent = len(self.params) > 1 or self.iters > 1 for i, param in enumerate(tqdm(self.params, desc='Simulating runs')): N, D, E, T, R, K, S, P, shock, init = param for seed in tqdm(run_seeds, desc='Iterating run', \ leave=bool(i == len(self.params) - 1)): run_data = arm(N, D, E, T, R, K, S, P, shock, init, seed, silent) if not self.savehist: self.runs_data[i].append((run_data[0], run_data[1], [])) else: self.runs_data[i].append(run_data) def save(self): """ Saves this experiment, including all parameters and run data, to a file named according to the experiment's ID and seed. """ tqdm.write('Saving Experiment ' + self.id + '...') with open('data/' + self.fname + '.pkl', 'wb') as f: pickle.dump(self, f) def variance(self, config): """ Takes as input an N x D configuration and returns its variance. """ return sum(np.var(config, axis=0)) def variances(self, run, iter): """ Takes as input a run and iteration index and returns a 1 x S array of variances of the agents' ideological positions at each step. """ # Check that a configuration history exists. assert self.savehist, 'ERROR: No history to calculate variance per step.' # Get the initial configuration and move history; initialize variances. 
config, _, moves = self.runs_data[run][iter] config = np.copy(config) # Avoid editing actual data. vars = np.zeros(len(moves) + 1) # Replay the agent movements one step at a time and calculate variance. vars[0] = self.variance(config) for step, move in enumerate(moves): config[int(move[0])] = move[2:] vars[step+1] = self.variance(config) return vars def plot_evo(self, runs, iters, title='', anno=''): """ Takes indices of either (i) one run and multiple iterations or (ii) one iteration of multiple runs and plots the given metrics against time. """ tqdm.write('Plotting variance over time...') # Sanity checks and setup. assert self.savehist, 'ERROR: No history to calculate metrics per step.' assert len(runs) == 1 or len(iters) == 1, 'ERROR: One run or one iter' runits = [i for i in product(runs, iters)] # Set up colors. cmap = np.vectorize(lambda x : cm.plasma(x)) colors = np.array(cmap(np.linspace(0, 0.9, len(runits)))).T # Plot variance over time for each run/iteration. fig, ax = plt.subplots() for i, runit in enumerate(tqdm(runits, desc='Calculating variance')): y = self.variances(runit[0], runit[1]) ax.plot(np.arange(len(y)), y, color=colors[i]) ax.set(title=title, xlabel='# Steps', ylabel='Variance') ax.grid() plt.tight_layout() fig.savefig('figs/' + self.fname + anno + '.png', dpi=300) plt.close() def plot_sweep(self, p1, p2, plabels, runs, cmax=None, title='', anno=''): """ Plots the average variance for each run's iterations as a 2D color mesh, where the mesh is organized according to the given parameter ranges. """ tqdm.write('Plotting average variance...') # Calculate average variance per run. aves = np.zeros(len(runs)) for i, run in enumerate(tqdm(runs, desc='Averaging iterations')): aves[i] = np.average([self.variance(iter[1]) \ for iter in self.runs_data[run]]) # Plot average variances. fig, ax = plt.subplots() pcm = ax.pcolormesh(p1, p2, aves.reshape(len(p1), len(p2)).T, \ cmap='plasma', vmin=0, vmax=cmax, shading='nearest') fig.colorbar(pcm, ax=ax, label='Variance') ax.set(title=title, xlabel=plabels[0], ylabel=plabels[1]) plt.tight_layout() fig.savefig('figs/' + self.fname + anno + '.png', dpi=300) plt.close() def animate_1D(self, run, iter, frame=None, anno='', colormode=None): """ Animate a 1D histogram of agent ideological positions. """ tqdm.write('Animating histogram of ideological positions...') # Check that a configuration history exists. assert self.savehist, 'ERROR: No history to show cliques per step.' config, _, moves = self.runs_data[run][iter] config = np.copy(config) # Avoid editing actual data. S, N, D = len(moves) + 1, np.shape(config)[0], self.params[run][1] assert D == 1, 'ERROR: Can only animate 1D' # Set up colors. cmap = np.vectorize(lambda x : cm.plasma(x)) if colormode == 0: # Color corresponding to the run number. c = np.array(cmap(np.linspace(0, 0.9, len(self.params)))).T[run] elif colormode == 1: # Color corresponding to the iteration number. c = np.array(cmap(np.linspace(0, 0.9, self.iters))).T[iter] else: # Use black no matter what. c = 'k' # Set up plot and histogram. fig, ax = plt.subplots(dpi=300) bins = 50 hist, edges = np.histogram(config, bins=bins, range=[0, 1]) bar = ax.bar(edges[:-1], hist, width=1/bins, align='edge', color=c) def init(): ax.set_title('step 0 of {}'.format(S-1), loc='right', \ fontsize='small') ax.set(xlabel='D1', ylabel='# Agents', xlim=[0, 1], ylim=[0, N]) ax.grid() plt.tight_layout() return [b for b in bar] # Set frame step. if frame == None: # Target 50fps with duration that scales linearly with steps. 
secs = (11 / 49600) * S + (565 / 62) frame_step = int(S / (50 * secs)) else: frame_step = frame def update(i): # Replay the configuration's move history for the elapsed time. if i > 0: for step in np.arange(i - frame_step + 1, i + 1): config[int(moves[step-1][0])] = moves[step-1][2:] # Update the figure. ax.set_title('step {} of {}'.format(i, S-1), loc='right', \ fontsize='small') hist, _ = np.histogram(config, bins=bins, range=[0, 1]) [b.set_height(hist[bi]) for bi, b in enumerate(bar)] return [b for b in bar] # Animate. frames = np.arange(0, S, frame_step) ani = FuncAnimation(fig, update, frames, init, interval=20, blit=True) ani.save('figs/' + self.fname + '_ani' + anno + '.mp4') plt.close() def animate_2D(self, run, iter, frame=None, anno=''): """ Animate a 2D histogram of agent ideological positions. """ tqdm.write('Animating histogram of ideological positions...') # Check that a configuration history exists. assert self.savehist, 'ERROR: No history to show cliques per step.' config, _, moves = self.runs_data[run][iter] config = np.copy(config) # Avoid editing actual data. S, N, D = len(moves) + 1, np.shape(config)[0], self.params[run][1] assert D == 2, 'ERROR: Can only animate 2D' fig, ax = plt.subplots(dpi=300) num_bins = [50, 50] hist, xedges, yedges = \ np.histogram2d(config.T[0], config.T[1], bins=num_bins, \ range=[[0, 1], [0, 1]]) X, Y = np.meshgrid(xedges, yedges) pcm = ax.pcolormesh(X, Y, hist.T, cmap='plasma', norm=LogNorm(1, N)) fig.colorbar(pcm, ax=ax, label='# Agents') def init(): ax.set_title('step 0 of {}'.format(S-1), loc='right', \ fontsize='small') ax.set(xlabel='D1', ylabel='D2') plt.tight_layout() return pcm, # Set frame step. if frame == None: # Target 50fps with duration that scales linearly with steps. secs = (11 / 49600) * S + (565 / 62) frame_step = int(S / (50 * secs)) else: frame_step = frame def update(i): # Replay the configuration's move history for the elapsed time. if i > 0: for step in np.arange(i - frame_step + 1, i + 1): config[int(moves[step-1][0])] = moves[step-1][2:] # Update the figure. ax.set_title('step {} of {}'.format(i, S-1), loc='right', \ fontsize='small') hist = np.histogram2d(config.T[0], config.T[1], bins=num_bins, \ range=[[0, 1], [0, 1]])[0] pcm.set_array(hist.T) return pcm, # Animate. frames = np.arange(0, S, frame_step) ani = FuncAnimation(fig, update, frames, init, interval=20, blit=True) ani.save('figs/' + self.fname + '_ani' + anno + '.mp4') plt.close() def expA_evo(seed=None): """ With default parameters in 1D and a subset of tolerance-responsiveness space, investigate the system's evolution w.r.t. variance. Data from this experiment produces Figs. 1 and 2. """ T = np.arange(0.05, 1.01, 0.1) params = {'N' : [100], 'D' : [1], 'E' : [[0.1]], 'T' : T, 'R' : [0.25], \ 'K' : [math.inf], 'S' : [2500000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('A_evo', params, seed=seed) exp.run() exp.save() exp.plot_evo(runs=np.arange(len(exp.params)), iters=[0]) def expA_sweep(seed=None): """ With default parameters in 1D, sweep tolerance-responsiveness space and plot average final variance. Data from this experiment produces Figs. 3, S1C, and S3A-B. 
""" T, R = np.arange(0.05, 1.01, 0.05), np.arange(0.05, 1.01, 0.05) params = {'N' : [100], 'D' : [1], 'E' : [[0.1]], 'T' : T, 'R' : R, \ 'K' : [math.inf], 'S' : [1000000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('A_sweep', params, iters=20, savehist=False, seed=seed) exp.run() exp.save() exp.plot_sweep(T, R, ('T', 'R'), runs=np.arange(len(exp.params)), cmax=0.25) def expB_evo(seed=None): """ With default parameters in 1D and a subset of tolerance-exposure space, investigate the system's evolution w.r.t. variance. Data from this experiment produces Fig. 5. """ E = [[e] for e in np.arange(0.05, 0.51, 0.05)] params = {'N' : [100], 'D' : [1], 'E' : E, 'T' : [0.3], 'R' : [0.25], \ 'K' : [math.inf], 'S' : [2500000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('B_evo', params, seed=seed) exp.run() exp.save() exp.plot_evo(runs=np.arange(len(exp.params)), iters=[0]) def expB_sweep(seed=None): """ With default parameters in 1D, sweep tolerance-exposure space and plot average final variance. Data from this experiment produces Figs. 4 and S3C. """ T, E = np.arange(0.05, 1.01, 0.05), [[e] for e in np.arange(0.05, 0.51, 0.05)] params = {'N' : [100], 'D' : [1], 'E' : E, 'T' : T, 'R' : [0.25], \ 'K' : [math.inf], 'S' : [2000000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('B_sweep', params, iters=20, savehist=False, seed=seed) exp.run() exp.save() exp.plot_sweep(T, E, ('T', 'E'), runs=np.arange(len(exp.params)), cmax=0.25) def expC_evo(seed=None): """ With default parameters in 2D and a subset of tolerance-responsiveness space, investigate the system's evolution w.r.t. variance. Data from this experiment was not used in the paper. """ T = np.arange(0.05, 2**0.5, 0.1) params = {'N' : [100], 'D' : [2], 'E' : [[0.1, 0.1]], 'T' : T, \ 'R' : [0.25], 'K' : [math.inf], 'S' : [2500000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('C_evo', params, seed=seed) exp.run() exp.save() exp.plot_evo(runs=np.arange(len(exp.params)), iters=[0]) def expC_sweep(seed=None): """ With default parameters in 2D, sweep tolerance-responsiveness space and plot average final variance. Data from this experiment produces Fig. S4. """ T, R = np.arange(0.05, 2**0.5, 0.05), np.arange(0.05, 1.01, 0.05) params = {'N' : [100], 'D' : [2], 'E' : [[0.1, 0.1]], 'T' : T, 'R' : R, \ 'K' : [math.inf], 'S' : [1000000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('C_sweep', params, iters=20, savehist=False, seed=seed) exp.run() exp.save() exp.plot_sweep(T, R, ('T', 'R'), runs=np.arange(len(exp.params)), cmax=0.5) def expD_evo(seed=None): """ With default parameters in 2D and a subset of exposures, investigate the system's evolution w.r.t. variance. Data from this experiment produces Fig. 6. """ E = [[0.1, e] for e in np.arange(0.05, 0.51, 0.05)] params = {'N' : [100], 'D' : [2], 'E' : E, 'T' : [0.25], 'R' : [0.25], \ 'K' : [math.inf], 'S' : [2500000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('D_evo', params, seed=seed) exp.run() exp.save() exp.plot_evo(runs=np.arange(len(exp.params)), iters=[0]) def expD_sweep(seed=None): """ With default parameters in 2D, sweep exposures and plot average final variance. Data from this experiment produces Fig. S5. 
""" E = np.arange(0.05, 0.51, 0.05) params = {'N' : [100], 'D' : [2], 'E' : list(product(E, E)), 'T' : [0.25], \ 'R' : [0.25], 'K' : [math.inf], 'S' : [2000000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('D_sweep', params, iters=20, savehist=False, seed=seed) exp.run() exp.save() exp.plot_sweep(E, E, ('E1', 'E2'), runs=np.arange(len(exp.params)), cmax=0.5) def expE_evo(seed=None): """ With default parameters in 1D and a subset of self-interest space, investigate the system's evolution w.r.t. variance. Data from this experiment produces Fig. 7. """ P = np.arange(0, 0.11, 0.01) params = {'N' : [100], 'D' : [1], 'E' : [[0.1]], 'T' : [0.25], \ 'R' : [0.25], 'K' : [math.inf], 'S' : [2500000], 'P' : P, \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('E_evo', params, seed=seed) exp.run() exp.save() exp.plot_evo(runs=np.arange(len(exp.params)), iters=[0]) def expE_sweep(seed=None): """ With default parameters in 1D, sweep self-interest space and plot average final variance. Data from this experiment produces Fig. S6. """ T, P = np.arange(0.05, 1.01, 0.1), np.arange(0, 1.001, 0.05) params = {'N' : [100], 'D' : [1], 'E' : [[0.1]], 'T' : [0.25], \ 'R' : [0.25], 'K' : [math.inf], 'S' : [2000000], 'P' : P, \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('E_sweep', params, iters=20, savehist=False, seed=seed) exp.run() exp.save() exp.plot_sweep(T, P, ('T', 'P'), runs=np.arange(len(exp.params)), cmax=0.25) def expF_evo(seed=None): """ With default parameters in 1D and a subset of external shocks, investigate the system's evolution w.r.t. variance. Data from this experiment produces Fig. 8. """ shocks = [(500000, delta) for delta in np.arange(0, 0.81, 0.05)] params = {'N' : [100], 'D' : [1], 'E' : [[0.1]], 'T' : [0.25], \ 'R' : [0.25], 'K' : [math.inf], 'S' : [2500000], 'P' : [0], \ 'shock' : shocks, 'init' : ['norm']} exp = Experiment('F_evo', params, seed=seed) exp.run() exp.save() exp.plot_evo(runs=np.arange(len(exp.params)), iters=[0]) def expF_sweep(seed=None): """ With default parameters in 1D, sweep external shocks and plot final average variance. Data from this experiment produces Fig. 9. """ steps, deltas = np.arange(100000, 900001, 100000), np.arange(0, 0.81, 0.05) params = {'N' : [100], 'D' : [1], 'E' : [[0.1]], 'T' : [0.25], \ 'R' : [0.25], 'K' : [math.inf], 'S' : [2000000], 'P' : [0], \ 'shock' : list(product(steps, deltas)), 'init' : ['norm']} exp = Experiment('F_sweep', params, iters=20, savehist=False, seed=seed) exp.run() exp.save() exp.plot_sweep(steps, deltas, ('Shock Step', 'Shock Strength'), \ runs=np.arange(len(exp.params)), cmax=0.25) def expR1_emp_evo(seed=None): """ With default parameters in 1D, a subset of tolerance-responsiveness space, and empirical initialization, investigate the system's evolution w.r.t. variance. Data from this experiment produces Fig. S1B. """ T = np.arange(0.05, 1.01, 0.1) params = {'N' : [100], 'D' : [1], 'E' : [[0.1]], 'T' : T, 'R' : [0.25], \ 'K' : [math.inf], 'S' : [2500000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['emp']} exp = Experiment('R1_emp_evo', params, seed=seed) exp.run() exp.save() exp.plot_evo(runs=np.arange(len(exp.params)), iters=[0]) def expR1_emp_its(seed=None): """ Same as the above experiment, but with more iterations for average behavior. Data from this experiment produces Fig. S1C. 
""" T = np.arange(0.05, 1.01, 0.05) params = {'N' : [100], 'D' : [1], 'E' : [[0.1]], 'T' : T, 'R' : [0.25], \ 'K' : [math.inf], 'S' : [1000000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['emp']} exp = Experiment('R1_emp_its', params, iters=20, savehist=False, seed=seed) exp.run() exp.save() def expR1_sto_evo(seed=None): """ With default parameters in 1D and varying steepness of stochastic repulsion, investigate the system's evolution w.r.t. variance. Data from this experiment produces Fig. S2B. """ K = np.append([np.power(2, i) for i in np.arange(1, 7)], [math.inf]) params = {'N' : [100], 'D' : [1], 'E' : [[0.1]], 'T' : [0.25], \ 'R' : [0.25], 'K' : K, 'S' : [2000000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('R1_sto_evo', params, seed=seed) exp.run() exp.save() exp.plot_evo(runs=np.arange(len(exp.params)), iters=[0]) def expR1_sto_its(seed=None): """ Same as the above experiment, but with more iterations for average behavior. Data from this experiment produces Fig. S2C. """ K = np.append([np.power(2, i) for i in np.arange(1, 7)], [math.inf]) params = {'N' : [100], 'D' : [1], 'E' : [[0.1]], 'T' : [0.25], \ 'R' : [0.25], 'K' : K, 'S' : [1500000], 'P' : [0], \ 'shock' : [(None, None)], 'init' : ['norm']} exp = Experiment('R1_sto_its', params, iters=20, savehist=False, seed=seed) exp.run() exp.save() if __name__ == '__main__': # Parse command line arguments. parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-E', '--exps', type=str, nargs='+', required=True, \ help='IDs of experiments to run') parser.add_argument('-R', '--rand_seed', type=int, default=None, \ help='Seed for random number generation') args = parser.parse_args() # Run selected experiments. exps = {'A_evo' : expA_evo, 'A_sweep' : expA_sweep, 'B_evo' : expB_evo, \ 'B_sweep' : expB_sweep, 'C_evo' : expC_evo, 'C_sweep' : expC_sweep,\ 'D_evo' : expD_evo, 'D_sweep' : expD_sweep, 'E_evo' : expE_evo, \ 'E_sweep' : expE_sweep, 'F_evo' : expF_evo, 'F_sweep' : expF_sweep,\ 'R1_emp_evo' : expR1_emp_evo, 'R1_emp_its' : expR1_emp_its, \ 'R1_sto_evo' : expR1_sto_evo, 'R1_sto_its' : expR1_sto_its} for id in args.exps: exps[id](args.rand_seed)
[ "pickle.dump", "argparse.ArgumentParser", "matplotlib.animation.FuncAnimation", "numpy.random.default_rng", "numpy.shape", "numpy.histogram", "matplotlib.colors.LogNorm", "numpy.arange", "arm.arm", "matplotlib.pyplot.tight_layout", "numpy.meshgrid", "numpy.copy", "matplotlib.pyplot.close", "numpy.histogram2d", "numpy.power", "numpy.linspace", "itertools.product", "numpy.var", "matplotlib.pyplot.subplots", "tqdm.tqdm", "matplotlib.cm.plasma", "tqdm.tqdm.write" ]
[((12440, 12466), 'numpy.arange', 'np.arange', (['(0.05)', '(1.01)', '(0.1)'], {}), '(0.05, 1.01, 0.1)\n', (12449, 12466), True, 'import numpy as np\n'), ((15066, 15096), 'numpy.arange', 'np.arange', (['(0.05)', '(2 ** 0.5)', '(0.1)'], {}), '(0.05, 2 ** 0.5, 0.1)\n', (15075, 15096), True, 'import numpy as np\n'), ((16938, 16965), 'numpy.arange', 'np.arange', (['(0.05)', '(0.51)', '(0.05)'], {}), '(0.05, 0.51, 0.05)\n', (16947, 16965), True, 'import numpy as np\n'), ((17613, 17637), 'numpy.arange', 'np.arange', (['(0)', '(0.11)', '(0.01)'], {}), '(0, 0.11, 0.01)\n', (17622, 17637), True, 'import numpy as np\n'), ((20312, 20338), 'numpy.arange', 'np.arange', (['(0.05)', '(1.01)', '(0.1)'], {}), '(0.05, 1.01, 0.1)\n', (20321, 20338), True, 'import numpy as np\n'), ((20884, 20911), 'numpy.arange', 'np.arange', (['(0.05)', '(1.01)', '(0.05)'], {}), '(0.05, 1.01, 0.05)\n', (20893, 20911), True, 'import numpy as np\n'), ((22543, 22587), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (22566, 22587), False, 'import argparse\n'), ((2519, 2570), 'tqdm.tqdm.write', 'tqdm.write', (["('Running Experiment ' + self.id + '...')"], {}), "('Running Experiment ' + self.id + '...')\n", (2529, 2570), False, 'from tqdm import tqdm\n'), ((2638, 2670), 'numpy.random.default_rng', 'np.random.default_rng', (['self.seed'], {}), '(self.seed)\n', (2659, 2670), True, 'import numpy as np\n'), ((3617, 3667), 'tqdm.tqdm.write', 'tqdm.write', (["('Saving Experiment ' + self.id + '...')"], {}), "('Saving Experiment ' + self.id + '...')\n", (3627, 3667), False, 'from tqdm import tqdm\n'), ((4452, 4467), 'numpy.copy', 'np.copy', (['config'], {}), '(config)\n', (4459, 4467), True, 'import numpy as np\n'), ((5081, 5125), 'tqdm.tqdm.write', 'tqdm.write', (['"""Plotting variance over time..."""'], {}), "('Plotting variance over time...')\n", (5091, 5125), False, 'from tqdm import tqdm\n'), ((5609, 5623), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5621, 5623), True, 'import matplotlib.pyplot as plt\n'), ((5909, 5927), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5925, 5927), True, 'import matplotlib.pyplot as plt\n'), ((6005, 6016), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6014, 6016), True, 'import matplotlib.pyplot as plt\n'), ((6296, 6338), 'tqdm.tqdm.write', 'tqdm.write', (['"""Plotting average variance..."""'], {}), "('Plotting average variance...')\n", (6306, 6338), False, 'from tqdm import tqdm\n'), ((6684, 6698), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6696, 6698), True, 'import matplotlib.pyplot as plt\n'), ((6982, 7000), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6998, 7000), True, 'import matplotlib.pyplot as plt\n'), ((7078, 7089), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7087, 7089), True, 'import matplotlib.pyplot as plt\n'), ((7268, 7329), 'tqdm.tqdm.write', 'tqdm.write', (['"""Animating histogram of ideological positions..."""'], {}), "('Animating histogram of ideological positions...')\n", (7278, 7329), False, 'from tqdm import tqdm\n'), ((7537, 7552), 'numpy.copy', 'np.copy', (['config'], {}), '(config)\n', (7544, 7552), True, 'import numpy as np\n'), ((8217, 8238), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': '(300)'}), '(dpi=300)\n', (8229, 8238), True, 'import matplotlib.pyplot as plt\n'), ((8281, 8326), 'numpy.histogram', 'np.histogram', (['config'], {'bins': 'bins', 'range': '[0, 1]'}), 
'(config, bins=bins, range=[0, 1])\n', (8293, 8326), True, 'import numpy as np\n'), ((9616, 9643), 'numpy.arange', 'np.arange', (['(0)', 'S', 'frame_step'], {}), '(0, S, frame_step)\n', (9625, 9643), True, 'import numpy as np\n'), ((9659, 9723), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'update', 'frames', 'init'], {'interval': '(20)', 'blit': '(True)'}), '(fig, update, frames, init, interval=20, blit=True)\n', (9672, 9723), False, 'from matplotlib.animation import FFMpegWriter, FuncAnimation\n'), ((9798, 9809), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9807, 9809), True, 'import matplotlib.pyplot as plt\n'), ((9972, 10033), 'tqdm.tqdm.write', 'tqdm.write', (['"""Animating histogram of ideological positions..."""'], {}), "('Animating histogram of ideological positions...')\n", (9982, 10033), False, 'from tqdm import tqdm\n'), ((10241, 10256), 'numpy.copy', 'np.copy', (['config'], {}), '(config)\n', (10248, 10256), True, 'import numpy as np\n'), ((10437, 10458), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': '(300)'}), '(dpi=300)\n', (10449, 10458), True, 'import matplotlib.pyplot as plt\n'), ((10535, 10614), 'numpy.histogram2d', 'np.histogram2d', (['config.T[0]', 'config.T[1]'], {'bins': 'num_bins', 'range': '[[0, 1], [0, 1]]'}), '(config.T[0], config.T[1], bins=num_bins, range=[[0, 1], [0, 1]])\n', (10549, 10614), True, 'import numpy as np\n'), ((10661, 10688), 'numpy.meshgrid', 'np.meshgrid', (['xedges', 'yedges'], {}), '(xedges, yedges)\n', (10672, 10688), True, 'import numpy as np\n'), ((11992, 12019), 'numpy.arange', 'np.arange', (['(0)', 'S', 'frame_step'], {}), '(0, S, frame_step)\n', (12001, 12019), True, 'import numpy as np\n'), ((12035, 12099), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'update', 'frames', 'init'], {'interval': '(20)', 'blit': '(True)'}), '(fig, update, frames, init, interval=20, blit=True)\n', (12048, 12099), False, 'from matplotlib.animation import FFMpegWriter, FuncAnimation\n'), ((12174, 12185), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12183, 12185), True, 'import matplotlib.pyplot as plt\n'), ((13052, 13079), 'numpy.arange', 'np.arange', (['(0.05)', '(1.01)', '(0.05)'], {}), '(0.05, 1.01, 0.05)\n', (13061, 13079), True, 'import numpy as np\n'), ((13081, 13108), 'numpy.arange', 'np.arange', (['(0.05)', '(1.01)', '(0.05)'], {}), '(0.05, 1.01, 0.05)\n', (13090, 13108), True, 'import numpy as np\n'), ((14350, 14377), 'numpy.arange', 'np.arange', (['(0.05)', '(1.01)', '(0.05)'], {}), '(0.05, 1.01, 0.05)\n', (14359, 14377), True, 'import numpy as np\n'), ((15669, 15700), 'numpy.arange', 'np.arange', (['(0.05)', '(2 ** 0.5)', '(0.05)'], {}), '(0.05, 2 ** 0.5, 0.05)\n', (15678, 15700), True, 'import numpy as np\n'), ((15700, 15727), 'numpy.arange', 'np.arange', (['(0.05)', '(1.01)', '(0.05)'], {}), '(0.05, 1.01, 0.05)\n', (15709, 15727), True, 'import numpy as np\n'), ((18199, 18225), 'numpy.arange', 'np.arange', (['(0.05)', '(1.01)', '(0.1)'], {}), '(0.05, 1.01, 0.1)\n', (18208, 18225), True, 'import numpy as np\n'), ((18227, 18252), 'numpy.arange', 'np.arange', (['(0)', '(1.001)', '(0.05)'], {}), '(0, 1.001, 0.05)\n', (18236, 18252), True, 'import numpy as np\n'), ((19501, 19534), 'numpy.arange', 'np.arange', (['(100000)', '(900001)', '(100000)'], {}), '(100000, 900001, 100000)\n', (19510, 19534), True, 'import numpy as np\n'), ((19536, 19560), 'numpy.arange', 'np.arange', (['(0)', '(0.81)', '(0.05)'], {}), '(0, 0.81, 0.05)\n', (19545, 19560), True, 'import numpy as np\n'), 
((1996, 2011), 'itertools.product', 'product', (['*plist'], {}), '(*plist)\n', (2003, 2011), False, 'from itertools import product\n'), ((2903, 2944), 'tqdm.tqdm', 'tqdm', (['self.params'], {'desc': '"""Simulating runs"""'}), "(self.params, desc='Simulating runs')\n", (2907, 2944), False, 'from tqdm import tqdm\n'), ((3743, 3763), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (3754, 3763), False, 'import pickle\n'), ((3920, 3942), 'numpy.var', 'np.var', (['config'], {'axis': '(0)'}), '(config, axis=0)\n', (3926, 3942), True, 'import numpy as np\n'), ((5659, 5700), 'tqdm.tqdm', 'tqdm', (['runits'], {'desc': '"""Calculating variance"""'}), "(runits, desc='Calculating variance')\n", (5663, 5700), False, 'from tqdm import tqdm\n'), ((6457, 6496), 'tqdm.tqdm', 'tqdm', (['runs'], {'desc': '"""Averaging iterations"""'}), "(runs, desc='Averaging iterations')\n", (6461, 6496), False, 'from tqdm import tqdm\n'), ((8654, 8672), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8670, 8672), True, 'import matplotlib.pyplot as plt\n'), ((9425, 9470), 'numpy.histogram', 'np.histogram', (['config'], {'bins': 'bins', 'range': '[0, 1]'}), '(config, bins=bins, range=[0, 1])\n', (9437, 9470), True, 'import numpy as np\n'), ((11014, 11032), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11030, 11032), True, 'import matplotlib.pyplot as plt\n'), ((13752, 13779), 'numpy.arange', 'np.arange', (['(0.05)', '(0.51)', '(0.05)'], {}), '(0.05, 0.51, 0.05)\n', (13761, 13779), True, 'import numpy as np\n'), ((16365, 16392), 'numpy.arange', 'np.arange', (['(0.05)', '(0.51)', '(0.05)'], {}), '(0.05, 0.51, 0.05)\n', (16374, 16392), True, 'import numpy as np\n'), ((17016, 17029), 'itertools.product', 'product', (['E', 'E'], {}), '(E, E)\n', (17023, 17029), False, 'from itertools import product\n'), ((18916, 18940), 'numpy.arange', 'np.arange', (['(0)', '(0.81)', '(0.05)'], {}), '(0, 0.81, 0.05)\n', (18925, 18940), True, 'import numpy as np\n'), ((19738, 19760), 'itertools.product', 'product', (['steps', 'deltas'], {}), '(steps, deltas)\n', (19745, 19760), False, 'from itertools import product\n'), ((21489, 21503), 'numpy.power', 'np.power', (['(2)', 'i'], {}), '(2, i)\n', (21497, 21503), True, 'import numpy as np\n'), ((22096, 22110), 'numpy.power', 'np.power', (['(2)', 'i'], {}), '(2, i)\n', (22104, 22110), True, 'import numpy as np\n'), ((3167, 3221), 'arm.arm', 'arm', (['N', 'D', 'E', 'T', 'R', 'K', 'S', 'P', 'shock', 'init', 'seed', 'silent'], {}), '(N, D, E, T, R, K, S, P, shock, init, seed, silent)\n', (3170, 3221), False, 'from arm import arm\n'), ((5356, 5376), 'itertools.product', 'product', (['runs', 'iters'], {}), '(runs, iters)\n', (5363, 5376), False, 'from itertools import product\n'), ((5446, 5458), 'matplotlib.cm.plasma', 'cm.plasma', (['x'], {}), '(x)\n', (5455, 5458), True, 'import matplotlib.cm as cm\n'), ((7618, 7634), 'numpy.shape', 'np.shape', (['config'], {}), '(config)\n', (7626, 7634), True, 'import numpy as np\n'), ((7780, 7792), 'matplotlib.cm.plasma', 'cm.plasma', (['x'], {}), '(x)\n', (7789, 7792), True, 'import matplotlib.cm as cm\n'), ((9140, 9176), 'numpy.arange', 'np.arange', (['(i - frame_step + 1)', '(i + 1)'], {}), '(i - frame_step + 1, i + 1)\n', (9149, 9176), True, 'import numpy as np\n'), ((10322, 10338), 'numpy.shape', 'np.shape', (['config'], {}), '(config)\n', (10330, 10338), True, 'import numpy as np\n'), ((10752, 10765), 'matplotlib.colors.LogNorm', 'LogNorm', (['(1)', 'N'], {}), '(1, N)\n', (10759, 10765), False, 
'from matplotlib.colors import LogNorm\n'), ((11488, 11524), 'numpy.arange', 'np.arange', (['(i - frame_step + 1)', '(i + 1)'], {}), '(i - frame_step + 1, i + 1)\n', (11497, 11524), True, 'import numpy as np\n'), ((11770, 11849), 'numpy.histogram2d', 'np.histogram2d', (['config.T[0]', 'config.T[1]'], {'bins': 'num_bins', 'range': '[[0, 1], [0, 1]]'}), '(config.T[0], config.T[1], bins=num_bins, range=[[0, 1], [0, 1]])\n', (11784, 11849), True, 'import numpy as np\n'), ((14393, 14420), 'numpy.arange', 'np.arange', (['(0.05)', '(0.51)', '(0.05)'], {}), '(0.05, 0.51, 0.05)\n', (14402, 14420), True, 'import numpy as np\n'), ((21513, 21528), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (21522, 21528), True, 'import numpy as np\n'), ((22120, 22135), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (22129, 22135), True, 'import numpy as np\n'), ((8051, 8082), 'numpy.linspace', 'np.linspace', (['(0)', '(0.9)', 'self.iters'], {}), '(0, 0.9, self.iters)\n', (8062, 8082), True, 'import numpy as np\n')]
import importlib from hydroDL import kPath, utils from hydroDL.app import waterQuality from hydroDL.master import basins from hydroDL.data import usgs, gageII, gridMET, ntn from hydroDL.master import slurm from hydroDL.post import axplot, figplot import numpy as np import matplotlib.pyplot as plt import os import pandas as pd import json import scipy from astropy.timeseries import LombScargle import matplotlib.gridspec as gridspec dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel') with open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f: dictSite = json.load(f) codeLst = sorted(usgs.newC) ep = 500 reTest = False dataName = 'rbWN5' siteNoLst = dictSite['comb'] nSite = len(siteNoLst) # load all sequence if True: dictLSTMLst = list() # LSTM labelLst = ['QTFP_C'] for label in labelLst: dictLSTM = dict() trainSet = 'comb-B10' outName = '{}-{}-{}-{}'.format(dataName, 'comb', label, trainSet) for k, siteNo in enumerate(siteNoLst): print('\t site {}/{}'.format(k, len(siteNoLst)), end='\r') df = basins.loadSeq(outName, siteNo) dictLSTM[siteNo] = df dictLSTMLst.append(dictLSTM) # WRTDS dictWRTDS = dict() dirWRTDS = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-W', 'B10', 'output') for k, siteNo in enumerate(siteNoLst): print('\t site {}/{}'.format(k, len(siteNoLst)), end='\r') saveFile = os.path.join(dirWRTDS, siteNo) df = pd.read_csv(saveFile, index_col=None).set_index('date') # df = utils.time.datePdf(df) dictWRTDS[siteNo] = df # Observation dictObs = dict() for k, siteNo in enumerate(siteNoLst): print('\t site {}/{}'.format(k, len(siteNoLst)), end='\r') df = waterQuality.readSiteTS( siteNo, varLst=['00060']+codeLst, freq='W') dictObs[siteNo] = df # calculate correlation tt = np.datetime64('2010-01-01') t0 = np.datetime64('1980-01-01') ind1 = np.where((df.index.values < tt) & (df.index.values >= t0))[0] ind2 = np.where(df.index.values >= tt)[0] dictLSTM = dictLSTMLst[0] corrMat = np.full([len(siteNoLst), len(codeLst), 3], np.nan) rmseMat = np.full([len(siteNoLst), len(codeLst), 3], np.nan) for ic, code in enumerate(codeLst): for siteNo in dictSite[code]: indS = siteNoLst.index(siteNo) v1 = dictLSTM[siteNo][code].iloc[ind2].values v2 = dictWRTDS[siteNo][code].iloc[ind2].values v3 = dictObs[siteNo][code].iloc[ind2].values vv1, vv2, vv3 = utils.rmNan([v1, v2, v3], returnInd=False) rmse1, corr1 = utils.stat.calErr(vv1, vv2) rmse2, corr2 = utils.stat.calErr(vv1, vv3) rmse3, corr3 = utils.stat.calErr(vv2, vv3) corrMat[indS, ic, 0] = corr1 corrMat[indS, ic, 1] = corr2 corrMat[indS, ic, 2] = corr3 rmseMat[indS, ic, 0] = rmse1 rmseMat[indS, ic, 1] = rmse2 rmseMat[indS, ic, 2] = rmse3 # for code in codeLst: dfT = pd.DataFrame(index=codeLst, columns=['nSite', 'nSample']) for code in codeLst: siteNoCode = dictSite[code] nSite = len(dictSite[code]) nSampleLst = list() for siteNo in siteNoCode: nSample = np.where(~np.isnan(dictObs[siteNo][code].values))[0].shape[0] nSampleLst.append(nSample) dfT.at[code, 'nSite'] = nSite dfT.at[code, 'nSample'] = int(np.mean(nSampleLst))
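For each constituent and site, the loop above aligns three weekly series on the post-2010 test window (the LSTM prediction, the WRTDS prediction, and the observation), drops time steps where any of the three is missing, and computes RMSE and correlation for every pair. The snippet below is a self-contained, library-free illustration of that step on synthetic data; it assumes utils.rmNan and utils.stat.calErr behave roughly as sketched (drop-NaN alignment and an (RMSE, correlation) pair), which is an assumption rather than hydroDL's documented API.

# Illustrative stand-in for the utils.rmNan / utils.stat.calErr calls above.
import numpy as np

def cal_err(a, b):
    # root-mean-square error and Pearson correlation between two aligned series
    rmse = np.sqrt(np.mean((a - b) ** 2))
    corr = np.corrcoef(a, b)[0, 1]
    return rmse, corr

rng = np.random.default_rng(0)
v1 = rng.normal(size=200)                           # stand-in for the LSTM prediction
v2 = v1 + rng.normal(scale=0.3, size=200)           # stand-in for the WRTDS prediction
v3 = v1 + rng.normal(scale=0.5, size=200)           # stand-in for the observations
v3[rng.choice(200, 30, replace=False)] = np.nan     # simulate missing observations

mask = ~(np.isnan(v1) | np.isnan(v2) | np.isnan(v3))  # keep fully observed steps only
vv1, vv2, vv3 = v1[mask], v2[mask], v3[mask]
print(cal_err(vv1, vv2))  # LSTM vs WRTDS
print(cal_err(vv1, vv3))  # LSTM vs observation
print(cal_err(vv2, vv3))  # WRTDS vs observation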
[ "pandas.DataFrame", "json.load", "numpy.datetime64", "pandas.read_csv", "hydroDL.utils.stat.calErr", "numpy.isnan", "hydroDL.master.basins.loadSeq", "numpy.where", "numpy.mean", "os.path.join", "hydroDL.utils.rmNan", "hydroDL.app.waterQuality.readSiteTS" ]
[((445, 504), 'os.path.join', 'os.path.join', (['kPath.dirData', '"""USGS"""', '"""inventory"""', '"""siteSel"""'], {}), "(kPath.dirData, 'USGS', 'inventory', 'siteSel')\n", (457, 504), False, 'import os\n'), ((3108, 3165), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'codeLst', 'columns': "['nSite', 'nSample']"}), "(index=codeLst, columns=['nSite', 'nSample'])\n", (3120, 3165), True, 'import pandas as pd\n'), ((579, 591), 'json.load', 'json.load', (['f'], {}), '(f)\n', (588, 591), False, 'import json\n'), ((1253, 1319), 'os.path.join', 'os.path.join', (['kPath.dirWQ', '"""modelStat"""', '"""WRTDS-W"""', '"""B10"""', '"""output"""'], {}), "(kPath.dirWQ, 'modelStat', 'WRTDS-W', 'B10', 'output')\n", (1265, 1319), False, 'import os\n'), ((1956, 1983), 'numpy.datetime64', 'np.datetime64', (['"""2010-01-01"""'], {}), "('2010-01-01')\n", (1969, 1983), True, 'import numpy as np\n'), ((1993, 2020), 'numpy.datetime64', 'np.datetime64', (['"""1980-01-01"""'], {}), "('1980-01-01')\n", (2006, 2020), True, 'import numpy as np\n'), ((515, 556), 'os.path.join', 'os.path.join', (['dirSel', '"""dictRB_Y30N5.json"""'], {}), "(dirSel, 'dictRB_Y30N5.json')\n", (527, 556), False, 'import os\n'), ((1477, 1507), 'os.path.join', 'os.path.join', (['dirWRTDS', 'siteNo'], {}), '(dirWRTDS, siteNo)\n', (1489, 1507), False, 'import os\n'), ((1808, 1877), 'hydroDL.app.waterQuality.readSiteTS', 'waterQuality.readSiteTS', (['siteNo'], {'varLst': "(['00060'] + codeLst)", 'freq': '"""W"""'}), "(siteNo, varLst=['00060'] + codeLst, freq='W')\n", (1831, 1877), False, 'from hydroDL.app import waterQuality\n'), ((2032, 2090), 'numpy.where', 'np.where', (['((df.index.values < tt) & (df.index.values >= t0))'], {}), '((df.index.values < tt) & (df.index.values >= t0))\n', (2040, 2090), True, 'import numpy as np\n'), ((2105, 2136), 'numpy.where', 'np.where', (['(df.index.values >= tt)'], {}), '(df.index.values >= tt)\n', (2113, 2136), True, 'import numpy as np\n'), ((3488, 3507), 'numpy.mean', 'np.mean', (['nSampleLst'], {}), '(nSampleLst)\n', (3495, 3507), True, 'import numpy as np\n'), ((1100, 1131), 'hydroDL.master.basins.loadSeq', 'basins.loadSeq', (['outName', 'siteNo'], {}), '(outName, siteNo)\n', (1114, 1131), False, 'from hydroDL.master import basins\n'), ((2623, 2665), 'hydroDL.utils.rmNan', 'utils.rmNan', (['[v1, v2, v3]'], {'returnInd': '(False)'}), '([v1, v2, v3], returnInd=False)\n', (2634, 2665), False, 'from hydroDL import kPath, utils\n'), ((2693, 2720), 'hydroDL.utils.stat.calErr', 'utils.stat.calErr', (['vv1', 'vv2'], {}), '(vv1, vv2)\n', (2710, 2720), False, 'from hydroDL import kPath, utils\n'), ((2748, 2775), 'hydroDL.utils.stat.calErr', 'utils.stat.calErr', (['vv1', 'vv3'], {}), '(vv1, vv3)\n', (2765, 2775), False, 'from hydroDL import kPath, utils\n'), ((2803, 2830), 'hydroDL.utils.stat.calErr', 'utils.stat.calErr', (['vv2', 'vv3'], {}), '(vv2, vv3)\n', (2820, 2830), False, 'from hydroDL import kPath, utils\n'), ((1521, 1558), 'pandas.read_csv', 'pd.read_csv', (['saveFile'], {'index_col': 'None'}), '(saveFile, index_col=None)\n', (1532, 1558), True, 'import pandas as pd\n'), ((3333, 3371), 'numpy.isnan', 'np.isnan', (['dictObs[siteNo][code].values'], {}), '(dictObs[siteNo][code].values)\n', (3341, 3371), True, 'import numpy as np\n')]
""" Choosing good features is one of your most important jobs. """ import numpy as np from sklearn.datasets import load_iris from sklearn import tree iris = load_iris() # print(iris.feature_names) # print(iris.target_names) # print(iris.data[:10]) # print(iris.target[:10]) # one of each kind of iris test_indexes = [ 0, 50, 100, ] # traning_data train_data = np.delete(iris.data, test_indexes, axis=0) train_target = np.delete(iris.target, test_indexes) # testing_data test_data = iris.data[test_indexes] test_target = iris.target[test_indexes] # train classifier classifier = tree.DecisionTreeClassifier() classifier.fit(train_data, train_target) print(test_target) print(classifier.predict(test_data))
[ "sklearn.datasets.load_iris", "numpy.delete", "sklearn.tree.DecisionTreeClassifier" ]
[((160, 171), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (169, 171), False, 'from sklearn.datasets import load_iris\n'), ((379, 421), 'numpy.delete', 'np.delete', (['iris.data', 'test_indexes'], {'axis': '(0)'}), '(iris.data, test_indexes, axis=0)\n', (388, 421), True, 'import numpy as np\n'), ((437, 473), 'numpy.delete', 'np.delete', (['iris.target', 'test_indexes'], {}), '(iris.target, test_indexes)\n', (446, 473), True, 'import numpy as np\n'), ((601, 630), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (628, 630), False, 'from sklearn import tree\n')]