# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """API for interacting with the buildbucket service directly. Instead of triggering jobs by emitting annotations then handled by the master, this module allows slaves to directly post requests to buildbucket. """ import json import os import uuid from recipe_engine import recipe_api class BuildbucketApi(recipe_api.RecipeApi): """A module for interacting with buildbucket.""" def __init__(self, buildername, buildnumber, *args, **kwargs): super(BuildbucketApi, self).__init__(*args, **kwargs) self._buildername = buildername self._buildnumber = buildnumber self._properties = None def get_config_defaults(self): if self.m.platform.is_win: return {'PLATFORM': 'win'} return {'PLATFORM': 'default'} def _configure_defaults(self): """Apply default configuration if no configuration has been set. Ideally whoever uses this api will explicitly set the configuration by doing `api.buildbucket.set_config('production_buildbucket')`, but to make this module usable even in case they don't configure it, we set the default to the production instance of buildbucket.""" # There's only two items in this module's configuration, the path to the # buildbucket cli client binary and the buildbucket hostname, this default # configuration will override them. if not self.c or not self.c.complete(): self.set_config('production_buildbucket') def _tags_for_build(self, bucket, parameters, override_tags=None): buildbucket_info = self.properties or {} original_tags_list = buildbucket_info.get('build', {}).get('tags', []) original_tags = dict(t.split(':', 1) for t in original_tags_list) new_tags = {'user_agent': 'recipe'} if 'buildset' in original_tags: new_tags['buildset'] = original_tags['buildset'] builder_name = parameters.get('builder_name') if builder_name: new_tags['builder'] = builder_name if bucket.startswith('master.'): new_tags['master'] = bucket[7:] if self._buildnumber is not None: new_tags['parent_buildnumber'] = str(self._buildnumber) if self._buildername is not None: new_tags['parent_buildername'] = str(self._buildername) new_tags.update(override_tags or {}) return sorted([':'.join((x, y)) for x, y in new_tags.iteritems()]) @property def properties(self): """Returns (dict-like or None): The BuildBucket properties, if present.""" if self._properties is None: # Not cached, load and deserialize from properties. props = self.m.properties.get('buildbucket') if props is not None: if isinstance(props, basestring): props = json.loads(props) self._properties = props return self._properties def put(self, builds, service_account=None, **kwargs): """Puts a batch of builds. Args: builds (list): A list of dicts, where keys are: 'bucket': (required) name of the bucket for the request. 'parameters' (dict): (required) arbitrary json-able parameters that a build system would be able to interpret. 'tags': (optional) a dict(str->str) of tags for the build. These will be added to those generated by this method and override them if appropriate. 'client_operation_id': (optional) an arbitary string, ideally random, used to prevent duplication of requests upon retry. service_account (str): (optional) path to locally saved secrets for service account to authenticate as. Returns: A step that as its .stdout property contains the response object as returned by buildbucket. 
""" build_specs = [] for build in builds: client_operation_id = build.get('client_operation_id', uuid.uuid4().hex) build_specs.append(json.dumps({ 'bucket': build['bucket'], 'parameters_json': json.dumps(build['parameters'], sort_keys=True), 'client_operation_id': client_operation_id, 'tags': self._tags_for_build(build['bucket'], build['parameters'], build.get('tags')) }, sort_keys=True)) return self._call_service('put', build_specs, service_account, **kwargs) def cancel_build(self, build_id, service_account=None, **kwargs): return self._call_service('cancel', [build_id], service_account, **kwargs) def get_build(self, build_id, service_account=None, **kwargs): return self._call_service('get', [build_id], service_account, **kwargs) def _call_service(self, command, args, service_account=None, **kwargs): # TODO: Deploy buildbucket client using cipd. self._configure_defaults() step_name = kwargs.pop('name', 'buildbucket.' + command) if service_account: args = ['--service-account-json', service_account] + args args = [str(self.c.buildbucket_client_path), command, '--host', self.c.buildbucket_host] + args return self.m.step( step_name, args, stdout=self.m.json.output(), **kwargs)
python
from parsers import golden_horse_parser

parser = golden_horse_parser()
args = parser.parse_args()

REDUNDANT_INFO_LINE_NUM = 4
TRAILING_USELESS_INFO_LINE_NUM = -1


def clean_line(string, remove_trailing_position=-2):
    return string.replace('\t', '').split(',')[:remove_trailing_position]


def main():
    with open(args.input, encoding=args.encoding) as file_handle:
        lines = file_handle.readlines()[
            REDUNDANT_INFO_LINE_NUM:TRAILING_USELESS_INFO_LINE_NUM
        ]
    cleaned_lines = [clean_line(line) for line in lines]
    with open(args.output, 'w') as file_handle:
        for line in cleaned_lines:
            file_handle.write(f'{",".join(line)}\n')


if __name__ == '__main__':
    main()
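# The `parsers.golden_horse_parser` factory is not shown in this file. A minimal sketch
# of what the script above assumes it returns (an argparse parser whose parsed args
# expose `input`, `output` and `encoding`); the flag names and defaults are assumptions.
import argparse

def golden_horse_parser():
    parser = argparse.ArgumentParser(description='Clean a Golden Horse CSV export')
    parser.add_argument('--input', required=True, help='path to the raw CSV file')
    parser.add_argument('--output', required=True, help='path to write the cleaned CSV')
    parser.add_argument('--encoding', default='utf-8', help='encoding of the input file')
    return parser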
python
from setuptools import setup

setup(name='money',
      version='0.1',
      description="Implementation of Fowler's Money",
      url='https://github.com/luka-mladenovic/fowlers-money',
      author='Luka Mladenovic',
      author_email='',
      license='MIT',
      packages=['money'],
      install_requires=[
          'pyyaml',
      ],
      zip_safe=False)
python
from . import sequence from . import sampler as sampler_trw import numpy as np import collections import copy from ..utils import get_batch_n, len_batch # this the name used for the sample UID sample_uid_name = 'sample_uid' class SequenceArray(sequence.Sequence): """ Create a sequence of batches from numpy arrays, lists and :class:`torch.Tensor` """ def __init__( self, split, sampler=sampler_trw.SamplerRandom(), transforms=None, use_advanced_indexing=True, sample_uid_name=sample_uid_name): """ Args: split: a dictionary of tensors. Tensors may be `numpy.ndarray`, `torch.Tensor`, numeric sampler: the sampler to be used to iterate through the sequence transforms: a transform or list of transforms to be applied on each batch of data use_advanced_indexing: sample_uid_name: if not `None`, create a unique UID per sample so that it is easy to track particular samples (e.g., during data augmentation) """ super().__init__(None) # there is no source sequence for this as we get our input from a numpy split self.split = split self.nb_samples = None self.sampler = sampler self.sampler_iterator = None self.transforms = transforms self.use_advanced_indexing = use_advanced_indexing # create a unique UID if sample_uid_name is not None and sample_uid_name not in split: split[sample_uid_name] = np.asarray(np.arange(len_batch(split))) def subsample(self, nb_samples): # get random indices subsample_sample = sampler_trw.SamplerRandom(batch_size=nb_samples) subsample_sample.initializer(self.split) # extract the indices indices = next(iter(subsample_sample)) subsampled_split = get_batch_n( self.split, len_batch(self.split), indices, self.transforms, # use `use_advanced_indexing` so that we keep the types as close as possible to original use_advanced_indexing=True ) return SequenceArray( subsampled_split, copy.deepcopy(self.sampler), transforms=self.transforms, use_advanced_indexing=self.use_advanced_indexing ) def subsample_uids(self, uids, uids_name, new_sampler=None): uid_values = self.split.get(uids_name) assert uid_values is not None, 'no UIDs with name={}'.format(uids_name) # find the samples that are in `uids` indices_to_keep = [] uids_set = set(uids) for index, uid in enumerate(uid_values): if uid in uids_set: indices_to_keep.append(index) # reorder the `indices_to_keep` following the `uids` ordering uids_ordering = {uid: index for index, uid in enumerate(uids)} kvp_index_ordering = [] for index in indices_to_keep: uid = uid_values[index] ordering = uids_ordering[uid] kvp_index_ordering.append((index, ordering)) kvp_uids_ordering = sorted(kvp_index_ordering, key=lambda value: value[1]) indices_to_keep = [index for index, ordering in kvp_uids_ordering] # extract the samples subsampled_split = get_batch_n( self.split, len_batch(self.split), indices_to_keep, self.transforms, # use `use_advanced_indexing` so that we keep the types as close as possible to original use_advanced_indexing=True ) if new_sampler is None: new_sampler = copy.deepcopy(self.sampler) else: new_sampler = copy.deepcopy(new_sampler) return SequenceArray( subsampled_split, new_sampler, transforms=self.transforms, use_advanced_indexing=self.use_advanced_indexing ) def __iter__(self): # make sure the sampler is copied so that we can have multiple iterators of the # same sequence return SequenceIteratorArray(self, copy.deepcopy(self.sampler)) def close(self): pass class SequenceIteratorArray(sequence.SequenceIterator): """ Iterate the elements of an :class:`trw.train.SequenceArray` sequence Assumptions: - underlying `base_sequence` doesn't 
change sizes while iterating """ def __init__(self, base_sequence, sampler): super().__init__() self.base_sequence = base_sequence self.nb_samples = len_batch(self.base_sequence.split) self.sampler = sampler self.sampler.initializer(self.base_sequence.split) self.sampler_iterator = iter(self.sampler) def __next__(self): indices = self.sampler_iterator.__next__() if not isinstance(indices, (np.ndarray, collections.Sequence)): indices = [indices] return get_batch_n( self.base_sequence.split, self.nb_samples, indices, self.base_sequence.transforms, self.base_sequence.use_advanced_indexing) def close(self): pass
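# A short usage sketch of SequenceArray, assuming the module context above
# (sampler_trw and np are the imports from this file). The split keys and the
# batch_size value are made-up examples, not part of the library.
split = {
    'images': np.random.rand(100, 3, 8, 8),
    'labels': np.random.randint(0, 2, size=100),
}
sequence = SequenceArray(split, sampler=sampler_trw.SamplerRandom(batch_size=16))
for batch in sequence:
    # each batch is a dict with the same keys as `split`, plus the 'sample_uid' column
    pass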
python
from cli import * # # VTOC layout: (with unimportant fields removed) # # OFFSET SIZE NUM NAME # 0 128 1 label VTOC_VERSION = 128 # 128 4 1 version # 132 8 1 volume name VTOC_NUMPART = 140 # 140 2 1 number of partitions VTOC_PART_S2 = 142 # 142 4 8 partition headers, section 2 # 2 bytes tag # 2 bytes permission flag # 174 2 1 <pad> # 176 4 3 bootinfo VTOC_SANITY = 188 # 188 4 1 sanity # 192 4 10 <reserved> # 232 4 8 partition timestamp # 264 2 1 write reinstruct # 266 2 1 read reinstruct # 268 152 1 <pad> VTOC_RPM = 420 # 420 2 1 rpm VTOC_PHYS_CYL = 422 # 422 2 1 physical cylinders VTOC_ALT_P_CYL = 424 # 424 2 1 alternates per cylinder # 426 2 1 <obsolete> # 428 2 1 <obsolete> VTOC_INTRLV = 430 # 430 2 1 interleave VTOC_DATA_CYL = 432 # 432 2 1 data cylinders VTOC_ALT_CYL = 434 # 434 2 1 alt cylinders VTOC_HEADS = 436 # 436 2 1 heads VTOC_TRACKS = 438 # 438 2 1 sectors per track # 440 2 1 <obsolete> # 442 2 1 <obsolete> VTOC_PART_S1 = 444 # 444 8 8 partition headers, section 1 # 4 bytes start cylinder # 4 bytes number of blocks VTOC_MAGIC = 508 # 508 2 1 magic = 0xDABE VTOC_CHECKSUM = 510 # 510 2 1 checksum tag_list = { 0 : "unused", 1 : "boot", 2 : "root", 3 : "swap", 4 : "usr", 5 : "backup", 7 : "var", 8 : "home", 130 : "Linux swap", 131 : "Linux" } flag_list = { 0 : "RW", 1 : "unmountable", 2 : "RO" } def get_tag_str(tag): try: return "(" + tag_list[tag] + ")" except: return "(unknown)" def get_flag_str(flag): try: return "(" + flag_list[flag] + ")" except: return "(unknown)" def calculate_checksum(vtoc): chk = 0 for i in range(0, 510, 2): chk ^= get_vtoc_int16(vtoc, i) return chk def get_vtoc_label(vtoc): str = "" for i in vtoc: if i == 0: return str str += chr(i) def set_vtoc_label(vtoc, str): for i in range(0, len(str)): vtoc[i] = ord(str[i]) for j in range(i + 1, 512): vtoc[j] = 0 def get_vtoc_int16(vtoc, offset): return (vtoc[offset] << 8) | vtoc[offset + 1] def set_vtoc_int16(vtoc, offset, value): vtoc[offset] = (value >> 8) & 0xff vtoc[offset + 1] = value & 0xff def get_vtoc_int32(vtoc, offset): return (get_vtoc_int16(vtoc, offset) << 16) | get_vtoc_int16(vtoc, offset + 2) def set_vtoc_int32(vtoc, offset, value): set_vtoc_int16(vtoc, offset, (value >> 16) & 0xffff) set_vtoc_int16(vtoc, offset + 2, value & 0xffff) def read_block(obj, offset): if obj.classname == "scsi-disk": return list(obj.sector_data[offset * 512]) elif obj.classname == "ide-disk": block = [] for i in range(0, 512): block.append(obj.image.byte_access[offset * 512 + i]) return block else: raise Exception, "Unknown disk type" def write_block(obj, offset, block): if obj.classname == "scsi-disk": obj.sector_data[offset * 512] = block elif obj.classname == "ide-disk": for i in range(0, 512): obj.image.byte_access[offset * 512 + i] = block[i] else: raise Exception, "Unknown disk type" def print_partitions(obj, vtoc): heads = get_vtoc_int16(vtoc, VTOC_HEADS) s_per_t = get_vtoc_int16(vtoc, VTOC_TRACKS) print "Partition Table:" print "Number Tag Flag Start End Size" for i in range(0, 8): tag = get_vtoc_int16(vtoc, VTOC_PART_S2 + 4 * i + 0) flag = get_vtoc_int16(vtoc, VTOC_PART_S2 + 4 * i + 2) start = get_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * i + 0) blocks = get_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * i + 4) if blocks == 0: continue start *= heads * s_per_t print " %d %d %-12s %d %-13s %9d %9d %9d" % ( i, tag, get_tag_str(tag), flag, get_flag_str(flag), start, start + blocks - 1, blocks) def print_sun_vtoc_cmd(obj): vtoc = read_block(obj, 0) if get_vtoc_int16(vtoc, VTOC_MAGIC) != 0xDABE: print "This does not appear to be a Sun 
Disk." print "The magic is %x, expected 0xDABE" % get_vtoc_int16(vtoc, VTOC_MAGIC) print return data_cyl = get_vtoc_int16(vtoc, VTOC_DATA_CYL) phys_cyl = get_vtoc_int16(vtoc, VTOC_PHYS_CYL) heads = get_vtoc_int16(vtoc, VTOC_HEADS) s_per_t = get_vtoc_int16(vtoc, VTOC_TRACKS) print print " Label : %s" % get_vtoc_label(vtoc) print " RPM : %s" % get_vtoc_int16(vtoc, VTOC_RPM) print " Data cylinders : %d" % data_cyl print " Alt cylinders : %d" % get_vtoc_int16(vtoc, VTOC_ALT_CYL) print "Physical cylinders : %d" % phys_cyl print " Heads : %d" % heads print " Sectors per Track : %d" % s_per_t print print " Number of data blocks : %d" % (data_cyl * s_per_t * heads) print print_partitions(obj, vtoc) num_part = get_vtoc_int16(vtoc, VTOC_NUMPART) chk_sum = get_vtoc_int16(vtoc, VTOC_CHECKSUM) if num_part != 8: print print "### Illegal number of partitions set (%d), only 8 supported" % num_part if calculate_checksum(vtoc) != chk_sum: print "### Incorrect checksum: %d. Expected: %d" % (chk_sum, calculate_checksum(vtoc)) print def write_sun_vtoc_cmd(obj, C, H, S, quiet): vtoc = [0] * 512 if -1 in [C, H, S] and [C, H, S] != [-1, -1, -1]: print "Only Partial geometry specified." SIM_command_has_problem() return alt = 2 if [C, H, S] != [-1, -1, -1]: cyl = C - alt heads = H s_per_t = S elif obj.classname == "scsi-disk": print "No geometry specified for SCSI disk VTOC." SIM_command_has_problem() return elif obj.classname == "ide-disk": cyl = obj.disk_cylinders - alt heads = obj.disk_heads s_per_t = obj.disk_sectors_per_track pass else: raise Exception, "Unknown disk type" set_vtoc_label(vtoc, "SIMDISK cyl %d alt %d hd %d sec %d" % (cyl, alt, heads, s_per_t)) set_vtoc_int32(vtoc, VTOC_VERSION, 1) set_vtoc_int16(vtoc, VTOC_MAGIC, 0xDABE) set_vtoc_int16(vtoc, VTOC_DATA_CYL, cyl) set_vtoc_int16(vtoc, VTOC_ALT_CYL, alt) set_vtoc_int16(vtoc, VTOC_INTRLV, 1) set_vtoc_int16(vtoc, VTOC_PHYS_CYL, cyl + alt) set_vtoc_int16(vtoc, VTOC_HEADS, heads) set_vtoc_int16(vtoc, VTOC_TRACKS, s_per_t) set_vtoc_int16(vtoc, VTOC_NUMPART, 8) set_vtoc_int16(vtoc, VTOC_RPM, 7200) set_vtoc_int32(vtoc, VTOC_SANITY, 0x600ddeee) # set checksum last! set_vtoc_int16(vtoc, VTOC_CHECKSUM, calculate_checksum(vtoc)) write_block(obj, 0, tuple(vtoc)) # create the backup slice write_sun_vtoc_partition_cmd(obj, 2, "backup", "unmountable", 0, cyl * heads * s_per_t, 1) if not quiet and SIM_get_quiet() == 0: print "New VTOC written to disk:" print_sun_vtoc_cmd(obj) def write_sun_vtoc_partition_cmd(obj, nbr, tag_str, flag_str, start, blocks, quiet): if nbr < 0 or nbr > 7: print "Partitions are numbered 0 ..7\n" return try: tag = tag_list.keys()[tag_list.values().index(tag_str)] except: print "Unknown tag type '%s'" % tag_str print "Try one of:" for i in tag_list.values(): print " " + i print return try: flag = flag_list.keys()[flag_list.values().index(flag_str)] except: print "Unknown flag '%s'" % flag_str print "Try one of:" for i in flag_list.values(): print " " + i print return vtoc = read_block(obj, 0) heads = get_vtoc_int16(vtoc, VTOC_HEADS) s_per_t = get_vtoc_int16(vtoc, VTOC_TRACKS) set_vtoc_int16(vtoc, VTOC_PART_S2 + 4 * nbr + 0, tag) set_vtoc_int16(vtoc, VTOC_PART_S2 + 4 * nbr + 2, flag) set_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr + 0, start / (heads * s_per_t)) set_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr + 4, blocks) # set checksum last! 
set_vtoc_int16(vtoc, VTOC_CHECKSUM, calculate_checksum(vtoc)) write_block(obj, 0, tuple(vtoc)) if not quiet and SIM_get_quiet() == 0: print_partitions(obj, vtoc) print def delete_sun_vtoc_partition_cmd(obj, nbr, quiet): if nbr < 0 or nbr > 7: print "Partitions are numbered 0 ..7\n" return vtoc = read_block(obj, 0) set_vtoc_int16(vtoc, VTOC_PART_S2 + 4 * nbr + 0, 0) set_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr + 4, 0) # set checksum last! set_vtoc_int16(vtoc, VTOC_CHECKSUM, calculate_checksum(vtoc)) write_block(obj, 0, tuple(vtoc)) if not quiet and SIM_get_quiet() == 0: print_partitions(obj, vtoc) print def dump_sun_partition_cmd(obj, nbr, file): if nbr < 0 or nbr > 7: print "Partitions are numbered 0 ..7\n" return vtoc = read_block(obj, 0) heads = get_vtoc_int16(vtoc, VTOC_HEADS) s_per_t = get_vtoc_int16(vtoc, VTOC_TRACKS) start = get_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr) * heads * s_per_t blocks = get_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr + 4) if blocks == 0: print "No partition %d.\n" % nbr return print "Dumping partition %d. Start block %d. Size in blocks: %d" % (nbr, start, blocks) # index with list, since python doesn't have 4 bit indexes try: obj.image.dump[[file, start * 512, blocks * 512]] except Exception, msg: print "Failed getting a dump from the disk image." print "Error message was: %s\n" % msg return print "Partition dumped successfully.\n" def add_sun_partition_cmd(obj, nbr, file): if nbr < 0 or nbr > 7: print "Partitions are numbered 0 ..7\n" return vtoc = read_block(obj, 0) heads = get_vtoc_int16(vtoc, VTOC_HEADS) s_per_t = get_vtoc_int16(vtoc, VTOC_TRACKS) start = get_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr) * heads * s_per_t blocks = get_vtoc_int32(vtoc, VTOC_PART_S1 + 8 * nbr + 4) if blocks == 0: print "No partition %d.\n" % nbr return print "Adding partition %d. Start block %d. Size in blocks: %d" % (nbr, start, blocks) # index with list, since python doesn't have 4 bit indexes files = obj.image.files files += [[file, "ro", start * 512, blocks * 512]] try: obj.image.files = files except Exception, msg: print "Failed adding the diff file '%s' to image '%s'." % (file, obj.name) print "Error message was: %s\n" % msg return print "Partition added.\n" def tag_expander(string, obj): return get_completions(string, tag_list.values()) def flag_expander(string, obj): return get_completions(string, flag_list.values()) def create_sun_vtoc_commands(name): new_command("print-sun-vtoc", print_sun_vtoc_cmd, [], alias = "", type = "%s commands" % name, short = "print the VTOC for a Sun disk", namespace = "%s" % name, see_also = ["<" + "%s" % name + '>.' + 'create-sun-vtoc-header', "<" + "%s" % name + '>.' + 'create-sun-vtoc-partition', "<" + "%s" % name + '>.' + 'delete-sun-vtoc-partition'], doc = """ Print the contents of the VTOC (volume table of contents) for a Sun disk. This is similar to the Solaris 'prtvtoc' command. """, filename="/mp/simics-3.0/src/extensions/apps-python/sun_vtoc_commands.py", linenumber="317") new_command("create-sun-vtoc-header", write_sun_vtoc_cmd, [arg(int_t, "C", "?", -1), arg(int_t, "H", "?", -1), arg(int_t, "S", "?", -1), arg(flag_t, "-quiet")], alias = "", type = "%s commands" % name, short = "write a new VTOC to a Sun disk", namespace = "%s" % name, see_also = ["<" + "%s" % name + '>.' + 'print-sun-vtoc', "<" + "%s" % name + '>.' + 'create-sun-vtoc-partition', "<" + "%s" % name + '>.' + 'delete-sun-vtoc-partition'], doc = """ Create and write a new VTOC to a Sun disk. 
The geometry information written is taken from the configuration attribute 'geometry' of the disk, unless specified with the <b>C</b>, <b>H</b> and <b>S</b> parameters. A new empty partition table is also created, with only the standard 'backup' partition as number 2. <arg>-quiet</arg> makes the command silent in case of success. """, filename="/mp/simics-3.0/src/extensions/apps-python/sun_vtoc_commands.py", linenumber="330") new_command("create-sun-vtoc-partition", write_sun_vtoc_partition_cmd, [arg(int_t, "number"), arg(str_t, "tag", expander = tag_expander), arg(str_t, "flag", expander = flag_expander), arg(int_t, "start-block"), arg(int_t, "num-blocks"), arg(flag_t, "-quiet")], alias = "", type = "%s commands" % name, short = "write partition data in the VTOC on a Sun disk", namespace = "%s" % name, see_also = ["<" + "%s" % name + '>.' + 'print-sun-vtoc', "<" + "%s" % name + '>.' + 'create-sun-vtoc-header', "<" + "%s" % name + '>.' + 'delete-sun-vtoc-partition'], doc = """ Write partition information to the VTOC on a Sun disk. This command does not change the format of the disk, and it does not create any file system on the partition. Only the 'Volume Table Of Contents' is modified. No checking is performed to make sure that partitions do not overlap, or that they do not exceed the disk size. <arg>-quiet</arg> makes the command silent in case of success. """, filename="/mp/simics-3.0/src/extensions/apps-python/sun_vtoc_commands.py", linenumber="349") new_command("delete-sun-vtoc-partition", delete_sun_vtoc_partition_cmd, [arg(int_t, "number"), arg(flag_t, "-quiet")], alias = "", type = "%s commands" % name, short = "delete partition data from the VTOC on a Sun disk", namespace = "%s" % name, see_also = ["<" + "%s" % name + '>.' + 'print-sun-vtoc', "<" + "%s" % name + '>.' + 'create-sun-vtoc-header', "<" + "%s" % name + '>.' + 'create-sun-vtoc-partition'], doc = """ Delete the information in the VTOC on a Sun disk for the specified partition. No other modification on the disk is performed. <arg>-quiet</arg> makes the command silent in case of success. """, filename="/mp/simics-3.0/src/extensions/apps-python/sun_vtoc_commands.py", linenumber="371") new_command("dump-sun-partition", dump_sun_partition_cmd, [arg(int_t, "number"), arg(filename_t(), "file")], alias = "", type = "%s commands" % name, short = "write partition as a file", namespace = "%s" % name, see_also = ["<" + "%s" % name + '>.' + 'print-sun-vtoc', "<" + "%s" % name + '>.' + 'add-sun-partition'], doc = """ Write all data from a Sun disk partition to the specified file in raw format. """, filename="/mp/simics-3.0/src/extensions/apps-python/sun_vtoc_commands.py", linenumber="387") new_command("add-sun-partition", add_sun_partition_cmd, [arg(int_t, "number"), arg(filename_t(), "file")], alias = "", type = "%s commands" % name, short = "add partition from a file", namespace = "%s" % name, see_also = ["<" + "%s" % name + '>.' + 'dump-sun-partition'], doc = """ Adds an image or diff as a sun partition to the current disk. """, filename="/mp/simics-3.0/src/extensions/apps-python/sun_vtoc_commands.py", linenumber="399")
python
import os from pathlib import Path from setuptools import find_packages, setup def parse_req_file(fname, initial=None): """Reads requires.txt file generated by setuptools and outputs a new/updated dict of extras as keys and corresponding lists of dependencies as values. The input file's contents are similar to a `ConfigParser` file, e.g. pkg_1 pkg_2 pkg_3 [extras1] pkg_4 pkg_5 [extras2] pkg_6 pkg_7 """ reqs = {} if initial is None else initial cline = None with open(fname, "r") as f: for line in f.readlines(): line = line[:-1].strip() if len(line) == 0: continue if line[0] == "[": # Add new key for current extras (if missing in dict) cline = line[1:-1] if cline not in reqs: reqs[cline] = [] else: # Only keep dependencies from extras if cline is not None: reqs[cline].append(line) return reqs def get_version(fname): """Reads PKG-INFO file generated by setuptools and extracts the Version number.""" res = "UNK" with open(fname, "r") as f: for line in f.readlines(): line = line[:-1] if line.startswith("Version:"): res = line.replace("Version:", "").strip() break if res in ["UNK", ""]: raise ValueError(f"Missing Version number in {fname}") return res if __name__ == "__main__": base_dir = os.path.abspath(os.path.dirname(Path(__file__))) if not os.path.exists( os.path.join(base_dir, "allenact.egg-info/dependency_links.txt") ): # Build mode for sdist os.chdir(os.path.join(base_dir, "..")) with open(".VERSION", "r") as f: __version__ = f.readline().strip() # Extra dependencies for development (actually unnecessary) extras = { "dev": [ l.strip() for l in open("dev_requirements.txt", "r").readlines() if l.strip() != "" ] } else: # Install mode from sdist __version__ = get_version(os.path.join(base_dir, "allenact.egg-info/PKG-INFO")) extras = parse_req_file( os.path.join(base_dir, "allenact.egg-info/requires.txt") ) setup( name="allenact", version=__version__, description="AllenAct framework", long_description=( "AllenAct is a modular and flexible learning framework designed with" " a focus on the unique requirements of Embodied-AI research." ), classifiers=[ "Intended Audience :: Science/Research", "Development Status :: 3 - Alpha", "License :: OSI Approved :: MIT License", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Programming Language :: Python", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", ], keywords=["reinforcement learning", "embodied-AI", "AI", "RL", "SLAM"], url="https://github.com/allenai/allenact", author="Allen Institute for Artificial Intelligence", author_email="[email protected]", license="MIT", packages=find_packages(include=["allenact", "allenact.*"]), install_requires=[ "gym>=0.17.0,<0.18.0", "torch>=1.6.0,!=1.8.0,<1.9.0", "tensorboardx>=2.1", "torchvision>=0.7.0,<0.10.0", "setproctitle", "moviepy>=1.0.3", "filelock", "numpy>=1.22.2", "Pillow==9.0.1", "matplotlib>=3.3.1", "networkx==2.6", "opencv-python", "wheel>=0.36.2", ], setup_requires=["pytest-runner"], tests_require=["pytest", "pytest-cov", "compress_pickle"], entry_points={"console_scripts": ["allenact=allenact.main:main"]}, extras_require=extras, )
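# A small, self-contained illustration of how parse_req_file above groups an
# extras-style requires.txt into a dict; the file contents here are made up.
from tempfile import NamedTemporaryFile

with NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("pkg_1\npkg_2\n\n[extras1]\npkg_3\n")
    tmp_path = tmp.name

reqs = parse_req_file(tmp_path, initial={"full": []})
# Top-level requirements (pkg_1, pkg_2) are skipped because they appear before any
# [section]; reqs is expected to be {"full": [], "extras1": ["pkg_3"]}.
print(reqs)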
python
import sys

from PyQt5 import QtWidgets

from gui import MainWindow

"""
Guitario, simple chord recognizer
All created MP4 files are stored in saved_accords directory
"""

if __name__ == '__main__':
    print("Loading application!")
    app = QtWidgets.QApplication(sys.argv)
    app.setApplicationName("Guitario")
    app.setStyle("Fusion")
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
python
from abc import ABC, abstractmethod


class MyAbstract(ABC):
    def __init__(self):
        pass

    @abstractmethod
    def doSomething(self):
        pass


class MyClass1(MyAbstract):
    def __init__(self):
        pass

    def doSomething(self):
        print("abstract method")

    def doSomethingElse(self):
        print("abstract method 2")


c1 = MyClass1()
c1.doSomething()
c1.doSomethingElse()
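# The abstract base class itself cannot be instantiated; only subclasses that override
# every @abstractmethod can. A quick demonstration:
try:
    MyAbstract()
except TypeError as err:
    # e.g. "Can't instantiate abstract class MyAbstract with abstract method doSomething"
    print(err)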
python
""" Overrides the align-items value for specific flex items. """ from ..defaults import BREAKPOINTS, UP, DOWN, FULL, ONLY from ...core import CssModule vals = [ ('fs', 'flex-start'), ('fe', 'flex-end'), ('c', 'center'), ('b', 'baseline'), ('s', 'stretch') ] mdl = CssModule( 'Align self', [FULL], dynamic={'.as': ['align-self']}, values=vals, docstring=__doc__ )
python
#!/usr/bin/env python3 import sys try: import psycopg2 postgres = True except: import sqlite3 postgres = False if __name__ == "__main__": if len(sys.argv) != 2: print("You must supply the database name as the first argument") sys.exit() if postgres: conn = psycopg2.connect(sys.argv[1]) c = conn.cursor() c.execute("""CREATE TABLE admins ( admin_id bigserial NOT NULL, user_id integer NOT NULL, level smallint NOT NULL DEFAULT 1, CONSTRAINT admins_pkey PRIMARY KEY (admin_id), CONSTRAINT admins_user_id_key UNIQUE (user_id) );""") c.execute("""CREATE TABLE posts_per_channel ( post_id bigserial NOT NULL, replyto_id integer, channel_id integer NOT NULL, message_id integer NOT NULL, contenttype text, contenttext text, file_id text, CONSTRAINT posts_per_channel_pkey PRIMARY KEY (post_id) );""") c.execute("""CREATE TABLE channels ( channel_id bigserial NOT NULL, channelname text NOT NULL, channelurl text, CONSTRAINT channels_pkey PRIMARY KEY (channel_id) );""") else: conn = sqlite3.connect(sys.argv[1]) c = conn.cursor() c.execute("""CREATE TABLE "channels" ( `channel_id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, `channelname` TEXT NOT NULL, `channelurl` TEXT NOT NULL UNIQUE );""") c.execute("""CREATE TABLE `posts_per_channel` ( `post_id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, `replyto_id` INTEGER, `channel_id` INTEGER NOT NULL, `message_id` INTEGER NOT NULL, `contenttype` TEXT NOT NULL, `contenttext` TEXT NOT NULL, `file_id` TEXT );""") c.execute("""CREATE TABLE `admins` ( `admin_id` INTEGER NOT NULL UNIQUE, `user_id` INTEGER NOT NULL UNIQUE, `level` INTEGER NOT NULL DEFAULT 1, PRIMARY KEY(admin_id) );""") conn.commit() conn.close()
python
class Cache(object):
    def __init__(self, j):
        self.raw = j
        if "beforeRequest" in self.raw:
            self.before_request = CacheRequest(self.raw["beforeRequest"])
        else:
            self.before_request = None
        if "afterRequest" in self.raw:
            self.after_request = CacheRequest(self.raw["afterRequest"])
        else:
            self.after_request = None
        if "comment" in self.raw:
            self.comment = self.raw["comment"]
        else:
            self.comment = ''


class CacheRequest(object):
    def __init__(self, j):
        self.raw = j
        if "expires" in self.raw:
            # "expires" is an expiration datetime string in HAR data, not a nested
            # cache entry, so keep the raw value instead of recursing.
            self.expires = self.raw["expires"]
        else:
            self.expires = None
        self.last_access = self.raw["lastAccess"]
        self.etag = self.raw["eTag"]
        self.hit_count = self.raw["hitCount"]
        if "comment" in self.raw:
            self.comment = self.raw["comment"]
        else:
            self.comment = ''
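# A short example of feeding a HAR-style cache entry to the classes above;
# the field values are made up.
har_cache = {
    "beforeRequest": {
        "expires": "2024-01-01T00:00:00.000Z",
        "lastAccess": "2023-12-31T12:00:00.000Z",
        "eTag": "",
        "hitCount": 3,
    },
    "comment": "served from disk cache",
}
cache = Cache(har_cache)
print(cache.before_request.hit_count)  # 3
print(cache.after_request)             # None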
python
from OpenGL.GL import *
from OpenGL.GL.ARB import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from OpenGL.GLUT.special import *
from OpenGL.GL.shaders import *

frame_count = 0


def pre_frame():
    pass


def post_fram():
    global frame_count
    frame_count += 1


def disable_vsyc():
    import glfw
    glfw.swap_interval(0)


def enable_vsyc():
    import glfw
    glfw.swap_interval(1)


def LoadShaders(vertex_file_path, fragment_file_path):
    """Compile and link a vertex/fragment shader pair; returns the GLuint program id."""
    # Create the shaders
    VertexShaderID = glCreateShader(GL_VERTEX_SHADER)
    FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER)

    # Read the Vertex Shader code from the file
    with open(vertex_file_path, 'r') as fr:
        VertexShaderCode = fr.read()

    # Read the Fragment Shader code from the file
    with open(fragment_file_path, 'r') as fr:
        FragmentShaderCode = fr.read()

    # Compile Vertex Shader
    print("Compiling shader: %s" % (vertex_file_path))
    glShaderSource(VertexShaderID, VertexShaderCode)
    glCompileShader(VertexShaderID)

    # Check Vertex Shader
    result = glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS)
    if not result:
        raise RuntimeError(glGetShaderInfoLog(VertexShaderID))

    # Compile Fragment Shader
    print("Compiling shader: %s" % (fragment_file_path))
    glShaderSource(FragmentShaderID, FragmentShaderCode)
    glCompileShader(FragmentShaderID)

    # Check Fragment Shader
    result = glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS)
    if not result:
        raise RuntimeError(glGetShaderInfoLog(FragmentShaderID))

    # Link the program
    print("Linking program")
    ProgramID = glCreateProgram()
    glAttachShader(ProgramID, VertexShaderID)
    glAttachShader(ProgramID, FragmentShaderID)
    glLinkProgram(ProgramID)

    # Check the program
    result = glGetProgramiv(ProgramID, GL_LINK_STATUS)
    if not result:
        raise RuntimeError(glGetProgramInfoLog(ProgramID))

    glDeleteShader(VertexShaderID)
    glDeleteShader(FragmentShaderID)

    return ProgramID
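# Usage sketch: LoadShaders must be called with a current OpenGL context (e.g. after
# creating a GLUT or GLFW window); the shader file names below are hypothetical.
# program_id = LoadShaders("simple.vertexshader", "simple.fragmentshader")
# glUseProgram(program_id)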
python
# Create your tasks here
from __future__ import absolute_import, unicode_literals

from celery import shared_task

"""
@shared_task
def hello():
    print("It's a beautiful day in the neighborhood")
"""
python
# -*- coding: utf-8 -*-
import logging

import lecoresdk


def handler(event, context):
    it = lecoresdk.IoTData()

    set_params = {"productKey": "YourProductKey",
                  "deviceName": "YourDeviceName",
                  "payload": {"LightSwitch": 0}}
    res = it.setThingProperties(set_params)
    print(res)

    get_params = {"productKey": "YourProductKey",
                  "deviceName": "YourDeviceName",
                  "payload": ["LightSwitch"]}
    res = it.getThingProperties(get_params)
    print(res)

    pub_params = {"topic": "/topic/hello", "payload": "hello world"}
    it.publish(pub_params)
    print("publish success")

    get_params = {"payload": [{"home": "123"}]}
    res = it.getThingsWithTags(get_params)
    print(res)

    get_params = {"productKey": "YourProductKey",
                  "deviceName": "YourDeviceName",
                  "service": "upgrade",
                  "payload": {"LightSwitch": 0}}
    res = it.callThingService(get_params)
    print(res)

    return 'hello world'
python
from pathlib import Path

from cgr_gwas_qc.exceptions import GtcMagicNumberError, GtcTruncatedFileError, GtcVersionError
from cgr_gwas_qc.parsers.illumina import GenotypeCalls


def validate(file_name: Path):
    try:
        # Illumina's parser has a bunch of different error checks, so I am just
        # using those to validate the file. However, I will throw custom errors
        # for clarity.
        GenotypeCalls(file_name.as_posix())
    except Exception as err:
        if err.args[0] == "GTC format error: bad format identifier":
            raise GtcMagicNumberError
        elif err.args[0] == "Unsupported GTC File version":
            raise GtcVersionError
        elif err.args[0] == "GTC file is incomplete":
            raise GtcTruncatedFileError
        else:
            raise err
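# Usage sketch (the path below is hypothetical): each failure mode surfaces as one of
# the custom exceptions raised above.
try:
    validate(Path("samples/example.gtc"))
except GtcMagicNumberError:
    print("not a GTC file")
except (GtcVersionError, GtcTruncatedFileError) as err:
    print(f"unusable GTC file: {err!r}")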
python
from selenium import webdriver
#import time
#import unittest

browser = webdriver.Chrome()
browser.get('http://localhost:8000')

#unittest.TestCase.assertTrue(browser.get('http://localhost:8000'),msg='OK!')
assert 'The install worked successfully!' in browser.title
print('pass!')
browser.quit()
python
# Django imports from django.shortcuts import render from django.core.urlresolvers import reverse_lazy from django.views import generic as django_generic from django.http import HttpResponse from django.contrib import messages # 3rd Party Package imports from braces.views import LoginRequiredMixin #Lackawanna Specific imports from .models import Collection from .forms import CollectionCreationForm from datapoint.models import Datapoint # REST API related imports from rest_framework import generics as rest_generics, permissions, filters from collection.serializers import CollectionSerializer class CollectionList(rest_generics.ListAPIView): queryset = Collection.objects.all() serializer_class = CollectionSerializer filter_fields = ('project', 'owner') class CollectionReadUpdateDeleteView(rest_generics.RetrieveUpdateDestroyAPIView): queryset = Collection.objects.all() serializer_class = CollectionSerializer class CollectionListView(LoginRequiredMixin, django_generic.ListView): model = Collection class CollectionCreateView(LoginRequiredMixin, django_generic.CreateView): form_class = CollectionCreationForm template_name = 'collection/collection_create.html' success_message = "Collection created. Start adding datapoints to it using the 'Add to Collection' button in the datapoint viewer." def form_valid(self, form): form.instance.owner = self.request.user messages.success(self.request, self.success_message) return super(CollectionCreateView, self).form_valid(form) def get_success_url(self): return reverse_lazy('project:detail', kwargs={'slug': self.object.project.slug}) class CollectionUpdateView(LoginRequiredMixin, django_generic.UpdateView): model = Collection fields = ('owner', 'project', 'name', 'description',) success_url = reverse_lazy('collection:list') class CollectionDeleteView(LoginRequiredMixin, django_generic.DeleteView): model = Collection success_message = "Collection deleted successfully" def delete(self, request, *args, **kwargs): messages.success(self.request, self.success_message) return super(CollectionDeleteView, self).delete(request, *args, **kwargs) def get_success_url(self): return reverse_lazy('project:detail', kwargs={'slug': self.object.project.slug}) class CollectionDetailView(LoginRequiredMixin, django_generic.DetailView): template_name = 'collection/collection_detail.html' model = Collection def get_context_data(self, **kwargs): context = super(CollectionDetailView, self).get_context_data(**kwargs) context['datapoints'] = Datapoint.objects.filter(collections=self.get_object()) return context class CollectionSettingsView(LoginRequiredMixin, django_generic.View): template_name = 'collection/collection_settings.html' model = Collection def get_context_data(self, **kwargs): context = super(CollectionSettingsView, self).get_context_data(**kwargs) return context
python
from __future__ import absolute_import, division, print_function

from .version import __version__

import __main__

try:
    import etelemetry
    etelemetry.check_available_version("incf-nidash/pynidm", __version__)
except ImportError:
    pass
python
import sys
import os

sys.path.append(os.path.join(os.getcwd(), 'deep_api'))

from deep_app import create_app

application = create_app()

if __name__ == '__main__':
    application.run()
python
""" 切片:定位多个元素 for number in range(开始,结束,间隔) """ message = "我是花果山水帘洞美猴王孙悟空" # 写法1:容器名[开始: 结束: 间隔] # 注意:不包含结束 print(message[2: 5: 1]) # 写法2:容器名[开始: 结束] # 注意:间隔默认为1 print(message[2: 5]) # 写法3:容器名[:结束] # 注意:开始默认为头 print(message[:5]) # 写法4:容器名[:] # 注意:结束默认为尾 print(message[:]) message = "我是花果山水帘洞美猴王孙悟空" # 水帘洞 print(message[5:8]) # 花果山水帘洞美猴王 print(message[2: -3]) # 空 print(message[1: 1]) # 是花果山水帘洞美猴王孙悟空 print(message[1: 100]) # 孙悟空 print(message[-3:]) print(message[:5]) # 特殊:空悟孙王猴美洞帘水山果花是我 print(message[::-1]) # 空孙猴洞水果是 print(message[::-2])
python
from flask import request, make_response
import json

from themint import app
from themint.service import message_service
from datatypes.exceptions import DataDoesNotMatchSchemaException


@app.route('/', methods=['GET'])
def index():
    return "Mint OK"


# TODO remove <title_number> below, as it is not used.
@app.route('/titles/<title_number>', methods=['POST'])
def post(title_number):
    try:
        message_service.wrap_message_for_system_of_record(request.json)
        #app.logger.debug("Minting new title with payload %s" % (request.json))
        return make_response(
            json.dumps({
                'message': 'OK',
                'status_code': 201
            }), 201)
    except DataDoesNotMatchSchemaException as e:
        app.logger.error('Validation error with data sent to mint %s' % e.field_errors)
        return make_response(
            json.dumps({
                'error': e.field_errors
            }), 400)
    except Exception as e:
        app.logger.error('Error when minting new', exc_info=e)
        return make_response(
            json.dumps({
                'message': 'Error',
                'status_code': 400
            }), 400)
python
import json import os os.environ['GIT_PYTHON_REFRESH'] = 'quiet' from configparser import ConfigParser import lstm_model as lm from itertools import product from datetime import datetime import data_preprocess as dp from sacred import Experiment from sacred.observers import MongoObserver ex = Experiment() ex.observers.append(MongoObserver(url='mongodb://132.72.80.61/netflow_roman', db_name='netflow_roman')) conf = ConfigParser() conf.read('config.ini') @ex.config def my_config(): folder_name = conf.get('Paths', 'output_folder_name') data_file = conf.get('Paths', 'data_file') data_path = conf.get('Paths', 'data_path') output_path = conf.get('Paths', 'output_path') lstm_units = None optimizer = None loss = None epochs = None batch_size = None n_steps = None slide_len = None repetitions = None n_features = None n_steps_out = None l_rate = None timestamp = None rep = None csv_logger = None overflow_thresh = None lstm_layers = None use_mini_batches = None @ex.main def handle_netflow(data_path, data_file, folder_name, output_path, lstm_units, optimizer, loss, epochs, batch_size, n_steps, n_features, slide_len, timestamp, rep, csv_logger, l_rate, n_steps_out, overflow_thresh, lstm_layers, use_mini_batches): if not os.path.exists(output_path + folder_name + '//' + str(int(timestamp))): os.mkdir(output_path + folder_name + '//' + str(int(timestamp))) X, y, dates_X, dates_y, all_data = dp.preprocess_netflow_data([data_path + data_file], n_steps, n_steps_out, slide_len, 2, overflow_thresh) lm.lstm_classification(batch_size, epochs, folder_name, loss, lstm_units, n_steps, optimizer, output_path, rep, slide_len, timestamp, l_rate, n_steps_out, X, y, dates_X, dates_y, ex, lstm_layers, all_data, use_mini_batches) def main(folder_name, output_path, lstm_units_list, optimizers, losses, epochs_list, batch_sizes, n_steps_list, n_features_list, slide_lens, repetitions, n_steps_out, l_rates, overflow_thresholds, lstm_layers, use_mini_batches): os.environ['CUDA_VISIBLE_DEVICES'] = '1' if not os.path.exists(output_path + folder_name): os.mkdir(output_path + folder_name) prod = product(lstm_units_list, optimizers, losses, epochs_list, batch_sizes, n_steps_list, n_features_list, slide_lens, l_rates, n_steps_out, overflow_thresholds, lstm_layers) for lstm_units, optimizer, loss, epochs, batch_size, n_steps, n_features, slide_len, l_rate, \ n_steps_out, overflow_thresh, n_lstm_layers in prod: timestamp = datetime.timestamp(datetime.now()) for rep in range(repetitions): ex.run(config_updates={'lstm_units': lstm_units, 'optimizer': optimizer, 'loss': loss, 'epochs': epochs, 'batch_size': batch_size, 'n_steps': n_steps, 'n_features': n_features, 'slide_len': slide_len, 'l_rate': l_rate, 'n_steps_out': n_steps_out, 'timestamp': timestamp, 'rep': rep, 'overflow_thresh': overflow_thresh, 'lstm_layers': n_lstm_layers, 'use_mini_batches': use_mini_batches}) lm.update_results_file(batch_size, epochs, folder_name, l_rate, loss, lstm_units, n_features, n_steps, optimizer, output_path, repetitions, slide_len, timestamp, n_steps_out) if __name__ == '__main__': main(conf.get('Paths', 'output_folder_name'), conf.get('Paths', 'output_path'), json.loads(conf.get('LSTM', 'lstm_units')), json.loads(conf.get('LSTM', 'optimizer')), json.loads(conf.get('LSTM', 'loss')), json.loads(conf.get('LSTM', 'epochs')), json.loads(conf.get('LSTM', 'batch_size')), json.loads(conf.get('LSTM', 'n_steps')), json.loads(conf.get('LSTM', 'n_features')), json.loads(conf.get('LSTM', 'slide_len')), json.loads(conf.get('LSTM', 'repetitions')), 
json.loads(conf.get('LSTM', 'look_forward')), json.loads(conf.get('LSTM', 'l_rates')), json.loads(conf.get('LSTM', 'overflow_threshold')), json.loads(conf.get('LSTM', 'lstm_layers')), conf.get('LSTM', 'use_mini_batches'))
python
from PIL import Image import os from os.path import join import scipy.io as sio import matplotlib.pyplot as plt import numpy as np from scipy import ndimage from Network import Network from utils import plot_images , sigmoid , dsigmoid_to_dval , make_results_reproducible , make_results_random make_results_reproducible() current_dir = os.path.abspath(".") data_dir = join(current_dir, 'data') file_name = join(data_dir,"ex3data1.mat") mat_dict = sio.loadmat(file_name) # print("mat_dict.keys() : ",mat_dict.keys()) X = mat_dict["X"] # print(f"X.shape : {X.shape}") y = mat_dict["y"] # make order random so test is ok because mnist is arrange # such that each 500 samples are the same indices = np.arange(len(y)) np.random.shuffle(indices) X = X[indices] y = y[indices] m = y.size # print(f"y.shape : {y.shape}") Y = np.zeros((m,10)) # fix Y for logistic regression for row,y_sample in enumerate(y): if y_sample == 10: # digit 0 is marked as 10 in y Y[row,0]=1 else: # digit 1-9 are marked as is y Y[row,y_sample]=1 def plot_image(ax , sample,_X,_y): image = _X[sample].reshape(20,20) ax.set_title(f'image of X[{sample}] , y[{sample}][0] : {_y[sample][0]} ') ax.imshow(image, cmap='gray') def plots(_X,_y): _ , axs = plt.subplots(2,2) # pick a sample to plot plot_image(axs[0,1],4300,_X,_y) sample = 10 plot_image(axs[0,0],sample,_X,_y) axs[1,0].set_title(f'X[{sample}]') axs[1,0].grid() axs[1,0].plot(_X[sample],'o') axs[1,1].set_title('y') axs[1,1].plot(_y,'o') plt.show() def compute_success_percentage(net,_X,_Y): count_correct=0 error_indecis = [] i_sample=0 for x_sample , y_sample_fixed in zip(_X,_Y): h = net.feedforward(x_sample) i_max = np.argmax(h) # index of max probability if y_sample_fixed[i_max] == 1: count_correct += 1 else: error_indecis.append(i_sample) i_sample += 1 return (100*count_correct/len(_Y) , error_indecis) def learn_nn(_X,_Y): net = Network([400, 30 , 10],sigmoid , dsigmoid_to_dval) epochs = 20 test_samples_percentage = 20 test_samples = int(m * (test_samples_percentage / 100)) traning_samples = m - test_samples training_data = [(x_sample.reshape(x_sample.size,1),y_sample.reshape(y_sample.size,1)) for x_sample , y_sample in zip(_X[:traning_samples,:],_Y[:traning_samples,:])] mini_batch_size = 1 learning_rate = 1 net.SGD(training_data, epochs, mini_batch_size, learning_rate) (correct_test_percentage , error_test_indices) = \ compute_success_percentage(net,_X[-test_samples:,:],_Y[-test_samples:,:]) (correct_training_percentage , error_training_indices) = \ compute_success_percentage(net,_X[:traning_samples,:],_Y[:traning_samples,:]) return ((correct_test_percentage,error_test_indices) , \ (correct_training_percentage,error_training_indices)) def learning_curves_engine(samples_vec): correct_trainings = [] correct_tests = [] for samples in samples_vec: ((correct_test_percentage ,_),(correct_training_percentage, _)) = \ learn_nn(X[:samples,:],Y[:samples,:]) correct_trainings.append(100 - correct_training_percentage) correct_tests.append(100 - correct_test_percentage) return (correct_trainings , correct_tests) def learning_curves(): make_results_random() # it is a must loops_for_mean = 5 samples_vec = [50 , 75, 100 , 200 , 500, 1000, 2000,5000] np_correct_trainings = np.array([]) np_correct_tests = np.array([]) _ , (ax1, ax2 , ax3) = plt.subplots(3) for i in range(loops_for_mean): print(f"\n********* loop : {i+1} ***************\n") correct_trainings , correct_tests = learning_curves_engine(samples_vec) np_correct_trainings = np.append(np_correct_trainings,correct_trainings) np_correct_tests = 
np.append(np_correct_tests,correct_tests) ax1.plot(samples_vec,correct_tests) ax1.set_title("test error [%]") ax2.plot(samples_vec,correct_trainings) ax2.set_title("traing error [%]") np_correct_trainings = np_correct_trainings.reshape((loops_for_mean,len(samples_vec))) np_correct_tests = np_correct_tests.reshape((loops_for_mean,len(samples_vec))) ax3.plot(samples_vec,np_correct_trainings.mean(axis=0),'x') ax3.plot(samples_vec,np_correct_tests.mean(axis=0),'o') ax3.set_title("mean error [%] . training - x , test - o") plt.tight_layout() plt.show() make_results_reproducible() # outside of this function i want reproducible def get_samples_to_show(_indices , _images_in_row , _max_images_to_show): possible_images = int(len(_indices) / _images_in_row) * _images_in_row return min(possible_images , _max_images_to_show) def learn(show_error_images=False): _ , (ax1,ax2) = plt.subplots(2,1) ((correct_test_percentage,error_test_indices) , \ (correct_training_percentage,error_training_indices)) = learn_nn(X,Y) print(f"percentage of correct estimations test : {correct_test_percentage}") print(f"percentage of correct estimations training : {correct_training_percentage}") if show_error_images: images_in_row = 20 max_images_to_show = 100 image_height = 20 image_width = 20 show_training = get_samples_to_show(error_training_indices ,\ images_in_row , max_images_to_show) show_test = get_samples_to_show(error_test_indices , \ images_in_row , max_images_to_show) plot_images(ax1 ,images_in_row,image_height, \ image_width, error_training_indices[:show_training],X,y) ax1.set_title(f"training error images. total error images : {len(error_training_indices)}") plot_images(ax2 ,images_in_row,image_height, \ image_width, error_test_indices[:show_test],X,y) ax2.set_title(f"test error images. total error images : {len(error_test_indices)}") plt.show() # plots(X,Y) learn(True) # learning_curves()
python
import json import re import os import pytest import requests import pytz import datetime as dt import connaisseur.trust_data import connaisseur.notary_api as notary_api from connaisseur.image import Image from connaisseur.tuf_role import TUFRole from connaisseur.exceptions import BaseConnaisseurException @pytest.fixture def napi(monkeypatch): monkeypatch.setenv("IS_ACR", "0") monkeypatch.setenv("SELFSIGNED_NOTARY", "1") return notary_api @pytest.fixture def acrapi(monkeypatch): monkeypatch.setenv("IS_ACR", "1") monkeypatch.setenv("SELFSIGNED_NOTARY", "1") return notary_api @pytest.fixture def mock_request(monkeypatch): class MockResponse: content: dict headers: dict status_code: int = 200 def __init__(self, content: dict, headers: dict = None, status_code: int = 200): self.content = content self.headers = headers self.status_code = status_code def raise_for_status(self): pass def json(self): return self.content def mock_get_request(**kwargs): regex = ( r"https:\/\/([^\/]+)\/v2\/([^\/]+)\/([^\/]+\/)?" r"([^\/]+)\/_trust\/tuf\/(.+)\.json" ) m = re.search(regex, kwargs["url"]) if m: host, registry, repo, image, role = ( m.group(1), m.group(2), m.group(3), m.group(4), m.group(5), ) if "unhealthy" in kwargs["url"]: return MockResponse({}, status_code=500) if "health" in kwargs["url"]: return MockResponse(None) if "azurecr.io" in kwargs["url"]: return MockResponse({"access_token": "d.e.f"}) if "token" in kwargs["url"]: auth = kwargs.get("auth") if "bad" in kwargs["url"]: if "no" in kwargs["url"]: return MockResponse({"nay": "butwhy"}) if "aint" in kwargs["url"]: return MockResponse({}, status_code=500) return MockResponse({"token": "token"}) elif auth: return MockResponse({"token": f"BA.{auth.username}.{auth.password}a"}) return MockResponse({"token": "no.BA.no"}) elif registry == "auth.io" and not kwargs.get("headers"): return MockResponse( {}, { "Www-Authenticate": ( 'Bearer realm="https://core.harbor.domain/service/' 'token",service="harbor-notary",scope="repository:' 'core.harbor.domain/connaisseur/sample-image:pull"' ) }, 401, ) elif registry == "empty.io": return MockResponse({}, status_code=404) else: with open(f"tests/data/{image}/{role}.json", "r") as file: file_content = json.load(file) return MockResponse(file_content) monkeypatch.setattr(requests, "get", mock_get_request) @pytest.fixture def mock_trust_data(monkeypatch): def validate_expiry(self): pass def trust_init(self, data: dict, role: str): self.schema_path = "res/targets_schema.json" self.kind = role self._validate_schema(data) self.signed = data["signed"] self.signatures = data["signatures"] monkeypatch.setattr( connaisseur.trust_data.TrustData, "validate_expiry", validate_expiry ) monkeypatch.setattr(connaisseur.trust_data.TargetsData, "__init__", trust_init) connaisseur.trust_data.TrustData.schema_path = "res/{}_schema.json" def trust_data(path: str): with open(path, "r") as file: return json.load(file) @pytest.mark.parametrize( "host, out", [("host", True), ("", False), ("https://unhealthy.registry", False)] ) def test_health_check(napi, mock_request, host: str, out: bool): assert napi.health_check(host) == out @pytest.mark.parametrize( "host, out", [("host", True), ("", False), ("https://unhealthy.registry", True)] ) def test_health_check_acr(acrapi, mock_request, host: str, out: bool): assert acrapi.health_check(host) == out @pytest.mark.parametrize("slfsig, out", [("1", True), ("0", False), ("", False)]) def test_is_notary_selfsigned(napi, slfsig: str, out: bool, monkeypatch): monkeypatch.setenv("SELFSIGNED_NOTARY", 
slfsig) assert napi.is_notary_selfsigned() == out @pytest.mark.parametrize( "image, role, out", [ ("alice-image:tag", "root", trust_data("tests/data/alice-image/root.json")), ( "alice-image:tag", "targets", trust_data("tests/data/alice-image/targets.json"), ), ( "alice-image:tag", "targets/phbelitz", trust_data("tests/data/alice-image/targets/phbelitz.json"), ), ( "auth.io/sample-image:tag", "targets", trust_data("tests/data/sample-image/targets.json"), ), ], ) def test_get_trust_data( napi, mock_request, mock_trust_data, image: str, role: str, out: dict ): trust_data_ = napi.get_trust_data("host", Image(image), TUFRole(role)) assert trust_data_.signed == out["signed"] assert trust_data_.signatures == out["signatures"] def test_get_trust_data_error(napi, mock_request, mock_trust_data): with pytest.raises(BaseConnaisseurException) as err: napi.get_trust_data("host", Image("empty.io/image:tag"), TUFRole("targets")) assert 'no trust data for image "empty.io/image:tag".' in str(err.value) def test_parse_auth(napi): header = ( 'Bearer realm="https://core.harbor.domain/service/token",' 'service="harbor-notary",scope="repository:core.harbor.domain/' 'connaisseur/sample-image:pull"' ) url = ( "https://core.harbor.domain/service/token?service=harbor-notary" "&scope=repository:core.harbor.domain/connaisseur/sample-image:pull" ) assert napi.parse_auth(header) == url @pytest.mark.parametrize( "header, error", [ ( 'Basic realm="https://mordor.de",scope="conquer"', "unsupported authentication type for getting trust data.", ), ( 'Super realm="https://super.de",service="toll"', "unsupported authentication type for getting trust data.", ), ( 'Bearer realmm="https://auth.server.com",service="auth"', "could not find any realm in authentication header.", ), ( 'Bearer realm="http://auth.server.com",service="auth"', "authentication through insecure channel.", ), ( 'Bearer realm="https://exam.pl/path/../traversal.key",service="no"', "potential path traversal.", ), ], ) def test_parse_auth_error(napi, header: str, error: str): with pytest.raises(BaseConnaisseurException) as err: napi.parse_auth(header) assert error in str(err.value) @pytest.mark.parametrize( "user, password, out", [ (None, None, "no.BA.no"), (None, "password123", "no.BA.no"), ("myname", "password456", "BA.myname.password456a"), ("myname", None, "BA.myname.a"), ], ) def test_get_auth_token(napi, mock_request, monkeypatch, user, password, out): if user: monkeypatch.setenv("NOTARY_USER", user) if password is not None: monkeypatch.setenv("NOTARY_PASS", password) url = "https://auth.server.good/token/very/good" assert napi.get_auth_token(url) == out def test_get_auth_token_acr(acrapi, mock_request): url = "https://myregistry.azurecr.io/auth/oauth2?scope=someId" assert acrapi.get_auth_token(url) == "d.e.f" @pytest.mark.parametrize( "url, error", [ ( "https://auth.server.bad/token/very/bad/very", "authentication token has wrong format.", ), ( "https://auth.server.bad/token/no/token", "no token in authentication server response.", ), ( "https://auth.server.bad/token/it/aint/there/token", "unable to get auth token, likely because of missing trust data.", ), ( "https://myregistry.azurecr.io/auth/oauth2?scope=someId", "no token in authentication server response.", ), ], ) def test_get_auth_token_error(napi, mock_request, url: str, error: str): with pytest.raises(BaseConnaisseurException) as err: napi.get_auth_token(url) assert error in str(err.value) @pytest.mark.parametrize( "url, error", [ ( "https://auth.server.bad/token/very/bad/very", "no token in 
authentication server response.", ), ( "https://auth.server.good/token/very/good", "no token in authentication server response.", ), ], ) def test_get_auth_token_error_acr(acrapi, mock_request, url: str, error: str): with pytest.raises(BaseConnaisseurException) as err: acrapi.get_auth_token(url) assert error in str(err.value)
python
def identidade(n):
    # n x n identity matrix
    I = [[0 for x in range(n)] for y in range(n)]
    for i in range(0, n):
        I[i][i] = 1
    return I


def transposta(mA):
    # transpose
    n = len(mA)
    mT = identidade(n)
    for i in range(n):
        for j in range(n):
            mT[i][j] = mA[j][i]
    print("Transposed matrix : ")
    for x in mT:
        print(*x, sep=" ")
    return mT


def inversa(A, arred=0):
    # Gauss-Jordan elimination on A, mirroring every row operation on the identity
    n = len(A)
    inversa = identidade(n)
    indices = list(range(n))  # helper for the "for" loops
    for fd in range(n):  # fd walks down the diagonal (the pivot row)
        fdScaler = 1.0 / A[fd][fd]
        # 1st: scale the pivot row, applying the same operation to the inverse
        for j in range(n):  # j walks over the columns
            A[fd][j] *= fdScaler
            inversa[fd][j] *= fdScaler
        # 2nd: operate on every row except row fd
        for i in indices[0:fd] + indices[fd+1:]:  # skip row fd
            crScaler = A[i][fd]  # factor used to scale the current row
            for j in range(n):  # current row - crScaler * pivot row
                A[i][j] = A[i][j] - crScaler * A[fd][j]
                inversa[i][j] = inversa[i][j] - crScaler * inversa[fd][j]
    if arred == 1:
        # optional truncation of the result to integers
        for i in range(n):
            for j in range(n):
                inversa[i][j] = int(inversa[i][j])
    print("Inverse matrix : ")
    for x in inversa:
        print(*x, sep=" ")
    return inversa


M = [[12, 3, 1], [8, 4, 3], [1, 1, 1]]
t = transposta(M)
inv = inversa(M, 1)
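# Quick check of the result above: multiplying the original matrix by the computed
# inverse should give roughly the identity. Note that inversa() modifies its argument
# in place, so a fresh copy of M is used here, and the integer truncation requested by
# inversa(M, 1) can knock entries off by floating-point error.
def multiplica(mA, mB):
    n = len(mA)
    return [[sum(mA[i][k] * mB[k][j] for k in range(n)) for j in range(n)]
            for i in range(n)]

M2 = [[12, 3, 1], [8, 4, 3], [1, 1, 1]]
print(multiplica(M2, inv))  # expected to be close to the 3x3 identity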
python
# Copyright 2019 The FastEstimator Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import os import re from fastestimator.summary import Summary from fastestimator.util.loader import PathLoader from fastestimator.util.util import strip_suffix from fastestimator.summary.logs import visualize_logs def _parse_file(file_path, file_extension): """A function which will parse log files into a dictionary of metrics Args: file_path (str): The path to a log file file_extension (str): The extension of the log file Returns: An experiment summarizing the given log file """ # TODO: need to handle multi-line output like confusion matrix experiment = Summary(strip_suffix(os.path.split(file_path)[1].strip(), file_extension)) with open(file_path) as file: for line in file: mode = None if line.startswith("FastEstimator-Train"): mode = "train" elif line.startswith("FastEstimator-Eval"): mode = "eval" if mode is None: continue parsed_line = re.findall(r"([^:^;\s]+):[\s]*([-]?[0-9]+[.]?[0-9]*);", line) step = parsed_line[0] assert step[0] == "step", \ "Log file (%s) seems to be missing step information, or step is not listed first" % file for metric in parsed_line[1:]: experiment.history[mode][metric[0]].update({int(step[1]): float(metric[1])}) return experiment def parse_log_files(file_paths, log_extension='.txt', smooth_factor=0, save=False, save_path=None, ignore_metrics=None, share_legend=True, pretty_names=False): """A function which will iterate through the given log file paths, parse them to extract metrics, remove any metrics which are blacklisted, and then pass the necessary information on the graphing function Args: file_paths: A list of paths to various log files log_extension: The extension of the log files smooth_factor: A non-negative float representing the magnitude of gaussian smoothing to apply (zero for none) save: Whether to save (true) or display (false) the generated graph save_path: Where to save the image if save is true. 
Defaults to dir_path if not provided ignore_metrics: Any metrics within the log files which will not be visualized share_legend: Whether to have one legend across all graphs (true) or one legend per graph (false) pretty_names: Whether to modify the metric names in graph titles (true) or leave them alone (false) Returns: None """ if file_paths is None or len(file_paths) < 1: raise AssertionError("must provide at least one log file") if save and save_path is None: save_path = file_paths[0] experiments = [] for file_path in file_paths: experiments.append(_parse_file(file_path, log_extension)) visualize_logs(experiments, save_path=save_path, smooth_factor=smooth_factor, share_legend=share_legend, pretty_names=pretty_names, ignore_metrics=ignore_metrics) def parse_log_dir(dir_path, log_extension='.txt', recursive_search=False, smooth_factor=1, save=False, save_path=None, ignore_metrics=None, share_legend=True, pretty_names=False): """A function which will gather all log files within a given folder and pass them along for visualization Args: dir_path: The path to a directory containing log files log_extension: The extension of the log files recursive_search: Whether to recursively search sub-directories for log files smooth_factor: A non-negative float representing the magnitude of gaussian smoothing to apply(zero for none) save: Whether to save (true) or display (false) the generated graph save_path: Where to save the image if save is true. Defaults to dir_path if not provided ignore_metrics: Any metrics within the log files which will not be visualized share_legend: Whether to have one legend across all graphs (true) or one legend per graph (false) pretty_names: Whether to modify the metric names in graph titles (true) or leave them alone (false) Returns: None """ loader = PathLoader(dir_path, input_extension=log_extension, recursive_search=recursive_search) file_paths = [x[0] for x in loader.path_pairs] parse_log_files(file_paths, log_extension, smooth_factor, save, save_path, ignore_metrics, share_legend, pretty_names)
python
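For reference, a hedged sketch of the log format that _parse_file above assumes; the file name and metric names are invented for illustration, only the "FastEstimator-Train"/"FastEstimator-Eval" prefixes and the "key: number;" pattern come from the code.

# A line the parser accepts (metric names are illustrative):
#   FastEstimator-Train: step: 100; ce: 2.3517; examples_per_sec: 512.4;
# 'step' must be the first key on the line; every following pair is stored as
# experiment.history["train"][metric][step] = value.

parse_log_files(["experiment1.txt"], log_extension=".txt", smooth_factor=1, save=False)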
from datetime import datetime, timedelta

import pytest
from api.models.timetables import Timetable
from fastapi import status
from fastapi.testclient import TestClient

pytestmark = pytest.mark.asyncio


@pytest.fixture
def timetable2(timetable):
    return Timetable(
        id=1,
        action="on",
        start=datetime.now(tz=None),
        duration=timedelta(minutes=3),
        repeat=timedelta(weeks=1)
    )


@pytest.fixture
def modified_timetable(timetable):
    return Timetable(
        id=11,
        action="on",
        start=datetime.now(tz=None),
        duration=timedelta(minutes=6),
        repeat=timedelta(weeks=7)
    )


class TestRouteTimetable:
    async def test_add_timetables(self, client: TestClient, timetable2: Timetable):
        # We need to use content=timetable.json() because datetime is not json serializable
        # but pydantic can serialize it.
        responseAdd = await client.post("/timetables", content=timetable2.json())
        timetableAdd = Timetable(**responseAdd.json())
        assert responseAdd.status_code == status.HTTP_200_OK

        responseGet = await client.get(f"/timetables/{timetable2.id}")
        timetableGet = Timetable(**responseGet.json())
        assert timetableAdd == timetableGet

    async def test_get_timetables_id(self, client: TestClient, timetable: Timetable):
        response = await client.get(f"/timetables/{timetable.id}")
        assert response.status_code == status.HTTP_200_OK
        assert timetable == Timetable(**response.json())

        response = await client.get("/timetables/666")
        assert response.status_code == status.HTTP_404_NOT_FOUND

    async def test_edit_a_timetable(self, client: TestClient, timetable: Timetable, modified_timetable: Timetable):
        response = await client.put(f"/timetables/{timetable.id}", content=modified_timetable.json())
        assert modified_timetable == Timetable(**response.json())
        assert timetable != Timetable(**response.json())

        response = await client.get(f"/timetables/{response.json()['id']}")
        assert modified_timetable == Timetable(**response.json())

        response = await client.put("/timetables/10", content=modified_timetable.json())
        assert response.status_code == status.HTTP_404_NOT_FOUND

    async def test_delete_timetable(self, client: TestClient, timetable: Timetable):
        response = await client.delete(f"/timetables/{timetable.id}")
        assert response.status_code == status.HTTP_200_OK

        response = await client.get(f"/timetables/{timetable.id}")
        assert response.status_code == status.HTTP_404_NOT_FOUND

        response = await client.delete(f"/timetables/{timetable.id}")
        assert response.status_code == status.HTTP_404_NOT_FOUND

    async def test_get_timetables(self, client: TestClient, timetable: Timetable):
        response = await client.get("/timetables")
        assert response.status_code == status.HTTP_200_OK
        assert len(response.json()) == 1
python
import enum from typing import Optional from sqlalchemy import ( BigInteger, Boolean, Column, DateTime, Enum, Float, ForeignKey, ForeignKeyConstraint, Index, Integer, String, UnicodeText, func ) from sqlalchemy.orm import relationship from .database import Base UNKNOWN_OWNER = "Unknown" HOUSING_DEVAL_FACTOR = 0.0042 class EventType(enum.Enum): HOUSING_WARD_INFO = "HOUSING_WARD_INFO" # LAND_UPDATE (house sold, reloed, autodemoed, etc) # https://github.com/SapphireServer/Sapphire/blob/master/src/common/Network/PacketDef/Zone/ServerZoneDef.h#L1888 # https://github.com/SapphireServer/Sapphire/blob/master/src/world/Manager/HousingMgr.cpp#L365 # LAND_SET_INITIALIZE (sent on zonein) # https://github.com/SapphireServer/Sapphire/blob/master/src/common/Network/PacketDef/Zone/ServerZoneDef.h#L1943 # https://github.com/SapphireServer/Sapphire/blob/master/src/world/Territory/HousingZone.cpp#L197 # LAND_SET_MAP (sent on zonein, after init, probably the useful one) # https://github.com/SapphireServer/Sapphire/blob/master/src/common/Network/PacketDef/Zone/ServerZoneDef.h#L1929 # https://github.com/SapphireServer/Sapphire/blob/master/src/world/Territory/HousingZone.cpp#L154 # other packets: # LAND_INFO_SIGN (view placard on owned house) - probably not useful, if we get this we already got a LAND_SET_MAP # and if the ward changed since then, we got a LAND_UPDATE # LAND_PRICE_UPDATE (view placard on unowned house) - similar to above, plus spammy if someone is buying a house # ==== Table defs ==== class Sweeper(Base): __tablename__ = "sweepers" id = Column(BigInteger, primary_key=True) name = Column(String) world_id = Column(Integer, ForeignKey("worlds.id")) last_seen = Column(DateTime, nullable=True, server_default=func.now(), onupdate=func.now()) world = relationship("World", back_populates="sweepers") events = relationship("Event", back_populates="sweeper") class World(Base): __tablename__ = "worlds" id = Column(Integer, primary_key=True) name = Column(String, index=True) sweepers = relationship("Sweeper", back_populates="world") class District(Base): __tablename__ = "districts" id = Column(Integer, primary_key=True) # territoryTypeId name = Column(String, unique=True) land_set_id = Column(Integer, unique=True, index=True) class PlotInfo(Base): __tablename__ = "plotinfo" territory_type_id = Column(Integer, ForeignKey("districts.id"), primary_key=True) plot_number = Column(Integer, primary_key=True) house_size = Column(Integer) house_base_price = Column(Integer) district = relationship("District", viewonly=True) class PlotState(Base): __tablename__ = "plot_states" __table_args__ = ( ForeignKeyConstraint( ("territory_type_id", "plot_number"), ("plotinfo.territory_type_id", "plotinfo.plot_number") ), ) id = Column(Integer, primary_key=True) world_id = Column(Integer, ForeignKey("worlds.id")) territory_type_id = Column(Integer, ForeignKey("districts.id")) ward_number = Column(Integer) plot_number = Column(Integer) last_seen = Column(Float) # UNIX seconds first_seen = Column(Float) is_owned = Column(Boolean) last_seen_price = Column(Integer, nullable=True) # null for unknown price owner_name = Column(String, nullable=True) # "Unknown" for unknown owner (UNKNOWN_OWNER), used to build relo graph is_fcfs = Column(Boolean) lotto_entries = Column(Integer, nullable=True) # null if the plot is FCFS world = relationship("World", viewonly=True) district = relationship("District", viewonly=True) plot_info = relationship("PlotInfo", viewonly=True) @property def num_devals(self) -> Optional[int]: # todo(6.1): delete me """ 
Returns the number of price this house has devalued. If the price is unknown, returns None. If price>max, returns 0. """ if self.last_seen_price is None: return None max_price = self.plot_info.house_base_price if self.last_seen_price >= max_price: return 0 return round((max_price - self.last_seen_price) / (HOUSING_DEVAL_FACTOR * max_price)) # common query indices Index( "ix_plot_states_loc_last_seen_desc", # these 4 make up the plot state's unique location PlotState.world_id, PlotState.territory_type_id, PlotState.ward_number, PlotState.plot_number, # and this is for convenience PlotState.last_seen.desc() ) Index("ix_plot_states_last_seen_desc", PlotState.last_seen.desc()) # store of all ingested events for later analysis (e.g. FC/player ownership, relocation/resell graphs, etc) class Event(Base): __tablename__ = "events" id = Column(Integer, primary_key=True) sweeper_id = Column(BigInteger, ForeignKey("sweepers.id", ondelete="SET NULL"), nullable=True, index=True) timestamp = Column(Float, index=True) event_type = Column(Enum(EventType), index=True) data = Column(UnicodeText) sweeper = relationship("Sweeper", back_populates="events")
python
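To make the num_devals formula above concrete, a small worked example with hypothetical prices; each devaluation step is HOUSING_DEVAL_FACTOR = 0.0042 (0.42%) of the base price.

# Hypothetical plot: base price 1_000_000, last seen at 958_000.
max_price = 1_000_000
last_seen_price = 958_000
# One deval step = 0.0042 * 1_000_000 = 4_200, so:
round((max_price - last_seen_price) / (0.0042 * max_price))   # -> 10 devaluations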
#!/usr/local/bin/python3
import os
import re
import sys
import argparse
import plistlib
import json


def modifyPbxproj():
    data = ''
    flag = False
    end = False
    with open(filePath, 'r') as file:
        for line in file.readlines():
            if not end:
                find = line.find('3B02599D20F49A43001F9C82 /* Debug */')
                if find != -1:
                    flag = True
                if flag and re.search('PRODUCT_BUNDLE_IDENTIFIER', line):
                    line = line.replace('quanbin.jin-test.sharkORMDemo', 'quanbin.jin-test.Demo')
                    end = True
            data += line
    with open(filePath, 'w') as file:
        file.writelines(data)


# modify display name, version and build in info.plist file
def modifyInfoPlist(displayName, version, build):
    plistPath = os.path.join(filePath, 'Butler/ButlerForRemain/ButlerForRemain-Info.plist')
    with open(plistPath, 'rb') as fp:
        plist = plistlib.load(fp)
    plist['CFBundleVersion'] = build
    plist['CFBundleDisplayName'] = displayName
    plist['CFBundleShortVersionString'] = version
    with open(plistPath, 'wb') as fp:
        plistlib.dump(plist, fp)


# Parse the JSON config file and check that the required keys are present.
def jsonParser(filePath):
    with open(filePath) as fp:
        jsonObj = json.load(fp)
    try:
        jsonObj["requestURL"]
        jsonObj["version"]
        jsonObj["build"]
        jsonObj["displayName"]
    except KeyError as undefinedKey:
        print(str(undefinedKey) + ' missed')
        exit(0)
    return jsonObj


def setRequestBaseURL(baseURL):
    # TODO: unfinished - should open the target source file and rewrite the request base URL.
    pass


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('filePath', help='project root directory')
    filePath = parser.parse_args().filePath
    # modifyInfoPlist('物管APP', '1.9.2_A1', '2')
    config = jsonParser('/Users/remain/Desktop/pythonTest/jsonFile')
    exit(0)
python
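A hedged sketch of the config file the script above expects: the four keys are exactly the ones jsonParser() validates and the path is the one hard-coded in the script, while the values themselves are invented for illustration.

# Write an example config that jsonParser() would accept.
example_config = {
    "requestURL": "https://example.com/api",   # illustrative value
    "version": "1.9.2",                        # illustrative value
    "build": "2",                              # illustrative value
    "displayName": "Demo",                     # illustrative value
}
with open('/Users/remain/Desktop/pythonTest/jsonFile', 'w') as fp:
    json.dump(example_config, fp, ensure_ascii=False, indent=2)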
# coding: utf-8 # # Dogs-vs-cats classification with ViT # # In this notebook, we'll finetune a [Vision Transformer] # (https://arxiv.org/abs/2010.11929) (ViT) to classify images of dogs # from images of cats using TensorFlow 2 / Keras and HuggingFace's # [Transformers](https://github.com/huggingface/transformers). # # **Note that using a GPU with this notebook is highly recommended.** # # First, the needed imports. from transformers import __version__ as transformers_version from transformers.utils import check_min_version check_min_version("4.13.0.dev0") from transformers import ViTFeatureExtractor, TFViTForImageClassification import tensorflow as tf from tensorflow.keras.utils import plot_model from tensorflow.keras.callbacks import TensorBoard from PIL import Image import os, sys, datetime import pathlib import numpy as np print('Using TensorFlow version:', tf.__version__, 'Keras version:', tf.keras.__version__, 'Transformers version:', transformers_version) # ## Data if 'DATADIR' in os.environ: DATADIR = os.environ['DATADIR'] else: DATADIR = "/scratch/project_2005299/data/" print('Using DATADIR', DATADIR) datapath = os.path.join(DATADIR, "dogs-vs-cats/train-2000/") assert os.path.exists(datapath), "Data not found at "+datapath # The training dataset consists of 2000 images of dogs and cats, split # in half. In addition, the validation set and test set consists of # 1000 and 22000 images, respectively. nimages = {'train':2000, 'validation':1000, 'test':22000} # ### Image paths and labels def get_paths(dataset): data_root = pathlib.Path(datapath+dataset) image_paths = list(data_root.glob('*/*')) image_paths = [str(path) for path in image_paths] image_count = len(image_paths) assert image_count == nimages[dataset], \ "Found {} images, expected {}".format(image_count, nimages[dataset]) return image_paths image_paths = dict() image_paths['train'] = get_paths('train') image_paths['validation'] = get_paths('validation') image_paths['test'] = get_paths('test') label_names = sorted(item.name for item in pathlib.Path(datapath+'train').glob('*/') if item.is_dir()) label_to_index = dict((name, index) for index,name in enumerate(label_names)) def get_labels(dataset): return [label_to_index[pathlib.Path(path).parent.name] for path in image_paths[dataset]] image_labels = dict() image_labels['train'] = get_labels('train') image_labels['validation'] = get_labels('validation') image_labels['test'] = get_labels('test') # ### Data loading # # First we specify the pre-trained ViT model we are going to use. The # model ["google/vit-base-patch16-224"] # (https://huggingface.co/google/vit-base-patch16-224) is pre-trained # on ImageNet-21k (14 million images, 21,843 classes) at resolution # 224x224, and fine-tuned on ImageNet 2012 (1 million images, 1,000 # classes) at resolution 224x224. # # We'll use a pre-trained ViT feature extractor that matches the ViT # model to preprocess the input images. 
VITMODEL = 'google/vit-base-patch16-224' feature_extractor = ViTFeatureExtractor.from_pretrained(VITMODEL) # Next we define functions to load and preprocess the images: def _load_and_process_image(path, label): img = Image.open(path.numpy()).convert("RGB") proc_img = feature_extractor(images=img, return_tensors="np")['pixel_values'] return np.squeeze(proc_img), label def load_and_process_image(path, label): image, label = tf.py_function(_load_and_process_image, (path, label), (tf.float32, tf.int32)) image.set_shape([None, None, None]) label.set_shape([]) return image, label # ### TF Datasets # # Let's now define our TF Datasets for training and validation data. BATCH_SIZE = 32 dataset_train = tf.data.Dataset.from_tensor_slices((image_paths['train'], image_labels['train'])) dataset_train = dataset_train.map(load_and_process_image, num_parallel_calls=tf.data.AUTOTUNE) dataset_train = dataset_train.shuffle(len(dataset_train)).batch( BATCH_SIZE, drop_remainder=True) dataset_validation = tf.data.Dataset.from_tensor_slices( (image_paths['validation'], image_labels['validation'])) dataset_validation = dataset_validation.map(load_and_process_image, num_parallel_calls=tf.data.AUTOTUNE) dataset_validation = dataset_validation.batch(BATCH_SIZE, drop_remainder=True) # ## Model # # ### Initialization model = TFViTForImageClassification.from_pretrained( VITMODEL, num_labels=1, ignore_mismatched_sizes=True) LR = 1e-5 optimizer = tf.keras.optimizers.Adam(learning_rate=LR) loss = tf.keras.losses.BinaryCrossentropy(from_logits=False) metric = 'accuracy' model.compile(optimizer=optimizer, loss=loss, metrics=[metric]) print(model.summary()) # ### Learning logdir = os.path.join( os.getcwd(), "logs", "dvc-vit-"+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) print('TensorBoard log directory:', logdir) os.makedirs(logdir) callbacks = [TensorBoard(log_dir=logdir)] EPOCHS = 4 history = model.fit(dataset_train, validation_data=dataset_validation, epochs=EPOCHS, verbose=2, callbacks=callbacks) # ### Inference # # We now evaluate the model using the test set. First we'll define the # TF Dataset for the test images. dataset_test = tf.data.Dataset.from_tensor_slices((image_paths['test'], image_labels['test'])) dataset_test = dataset_test.map(load_and_process_image, num_parallel_calls=tf.data.AUTOTUNE) dataset_test = dataset_test.batch(BATCH_SIZE, drop_remainder=False) scores = model.evaluate(dataset_test, verbose=2) print("Test set %s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
python
################################################################################# # Autor: Richard Alexander Cordova Herrera # TRABAJO FIN DE MASTER # CURSO 2019-2020 # MASTER EN INTERNET DE LAS COSAS # FACULTAD DE INFORMATICA # UNIVERSIDAD COMPLUTENSE DE MADRID ################################################################################# ################################################################################# # Importa librerias necesarias para el funcionamiento de la aplicacion import csv import pymongo from datetime import datetime from datetime import timedelta ################################################################################# ################################################################################# # Inicio - Definicion Funciones ################################################################################# # Funcion dataGrap: Genera un pydictionary def dataGraph(dateStart, dateEnd, dataAcc, dataGyr): dataAccAux = [] dataGyrAux = [] dataAccAux.append({'time' : dateStart, 'sensorAccX' : dataAcc[0], 'sensorAccY' : dataAcc[1], 'sensorAccZ' : dataAcc[2]}) dataAccAux.append({'time' : dateEnd, 'sensorAccX' : dataAcc[0], 'sensorAccY' : dataAcc[1], 'sensorAccZ' : dataAcc[2]}) dataGyrAux.append({'time' : dateStart, 'sensorGyrX' : dataGyr[0], 'sensorGyrY' : dataGyr[1], 'sensorGyrZ' : dataGyr[2]}) dataGyrAux.append({'time' : dateEnd, 'sensorGyrX' : dataGyr[0], 'sensorGyrY' : dataGyr[1], 'sensorGyrZ' : dataGyr[2]}) return dataAccAux, dataGyrAux ################################################################################# ################################################################################# # Inicio Programa General # Path General, archivos del codigo globalPath = "/home/tfm-iot/Documentos/TFM/Ejecutables/" # Bandera Inicio de la aplicacion print("Incio Script: " + str(datetime.now())) ################################################################################# ################################################################################# # Abrir archivo configuracion parametros de busqueda archivo = open(globalPath + "TFMMIoTIgnDataSearch.txt", 'r') dateParameters = archivo.read() archivo.close() if len(dateParameters) == 0 : dateStart = 1577461660762 dateEnd = 1577461668910 cmd = 0 else: dateStart = int(dateParameters[0 : dateParameters.find(" ")]) dateParametersAux = dateParameters[dateParameters.find(" ") + 1 :] dateEnd = int(dateParametersAux[0 : dateParametersAux.find(" ")]) dateParametersAux = dateParametersAux[dateParametersAux.find(" ") + 1 :] cmd = int(dateParametersAux[0 : dateParametersAux.find(" ")]) cmdSearch = "$eq" if cmd == 0: cmdSearch = "$gte" ################################################################################# ################################################################################# # Configuracion conexion base de datos MongoDB serverIp = "192.168.1.52" serverIp = "cripta.fdi.ucm.es" serverPort = "27017" serverPort = "27118" database = "TFMMIoT" collection = "Dobot" serverAddress = "mongodb://" + serverIp + ":" + serverPort + "/" myclient = pymongo.MongoClient(serverAddress) mydb = myclient[database] mycol = mydb[collection] ################################################################################# ################################################################################# # Busqueda de datos en MongoDB, correspondiente al rango # ingresado queryData = mycol.find({"sensor": "movimiento", "movCode" : {cmdSearch: cmd}, "time" : { "$gt" : dateStart, "$lt" : dateEnd }},{ "_id" : 0, 
"time" : 1, "sensorAccX" : 1, "sensorAccZ" : 1, "sensorAccY" : 1, "sensorGyrX" : 1, "sensorGyrZ" : 1, "sensorGyrY" : 1}).sort("time", pymongo.ASCENDING) data = list(queryData) ################################################################################# ################################################################################# # Caso 1: No existen Datos en el rango de fechas seleccionado. # Accion a realizar: Busqueda del ultimo dato registrado if (len(data) == 0 ) : queryData = mycol.find({"sensor": "movimiento", "time" : { "$lt" : dateEnd }},{ "_id" : 0, "time" : 1, "sensorAccX" : 1, "sensorAccY" : 1, "sensorAccZ" : 1, "sensorGyrX" : 1, "sensorGyrY" : 1, "sensorGyrZ" : 1}).sort("time", pymongo.DESCENDING).limit(1) data = list(queryData) ############################################################## # Caso 1.1: No existen ningun registro almacenado. # Accion a Realizar: Grafica con valores en 0 if len(data) == 0 : dataAccAux = [0, 0, 0] dataGyrAux = [0, 0, 0] dataAcc, dataGyr = dataGraph(dateStart, dateEnd, dataAccAux, dataGyrAux) ############################################################## ############################################################## # Caso 1.2: Existen registros almacenados. # Accion a Realizar: Seleccionar ultimo valor y construir # la estructura para graficar los datos else : dataAccAux = [data[0]["sensorAccX"], data[0]["sensorAccY"], data[0]["sensorAccZ"]] dataGyrAux = [data[0]["sensorGyrX"], data[0]["sensorGyrY"], data[0]["sensorGyrZ"]] dataAcc, dataGyr = dataGraph(dateStart, dateEnd, dataAccAux, dataGyrAux) ############################################################## ################################################################################# ################################################################################# # Caso 2: Existen Datos en el rango de fechas seleccionado. 
# Accion a realizar: Procesar datos y construir la estructura # para graficar los datos else : dataSize = 6000 if len(data) < dataSize : dataSize = len(data) dataToSkip = int(len(data) / dataSize) mycol.create_index('time') dataAcc = [] dataGyr = [] for i in range(dataSize) : dataAcc.append({'time' : data[i*dataToSkip]['time'], 'sensorAccX' : data[i*dataToSkip]['sensorAccX'], 'sensorAccY' : data[i*dataToSkip]['sensorAccY'], 'sensorAccZ' : data[i*dataToSkip]['sensorAccZ']}) dataGyr.append({'time' : data[i*dataToSkip]['time'], 'sensorGyrX' : data[i*dataToSkip]['sensorGyrX'], 'sensorGyrY' : data[i*dataToSkip]['sensorGyrY'], 'sensorGyrZ' : data[i*dataToSkip]['sensorGyrZ']}) ################################################################################# ################################################################################# # Actualizar Ficheros dataAcc.txt y dataGyr.txt, para graficar # en Ignition fileName = "TFMMIoTIgnDataAcc.txt" file = open(globalPath + fileName, "w") file.write(str(dataAcc)) file.close() fileName = "TFMMIoTIgnDataGyr.txt" file = open(globalPath + fileName, "w") file.write(str(dataGyr)) file.close() fileName = "TFMMIoTIgnDataSearch.txt" file = open(globalPath + fileName, "w") file.write(str(dateStart) + " " + str(dateEnd) + " " + str(cmd) + " Fin") file.close() ################################################################################# ################################################################################# # Bandera Fin del Script, imprimir datos importantes print("Dimensiones Data Query: " + str(len(data))) print("Dimensiones Data: " + str(len(dataAcc))) print("Fin Script: " + str(datetime.now())) print("Datos Consulta") print("Fecha Inicio: " + str(dateStart) + " Fecha Fin: " + str(dateEnd)) #################################################################################
python
from django.apps import AppConfig


class TambahVaksinConfig(AppConfig):
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'tambah_vaksin'
python
# Data Preprocessing Template

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)

# Simple linear regression
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Predicting the Test set result
y_pred = regressor.predict(X_test)

# Visualising the Training set result
plt.scatter(X_train, y_train, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience (Training Set)')
plt.xlabel('Experience (Years)')
plt.ylabel('Salary ($)')
plt.show()

# Visualising the Test set result
plt.scatter(X_test, y_test, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience (Test Set)')
plt.xlabel('Experience (Years)')
plt.ylabel('Salary ($)')
plt.show()
python
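A small, hedged addition to the regression template above: once the regressor is fitted, its quality and parameters can be inspected with standard scikit-learn calls; the 5-year experience value is invented for illustration.

# Coefficient of determination (R^2) on the held-out test set.
print('Test R^2:', regressor.score(X_test, y_test))

# The fitted line itself: salary ~ intercept_ + coef_[0] * years_of_experience.
print('Slope:', regressor.coef_[0], 'Intercept:', regressor.intercept_)

# Predicted salary for a hypothetical 5 years of experience.
print(regressor.predict([[5.0]]))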
"""Utilities relative to hunspell itself."""
python
# -*- coding: utf-8 -*- """ Created on Thu Jan 30 19:39:10 2020 @author: esol """ from neqsim.thermo import fluid, addOilFractions, printFrame, dataFrame, fluidcreator,createfluid,createfluid2, TPflash, phaseenvelope from neqsim.process import pump, clearProcess, stream, valve, separator, compressor, runProcess, viewProcess, heater, mixer, recycle from neqsim.thermo import fluid, TPflash, phaseenvelope, fluidComposition from neqsim.process import clearProcess, stream, valve, separator,compressor, runProcess, viewProcess, heater, mixer, recycle # Start by creating a fluid in neqsim uing a predifined fluid (dry gas, rich gas, light oil, black oil) #Set temperature and pressure and do a TPflash. Show results in a dataframe. feedPressure = 50.0 feedTemperature = 30.0 fluid1 = fluid("cpa") # create a fluid using the SRK-EoS fluid1.addComponent("CO2",1e-10) fluid1.addComponent("methane",1e-10) fluid1.addComponent("ethane",1e-10) fluid1.addComponent("propane",1e-10) fluid1.addComponent("water",1e-10) fluid1.addComponent("TEG",1e-10) fluid1.setMixingRule(10) fluid1.setMultiPhaseCheck(True) fluidcomposition = [0.031, 0.9297, 0.0258, 0.0135, 6.48413454028242e-002, 1.0e-15] fluidComposition(fluid1, fluidcomposition) fluid1.setTemperature(feedTemperature, "C") fluid1.setPressure(feedPressure, "bara") fluid1.setTotalFlowRate(5.0, "MSm3/day") fluid2= fluid("cpa") fluid2.addComponent("CO2", 1.0e-10) fluid2.addComponent("methane", 1.0e-10) fluid2.addComponent("ethane", 1.0e-10) fluid2.addComponent("propane", 1.0e-10) fluid2.addComponent("water", 1.0, 'kg/sec') fluid2.addComponent("TEG", 99.0, 'kg/sec') fluid2.setMixingRule(10) fluid2.setMultiPhaseCheck(True) fluid2.setTemperature(313.15, "K") fluid2.setPressure(75.0, "bara") fluid2.setTotalFlowRate(10625.0, 'kg/hr') # demonstration of setting up a simple process calculation clearProcess() stream1 = stream(fluid1) glycolstream = stream(fluid2) separator1 = separator(stream1, "inlet separator") compressor1 = compressor(separator1.getGasOutStream(), 75.0) heater1 = heater(compressor1.getOutStream()) heater1.setOutTemperature(313.0) mixer1 = mixer() mixer1.addStream(heater1.getOutStream()) mixer1.addStream(glycolstream) scrubberLP = separator(mixer1.getOutStream()) valve1 = valve(scrubberLP.getLiquidOutStream(), 10.0, "Glycol valve") flashDrum = separator(valve1.getOutStream()) heater1 = heater(flashDrum.getLiquidOutStream()) heater1.setOutTemperature(273.15+195.0) stripper = separator(heater1.getOutStream()) cooler1 = heater(stripper.getLiquidOutStream()) cooler1.setOutTemperature(313.0) pump1 = pump(cooler1.getOutStream(), 75.0) runProcess() print("1") runProcess() print("2") runProcess() print("3")
python
# Copyright 2018 Lawrence Kesteloot # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import sys import sha import stat import os # Using a hash is much slower and doesn't handle duplicate files well. Leaving this here # because we may want to later add a way to detect duplicate files. USE_HASH = False # Take a filename and escape spaces. Doesn't handle all shell special characters (quotes, etc.). def shell_friendly(filename): return filename.replace(" ", "\\ ") # Return a unique identifier for this file, as a constant-width string. def get_file_identifier(pathname): if USE_HASH: contents = open(pathname).read() identifier = sha.sha(contents).hexdigest() else: # Use inode number. s = os.stat(pathname) identifier = "%-15d" % s[stat.ST_INO] return identifier # Generate the data file. def generate_file(): for filename in glob.glob("*"): print get_file_identifier(filename) + " " + filename # Read the data file and rename the files. def rename_files(data_file): # Read data file. id_to_new_filename = {} for line in open(data_file): line = line.strip() # Break at the first space. space = line.find(" ") if space == -1: sys.stderr.write("WARNING: This line has no filename: " + line) else: file_id = line[:space] filename = line[space + 1:].strip() id_to_new_filename[file_id] = filename # Read file identifiers from disk. id_to_old_filename = {} for filename in glob.glob("*"): id_to_old_filename[get_file_identifier(filename).strip()] = filename # Generate the script. for file_id, old_filename in id_to_old_filename.items(): new_filename = id_to_new_filename.get(file_id) if not new_filename: sys.stderr.write("Identifier " + file_id + " not found in data file: " + old_filename + "\n") else: del id_to_new_filename[file_id] if new_filename != old_filename: print "mv " + shell_friendly(old_filename) + " " + shell_friendly(new_filename) # See if any lines in the file were unused. for file_id, new_filename in id_to_new_filename.items(): sys.stderr.write("Filename not used in data file: " + new_filename + "\n") def main(): if len(sys.argv) == 1: generate_file() elif len(sys.argv) == 2: rename_files(sys.argv[1]) else: sys.stderr.write("usage: RENAME.py [filename]\n") if __name__ == "__main__": main()
python
def check():
    import numpy as np
    dataOK = np.loadtxt('nusselt_ref.out')
    dataChk = np.loadtxt('data/post/wall/nusselt.out')
    tol = 1e-6
    nts = 10000
    chk = (np.mean(dataOK[-nts:, 2]) - np.mean(dataChk[-nts:, 2])) < tol
    return chk


def test_answer():
    assert check()
python
import pytt

assert pytt.name == "pytt"
python
from discord.ext import commands import discord import cogs import random import asyncio import requests from discord import File import os from datetime import datetime import traceback import tabula import json bot = commands.Bot(command_prefix='$') class VipCog(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async def chujwdupekuczkowiexe(self, ctx): try: with open("planlekcji.json", "r") as f: pl = json.load(f) dzien = datetime.today().strftime('%A') if dzien == "Monday": embed=discord.Embed(title="plan lekcji Poniedzialek",description=str(pl["Monday"]), color=0xE657EE) embed.add_field(value=str(pl["Tuesday"]), name="Wtorek",inline=False) await ctx.send(embed=embed) if dzien == "Tuesday": embed=discord.Embed(title="Plan lekcji Wtorek", description=str(pl["Tuesday"]), color=0xE657EE) embed.add_field(value=str(pl["Wednesday"]), name="Sroda",inline=False) await ctx.send(embed=embed) if dzien == "Wednesday": embed=discord.Embed(title="Plan lekcji Sroda", description=str(pl["Wednesday"]), color=0xE657EE) embed.add_field(value=str(pl["Thursday"]), name="Czwartek",inline=False) await ctx.send(embed=embed) if dzien == "Thursday": embed=discord.Embed(title="Plan lekcji Czwartek", description=str(pl["Thursday"]), color=0xE657EE) embed.add_field(value=str(pl["Friday"]), name="Piatek",inline=False) await ctx.send(embed=embed) if dzien == "Friday": embed=discord.Embed(title="Plan lekcji Piatek", description=str(pl["Friday"]), color=0xE657EE) embed.add_field(value=str(pl["Monday"]), name="Poniedzialek",inline=False) await ctx.send(embed=embed) except: await ctx.send(traceback.format_exc()) @commands.command() async def chujciwdupkekurwo(self, ctx, *, arg): try: await ctx.send(arg, tts=True) except: await ctx.send(f"```python\n{traceback.format_exc()}```") def setup(bot): bot.add_cog(VipCog(bot)) print('Vip Gotowe')
python
from collections import OrderedDict from copy import deepcopy from functools import partial from ml_collections import ConfigDict import numpy as np import jax import jax.numpy as jnp import flax import flax.linen as nn from flax.training.train_state import TrainState import optax import distrax from .jax_utils import next_rng, value_and_multi_grad, mse_loss from .model import Scalar, update_target_network from .utils import prefix_metrics class BC(object): @staticmethod def get_default_config(updates=None): config = ConfigDict() config.policy_lr = 3e-4 config.optimizer_type = 'adam' config.alpha_multiplier = 0.0 config.use_automatic_entropy_tuning = True config.target_entropy = 0.0 if updates is not None: config.update(ConfigDict(updates).copy_and_resolve_references()) return config def __init__(self, config, policy): self.config = self.get_default_config(config) self.policy = policy self.observation_dim = policy.observation_dim self._train_states = {} optimizer_class = { 'adam': optax.adam, 'sgd': optax.sgd, }[self.config.optimizer_type] policy_params = self.policy.init(next_rng(), next_rng(), jnp.zeros((10, self.observation_dim))) self._train_states['policy'] = TrainState.create( params=policy_params, tx=optimizer_class(self.config.policy_lr), apply_fn=None ) model_keys = ['policy'] if self.config.use_automatic_entropy_tuning: self.log_alpha = Scalar(0.0) self._train_states['log_alpha'] = TrainState.create( params=self.log_alpha.init(next_rng()), tx=optimizer_class(self.config.policy_lr), apply_fn=None ) model_keys.append('log_alpha') self._model_keys = tuple(model_keys) self._total_steps = 0 def train(self, batch): self._total_steps += 1 self._train_states, metrics = self._train_step( self._train_states, next_rng(), batch ) return metrics @partial(jax.jit, static_argnames=('self')) def _train_step(self, train_states, rng, batch): def loss_fn(train_params, rng): observations = batch['observations'] actions = batch['actions'] batch_size, _ = jnp.shape(observations) loss_collection = {} rng, split_rng = jax.random.split(rng) new_actions, log_pi = self.policy.apply(train_params['policy'], split_rng, observations) if self.config.use_automatic_entropy_tuning: alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean() loss_collection['log_alpha'] = alpha_loss alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier else: alpha_loss = 0.0 alpha = self.config.alpha_multiplier """ Policy loss """ rng, split_rng = jax.random.split(rng) log_probs = self.policy.apply(train_params['policy'], observations, actions, method=self.policy.log_prob) policy_loss = (alpha*log_pi - log_probs).mean() loss_collection['policy'] = policy_loss negative_log_probs = -log_probs.mean() return tuple(loss_collection[key] for key in self.model_keys), locals() train_params = {key: train_states[key].params for key in self.model_keys} (_, aux_values), grads = value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params, rng) new_train_states = { key: train_states[key].apply_gradients(grads=grads[i][key]) for i, key in enumerate(self.model_keys) } metrics = dict( policy_loss=aux_values['policy_loss'], negative_log_probs=aux_values['negative_log_probs'], alpha_loss=aux_values['alpha_loss'], alpha=aux_values['alpha'], ) return new_train_states, metrics def log_likelihood(self, observations, actions): actions = jnp.clip(actions, -1 + 1e-5, 1 - 1e-5) log_prob = self.policy.apply(self.train_params['policy'], observations, 
actions, method=self.policy.log_prob) return log_prob.mean() @property def model_keys(self): return self._model_keys @property def train_states(self): return self._train_states @property def train_params(self): return {key: self.train_states[key].params for key in self.model_keys} @property def total_steps(self): return self._total_steps
python
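Reading loss_fn in the BC class above directly, the quantities being minimized can be written out; this is only a transcription of the code, with alpha the entropy multiplier and H_target = config.target_entropy:

$$\mathcal{L}_{\text{policy}} = \mathbb{E}_{(s,a)\sim\text{batch}}\big[\,\alpha\,\log\pi_\theta(\tilde a\mid s) \;-\; \log\pi_\theta(a\mid s)\,\big],\qquad \tilde a \sim \pi_\theta(\cdot\mid s)$$

$$\mathcal{L}_{\log\alpha} = -\log\alpha\cdot\operatorname{mean}\big(\log\pi_\theta(\tilde a\mid s) + H_{\text{target}}\big),\qquad \alpha = e^{\log\alpha}\cdot\texttt{alpha\_multiplier}$$

With the default alpha_multiplier of 0.0 the first term vanishes and the policy loss reduces to plain negative log-likelihood behaviour cloning.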
import sys import os from inspect import getmembers from types import BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType import zipfile from util import isIronPython, isJython, getPlatform cur_path = os.path.abspath(os.path.dirname(__file__)) distPaths = [os.path.join(cur_path, '../../../indigo/dist'), os.path.join(cur_path, '../../dist/')] success = False if isIronPython(): raise RuntimeError("Indigo coverage is not supported in .NET") elif isJython(): raise RuntimeError("Indigo coverage is not supported in Java") else: dll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/python")) rdll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/plugins/renderer/python")) idll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/plugins/inchi/python")) bdll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/plugins/bingo/python")) if not os.path.exists(os.path.join(dll_full_path, 'lib')): for distPath in distPaths: if not os.path.exists(distPath): continue dll_full_path = '%s/python' % (distPath) for item in os.listdir(distPath): if item.startswith('indigo-python-') and item.endswith('.zip') and (item.find(getPlatform()) != -1 or item.find('universal') != -1): curdir = os.path.abspath(os.curdir) os.chdir(distPath) if 'INDIGO_TEST_MODE' not in os.environ: with zipfile.ZipFile(item) as zf: zf.extractall() os.environ['INDIGO_TEST_MODE'] = '1' os.chdir(curdir) dll_full_path = os.path.abspath(os.path.join(cur_path, distPath, item.replace('.zip', ''))) break if not os.path.exists(dll_full_path): continue break sys.path.insert(0, dll_full_path) sys.path.insert(0, rdll_full_path) sys.path.insert(0, idll_full_path) sys.path.insert(0, bdll_full_path) from indigo import Indigo, IndigoObject, IndigoException from indigo_renderer import IndigoRenderer from indigo_inchi import IndigoInchi from bingo import Bingo, BingoException, BingoObject success = True if not success: raise RuntimeError('Indigo not found at %s' % distPaths) class IndigoObjectCoverageWrapper(IndigoObject): def __init__(self, dispatcher, id, parent=None): IndigoObject.__init__(self, dispatcher, id, parent) self._type = None self._type = int(self.dbgInternalType()[1:3]) def __getattribute__(self, item): dispatcher = object.__getattribute__(self, 'dispatcher') type = object.__getattribute__(self, '_type') if dispatcher is not None: if item in dispatcher._indigoObjectCoverageDict: dispatcher._indigoObjectCoverageDict[item] += 1 if type: if type not in dispatcher._indigoObjectCoverageByTypeDict: dispatcher._indigoObjectCoverageByTypeDict[type] = {} dispatcher._indigoObjectCoverageByTypeDict[type][item] = 1 else: if item not in dispatcher._indigoObjectCoverageByTypeDict[type]: dispatcher._indigoObjectCoverageByTypeDict[type][item] = 1 else: dispatcher._indigoObjectCoverageByTypeDict[type][item] += 1 return object.__getattribute__(self, item) class IndigoCoverageWrapper(Indigo): def __init__(self, path=None): Indigo.__init__(self, path) if isJython() or isIronPython(): IndigoObject = IndigoObjectCoverageWrapper # TODO: Change standard IndigoObject to IndigoObjectCoverageWrapper else: self.IndigoObject = IndigoObjectCoverageWrapper self._indigoObjectCoverageDict = dict() self._indigoObjectCoverageByTypeDict = dict() m = self.createMolecule() for item in getmembers(m): if type(item[1]) in (BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType) and not item[0].startswith('_'): self._indigoObjectCoverageDict[item[0]] = 0 
self._indigoCoverageDict = dict() for item in getmembers(self): if type(item[1]) in (BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType) and not item[0].startswith('_'): self._indigoCoverageDict[item[0]] = 0 def __getattribute__(self, item): try: indigoCoverageDict = object.__getattribute__(self, '_indigoCoverageDict') if indigoCoverageDict: if item in indigoCoverageDict: indigoCoverageDict[item] += 1 except AttributeError: pass return object.__getattribute__(self, item) def version(self): return super(IndigoCoverageWrapper, self).version() + '-coverage' class IndigoObjectTypeEnum: SCANNER = 1 MOLECULE = 2 QUERY_MOLECULE = 3 REACTION = 4 QUERY_REACTION = 5 OUTPUT = 6 REACTION_ITER = 7 REACTION_MOLECULE = 8 GROSS = 9 SDF_LOADER = 10 SDF_SAVER = 11 RDF_MOLECULE = 12 RDF_REACTION = 13 RDF_LOADER = 14 SMILES_MOLECULE = 15 SMILES_REACTION = 16 MULTILINE_SMILES_LOADER = 17 ATOM = 18 ATOMS_ITER = 19 RGROUP = 20 RGROUPS_ITER = 21 RGROUP_FRAGMENT = 22 RGROUP_FRAGMENTS_ITER = 23 ARRAY = 24 ARRAY_ITER = 25 ARRAY_ELEMENT = 26 MOLECULE_SUBSTRUCTURE_MATCH_ITER = 27 MOLECULE_SUBSTRUCTURE_MATCHER = 28 REACTION_SUBSTRUCTURE_MATCHER = 29 SCAFFOLD = 30 DECONVOLUTION = 31 DECONVOLUTION_ELEM = 32 DECONVOLUTION_ITER = 33 PROPERTIES_ITER = 34 PROPERTY = 35 FINGERPRINT = 36 BOND = 37 BONDS_ITER = 38 ATOM_NEIGHBOR = 39 ATOM_NEIGHBORS_ITER = 40 SUPERATOM = 41 SUPERATOMS_ITER = 42 DATA_SGROUP = 43 DATA_SGROUPS_ITER = 44 REPEATING_UNIT = 45 REPEATING_UNITS_ITER = 46 MULTIPLE_GROUP = 47 MULTIPLE_GROUPS_ITER = 48 GENERIC_SGROUP = 49 GENERIC_SGROUPS_ITER = 50 SGROUP_ATOMS_ITER = 51 SGROUP_BONDS_ITER = 52 DECOMPOSITION = 53 COMPONENT = 54 COMPONENTS_ITER = 55 COMPONENT_ATOMS_ITER = 56 COMPONENT_BONDS_ITER = 57 SUBMOLECULE = 58 SUBMOLECULE_ATOMS_ITER = 59 SUBMOLECULE_BONDS_ITER = 60 MAPPING = 61 REACTION_MAPPING = 62 SSSR_ITER = 63 SUBTREES_ITER = 64 RINGS_ITER = 65 EDGE_SUBMOLECULE_ITER = 66 CML_MOLECULE = 67 CML_REACTION = 68 MULTIPLE_CML_LOADER = 69 SAVER = 70 ATTACHMENT_POINTS_ITER = 71 DECOMPOSITION_MATCH = 72 DECOMPOSITION_MATCH_ITER = 73 TAUTOMER_ITER = 74 TAUTOMER_MOLECULE = 75 IndigoObjectTypeDict = { 1: 'SCANNER', 2: 'MOLECULE', 3: 'QUERY_MOLECULE', 4: 'REACTION', 5: 'QUERY_REACTION', 6: 'OUTPUT', 7: 'REACTION_ITER', 8: 'REACTION_MOLECULE', 9: 'GROSS', 10: 'SDF_LOADER', 11: 'SDF_SAVER', 12: 'RDF_MOLECULE', 13: 'RDF_REACTION', 14: 'RDF_LOADER', 15: 'SMILES_MOLECULE', 16: 'SMILES_REACTION', 17: 'MULTILINE_SMILES_LOADER', 18: 'ATOM', 19: 'ATOMS_ITER', 20: 'RGROUP', 21: 'RGROUPS_ITER', 22: 'RGROUP_FRAGMENT', 23: 'RGROUP_FRAGMENTS_ITER', 24: 'ARRAY', 25: 'ARRAY_ITER', 26: 'ARRAY_ELEMENT', 27: 'MOLECULE_SUBSTRUCTURE_MATCH_ITER', 28: 'MOLECULE_SUBSTRUCTURE_MATCHER', 29: 'REACTION_SUBSTRUCTURE_MATCHER', 30: 'SCAFFOLD', 31: 'DECONVOLUTION', 32: 'DECONVOLUTION_ELEM', 33: 'DECONVOLUTION_ITER', 34: 'PROPERTIES_ITER', 35: 'PROPERTY', 36: 'FINGERPRINT', 37: 'BOND', 38: 'BONDS_ITER', 39: 'ATOM_NEIGHBOR', 40: 'ATOM_NEIGHBORS_ITER', 41: 'SUPERATOM', 42: 'SUPERATOMS_ITER', 43: 'DATA_SGROUP', 44: 'DATA_SGROUPS_ITER', 45: 'REPEATING_UNIT', 46: 'REPEATING_UNITS_ITER', 47: 'MULTIPLE_GROUP', 48: 'MULTIPLE_GROUPS_ITER', 49: 'GENERIC_SGROUP', 50: 'GENERIC_SGROUPS_ITER', 51: 'SGROUP_ATOMS_ITER', 52: 'SGROUP_BONDS_ITER', 53: 'DECOMPOSITION', 54: 'COMPONENT', 55: 'COMPONENTS_ITER', 56: 'COMPONENT_ATOMS_ITER', 57: 'COMPONENT_BONDS_ITER', 58: 'SUBMOLECULE', 59: 'SUBMOLECULE_ATOMS_ITER', 60: 'SUBMOLECULE_BONDS_ITER', 61: 'MAPPING', 62: 'REACTION_MAPPING', 63: 'SSSR_ITER', 64: 'SUBTREES_ITER', 65: 'RINGS_ITER', 66: 
'EDGE_SUBMOLECULE_ITER', 67: 'CML_MOLECULE', 68: 'CML_REACTION', 69: 'MULTIPLE_CML_LOADER', 70: 'SAVER', 71: 'ATTACHMENT_POINTS_ITER', 72: 'DECOMPOSITION_MATCH', 73: 'DECOMPOSITION_MATCH_ITER', 74: 'TAUTOMER_ITER', 75: 'TAUTOMER_MOLECULE', }
python
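A hedged usage sketch for the coverage wrapper above: after exercising a few API calls, the private counting dictionaries it maintains can be read back as a crude coverage report. The molecule SMILES is arbitrary, and the specific Indigo methods used here (loadMolecule, countAtoms, smiles) are assumed to exist in the installed Indigo build.

indigo = IndigoCoverageWrapper()
mol = indigo.loadMolecule("C1=CC=CC=C1")
print(mol.countAtoms(), mol.smiles())
print(indigo.version())  # plain Indigo version string with '-coverage' appended

# Methods that were never called still appear with a count of 0, which is
# what makes the wrapper usable as a coverage report.
called = {name: count for name, count in indigo._indigoCoverageDict.items() if count > 0}
print(called)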
import os

from .base import *

BASE_SITE_URL = 'https://rapidpivot.com'
AMQP_URL = 'amqp://guest:guest@localhost:5672//'

ALLOWED_HOSTS = ['rapidpivot.com']
ADMINS = (('Name', '[email protected]'),)

DEBUG = False
TEMPLATE_DEBUG = False

# SSL/TLS Settings
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
os.environ['wsgi.url_scheme'] = 'https'

# Email Settings
EMAIL_USE_TLS = True
EMAIL_HOST = retrieve_secret_configuration("EMAIL_HOST")
EMAIL_HOST_USER = retrieve_secret_configuration("EMAIL_USER")
EMAIL_HOST_PASSWORD = retrieve_secret_configuration("EMAIL_PASS")
EMAIL_PORT = retrieve_secret_configuration("EMAIL_PORT")

# TEMPLATE_DIRS += ("",)
# INSTALLED_APPS += ("",)

# Basic Logging Configuration
# https://docs.djangoproject.com/en/1.7/topics/logging/
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'file': {
            'level': 'INFO',
            'class': 'logging.FileHandler',
            'filename': 'RAPID.log',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['file'],
            'level': 'INFO',
            'propagate': True,
        },
    },
}
python
from setuptools import find_packages, setup
import os

# load README.md as long_description
long_description = ''
if os.path.exists('README.md'):
    with open('README.md', 'r') as f:
        long_description = f.read()

setup(
    name='XMCD Projection',
    version='1.0.0',
    packages=find_packages(include=['xmcd_projection']),
    description='Library for simulating XMCD projection signal',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Luka Skoric',
    license='MIT LICENSE',
    install_requires=[
        'trimesh>=3.9.12',
        'numpy==1.20.2',
        'matplotlib>=3.4.1',
        'numba>=0.53.1',
        'joblib>=1.0.1',
        'PyQt5>=5.15.4',
        'pyqtgraph>=0.11.1',
        'scikit-image>=0.18.1',
        'scipy>=1.6.2',
        'PyOpenGL>=3.1.5',
        'cached-property>=1.5.2',
        'pandas>=1.0.5',
        'meshio>=4.0.16',
        'tqdm<=4.46.1'
    ]
)
python
from random import sample
from time import sleep

lista = []
print('\033[0;34m-' * 30)
print(' \033[0;34mJOGOS DA MEGA SENA')
print('\033[0;34m-\033[m' * 30)
j = int(input('Quantos jogos você deseja gerar? '))
print('SORTEANDO...')
for i in range(0, j):
    ran = sorted(sample(range(1, 60), 6))
    lista.append(ran[:])
    sleep(2)
    print(f'Jogo {i+1}:{lista[i]}')
python
from PyQt5.QtWidgets import QWidget, QMainWindow
from PyQt5.QtCore import Qt

import gi.repository
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk

from utils import Rect
# from keyboard import Keybroad
# from button import Button
# moved inside classes to prevent cyclic import


# Window(parent, title, width=1280, height=720)
#
# Simple class to create PyQt5 windows.
# Default window size is 1280x720 and position the center of the screen.
# If another window is passed as the first argument, when showing the child
# window the parent one will temporarily freeze.
#
# Use:
#
# class App(Window):
#     def __init__(self, parent, title):
#         super().__init__(parent, title, modal)
#
# primary = App(None, 'This is my primary window')
# secondary = App(primary, 'This is my secondary window')
#
# primary.show()

class Window(QMainWindow):
    def __init__(self, parent, title, modal=True, width=1280, height=720):
        if parent == None:
            super().__init__()
        else:
            super().__init__(parent)

        self.parent = parent
        self.title = title
        self.modal = modal
        self.width = width
        self.height = height

        screen = Gdk.Screen.get_default()
        window_x = (screen.get_width() - width) / 2
        window_y = (screen.get_height() - height) / 2

        self.setWindowTitle(self.title)
        if modal:
            self.setWindowModality(Qt.ApplicationModal)
        # setGeometry expects ints, so the centering offsets are cast explicitly.
        self.setGeometry(int(window_x), int(window_y), width, height)


class KeycapConfigWindow(Window):
    def __init__(self, parent, width=800, height=600):
        # Window.__init__ only accepts (parent, title, modal, width, height); the
        # extra 'tooltip' argument in the original call has been dropped so the
        # call matches that signature.
        super().__init__(
            parent,
            'window title',
            True,
            width, height)
        from button import Button

        self.ui = []
        Button(self, self.ui, 'save', Rect(742, 560, 10, 10), lambda: self.hide(), '')
        self.show()
python
# Generated by Django 3.1.5 on 2021-02-01 18:11

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('rentalsapp', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tenants',
            name='amount',
            field=models.CharField(default='none', max_length=30),
        ),
    ]
python
# -*- coding: utf-8 -*-


def main():
    from itertools import accumulate

    n = int(input())
    # Sort by size in ascending order up front.
    a = sorted(list(map(int, input().split())))
    sum_a = list(accumulate([0] + a))

    # There are at most N colours.
    ans = [False for _ in range(n)]
    # Initialization: the largest monster is guaranteed to survive until the end.
    ans[n - 1] = True

    # Key insight:
    # "monster i survives until the end" can be restated as: while repeatedly
    # absorbing others, it never runs into a monster more than twice its own size.
    # Judge the monsters in order from largest to smallest.
    for i in range(n - 2, -1, -1):
        if a[i + 1] <= 2 * a[i]:
            if ans[i + 1]:
                ans[i] = True
        # A smaller monster j first absorbs every monster smaller than itself,
        # and only then compares its size with monster j+1.
        elif a[i + 1] <= 2 * sum_a[i + 1]:
            if ans[i + 1]:
                ans[i] = True
        else:
            ans[i] = False

    print(sum(ans))


if __name__ == '__main__':
    main()
python
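A small worked example of the survival rule above; the input values are invented for illustration.

# Suppose n = 3 and the sizes are "1 1 5", so a = [1, 1, 5] and sum_a = [0, 1, 2, 7].
#   - ans[2] (size 5) is True by initialization.
#   - i = 1: a[2] = 5 > 2 * a[1] = 2 and 5 > 2 * sum_a[2] = 4, so ans[1] = False:
#     even after absorbing the other size-1 monster (total 2), size 5 is out of reach.
#   - i = 0: a[1] = 1 <= 2 * a[0] = 2, but ans[1] is False, so ans[0] stays False.
# The program prints 1: only the largest monster can survive.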
# Python code for 2D random walk. import json import sys import random import time import math import logging import asyncio from .DataAggregator import DataAggregator from .PositioningTag import PositioningTag from pywalkgen.walk_model import WalkAngleGenerator from pywalkgen.pub_sub import PubSubAMQP from pywalkgen.imu import IMU from pywalkgen.raycast import Particle, StaticMap from pywalkgen.collision_detection import CollisionDetection logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) handler = logging.FileHandler('/tmp/walkgen.log') handler.setLevel(logging.ERROR) formatter = logging.Formatter('%(levelname)-8s-[%(filename)s:%(lineno)d]-%(message)s') handler.setFormatter(formatter) logger.addHandler(handler) # ========================================= WALK PATTERN GENERATOR =================================================== class WalkPatternGenerator: def __init__(self, eventloop, config_file): """ Initialize walk pattern generator Walk pattern generator consists of :param eventloop: event loop for amqp pub sub :param config_file: config file """ try: # id assigned to the personnel. self.walker_id = config_file["id"] # initialize the start coordinates of the personnel self.pos = {'x': config_file["start_coordinates"]["x"], 'y': config_file["start_coordinates"]["y"], 'z': config_file["start_coordinates"]["z"]} walk_attribute = config_file["attribute"]["walk"] # Walk angle generator for the personnel walk self.walk_angle_gen = WalkAngleGenerator(mid_point=walk_attribute["sigmoid_attributes"]["mid_point"], steepness=walk_attribute["sigmoid_attributes"]["steepness"], max_value=math.radians( walk_attribute["sigmoid_attributes"]["min_angle"]), level_shift=math.radians( walk_attribute["sigmoid_attributes"]["max_angle"]), walk_direction_factor=walk_attribute["direction_factor"], walk_angle_deviation_factor=walk_attribute[ "angle_deviation_factor"]) # IMU tag self.imu_tag = IMU(config_file=config_file) # Collision detection for static and dynamic obstacles self.collision = CollisionDetection(scene=StaticMap(config_file=config_file["map"]), particle=Particle(particle_id=config_file["id"], x=config_file["start_coordinates"]["x"], y=config_file["start_coordinates"]["y"]), env_collision_distance=config_file["attribute"]["collision"][ "distance"]["environment"], robot_collision_distance=config_file["attribute"]["collision"][ "distance"]["robot"]) # UWB tag self.uwb_tag = PositioningTag(config=config_file["attribute"]["positioning"]["outliers"]) self.data_aggregators = [] for area in config_file["map"]["area_division"]: self.data_aggregators.append(DataAggregator(area_config=area)) # set Walk attributes and angle generators self.max_walk_speed = walk_attribute["max_walk_speed"] self.walk_dimension = walk_attribute["walk_dimension"] self.walk_angle = 0 # position related states self.pos_prev = {'x': self.pos['x'], 'y': self.pos['y'], 'z': self.pos['z']} self.net_step_size = 0 # time stamp information self.time_now = 0 self.time_past = 0 # sample time information self.interval = config_file['attribute']['other']['interval'] self.distance_factor = config_file["attribute"]["walk"]["distance_factor"] self.distance_in_sample_time = 0 # Publisher protocol = config_file["protocol"] self.publishers = [] if protocol["publishers"] is not None: for publisher in protocol["publishers"]: if publisher["type"] == "amq": logger.debug('Setting Up AMQP Publisher for Robot') self.publishers.append( PubSubAMQP( eventloop=eventloop, config_file=publisher, binding_suffix=self.walker_id ) ) else: 
logger.error("Provide protocol amq config") raise AssertionError("Provide protocol amq config") # Subscriber self.subscribers = [] if protocol["subscribers"] is not None: for subscriber in protocol["subscribers"]: if subscriber["type"] == "amq": logger.debug('Setting Up AMQP Subcriber for Robot') if subscriber["exchange"] == "control_exchange": self.subscribers.append( PubSubAMQP( eventloop=eventloop, config_file=subscriber, binding_suffix="", app_callback=self._consume_telemetry_msg ) ) else: self.subscribers.append( PubSubAMQP( eventloop=eventloop, config_file=subscriber, binding_suffix=self.walker_id, app_callback=self._consume_telemetry_msg ) ) else: logger.error("Provide protocol amq config") raise AssertionError("Provide protocol amq config") except Exception as e: logger.critical("unhandled exception", e) sys.exit(-1) def _consume_telemetry_msg(self, **kwargs): """ consume telemetry messages :param kwargs: must contain following information 1. exchange_name 2. binding_name 3. message_body :return: none """ # extract message attributes from message exchange_name = kwargs["exchange_name"] binding_name = kwargs["binding_name"] message_body = json.loads(kwargs["message_body"]) # check for matching subscriber with exchange and binding name in all subscribers for subscriber in self.subscribers: if subscriber.exchange_name == exchange_name: if "visual.generator.robot" in binding_name: # extract robot id from binding name binding_delimited_array = binding_name.split(".") robot_id = binding_delimited_array[len(binding_delimited_array) - 1] msg_attributes = message_body.keys() # check for must fields in the message attributes if ("id" in msg_attributes) and ("base" in msg_attributes) \ and ("shoulder" in msg_attributes) and ("elbow" in msg_attributes): # check if robot id matches with 'id' field in the message if robot_id == message_body["id"]: logger.debug(f'Sub: exchange: {exchange_name} msg {message_body}') # extract information from message body base_shoulder = [message_body["base"], message_body["shoulder"]] shoulder_elbow = [message_body["shoulder"], message_body["elbow"]] elbow_wrist = [message_body["elbow"], message_body["wrist"]] prefix = "robot_" + message_body["id"] # update robot in scene for collision detection self.collision.update_scene(obstacle_id=prefix + "_base_shoulder", points=base_shoulder, shape="line") self.collision.update_scene(obstacle_id=prefix + "_shoulder_elbow", points=shoulder_elbow, shape="line") self.collision.update_scene(obstacle_id=prefix + "_elbow_wrist", points=elbow_wrist, shape="line") return async def _update3d(self, tdelta=-1): """ update walker position in 3D :param tdelta: time duration between successive updates :return: """ try: # calculate loop time if tdelta > 0: # valid time delta received as input paramter timedelta = tdelta elif self.time_now == 0 and self.time_past == 0: # time delta calculation for first update cycle self.time_now = time.time() self.time_past = self.time_now timedelta = 0.01 else: # time delta calculation based on run time self.time_now = time.time() timedelta = self.time_now - self.time_past self.time_past = self.time_now assert (timedelta >= 0), f"Time delta: {timedelta}, can't be negative" # Calculate Walk angle for next step, and also check if walker is in collision course ranging, collision_avoidance_msg = self.collision.ranging() self.walk_angle, collision_decision = \ self.walk_angle_gen.get_walk_angle(angle=self.walk_angle, ranging=ranging, velocity=self.net_step_size / timedelta) step_length = {'x': 0, 'y': 0, 'z': 
0} if collision_decision: # self.net_step_size = self.net_step_size * 0.2 self.net_step_size = random.uniform(self.net_step_size, self.distance_in_sample_time * 0.6134) else: # step size decision new_distance_in_sample_time = random.uniform(self.distance_in_sample_time, self.max_walk_speed * timedelta * 0.6134) self.distance_in_sample_time = (self.distance_in_sample_time * (1 - self.distance_factor)) \ + (new_distance_in_sample_time * self.distance_factor) self.net_step_size = random.uniform(self.net_step_size, self.distance_in_sample_time * 0.6134) # step length in each of the axis if self.walk_dimension == 1: step_length['x'] = self.net_step_size * math.cos(self.walk_angle) step_length['y'] = 0 step_length['z'] = 0 elif self.walk_dimension == 2: step_length['x'] = self.net_step_size * math.cos(math.radians(self.walk_angle)) step_length['y'] = self.net_step_size * math.sin(math.radians(self.walk_angle)) step_length['z'] = 0 else: step_length['x'] = self.net_step_size * math.cos(self.walk_angle) step_length['y'] = self.net_step_size * math.sin(self.walk_angle) step_length['z'] = math.sin(math.sqrt((math.pow(self.x_step_length, 2) + math.pow( self.y_step_length, 2)))) # todo write logic for z_step_length based on angle # walk based on step size calculated in each direction self.pos['x'] = self.pos_prev['x'] + step_length['x'] self.pos['y'] = self.pos_prev['y'] + step_length['y'] self.pos['z'] = self.pos_prev['z'] + step_length['z'] # update particle's position self.collision.update_particles(x=self.pos['x'], y=self.pos['y']) heading = {'ref_heading': {'end': (self.pos['x'], self.pos['y']), 'start': (self.pos_prev['x'], self.pos_prev['y'])}} # prepare for next iteration self.pos_prev['x'] = self.pos['x'] self.pos_prev['y'] = self.pos['y'] self.pos_prev['z'] = self.pos['z'] uwb_measurement = self.uwb_tag.get_measurement(ref=[self.pos['x'], self.pos['y'], self.pos['z']]) data_aggregator_id = self.get_area_information(ref=[self.pos['x'], self.pos['y']]) result = { "measurement": "walk", "time": time.time_ns(), "id": self.walker_id, "data_aggregator_id": data_aggregator_id, "walk_angle": self.walk_angle, "x_step_length": step_length['x'], "y_step_length": step_length['y'], "z_step_length": step_length['z'], "x_ref_pos": self.pos['x'], "y_ref_pos": self.pos['y'], "z_ref_pos": self.pos['z'], "x_uwb_pos": uwb_measurement[0], "y_uwb_pos": uwb_measurement[1], "z_uwb_pos": uwb_measurement[2], "view": ranging } result.update(heading) imu_result = self.imu_tag.update(cur_position=result, tdelta=timedelta) result.update(imu_result) result.update({"timestamp": round(time.time() * 1000)}) plm_result = { "id": result["id"], "data_aggregator_id": result["data_aggregator_id"], "x_uwb_pos": result["x_uwb_pos"], "y_uwb_pos": result["y_uwb_pos"], "z_uwb_pos": result["z_uwb_pos"], 'x_imu_vel': result['x_imu_vel'], 'y_imu_vel': result['y_imu_vel'], 'z_imu_vel': result['z_imu_vel'], "timestamp": result['timestamp'] } return result, plm_result except Exception as e: logger.critical("unhandled exception", e) sys.exit(-1) async def publish(self, exchange_name, msg, external_binding_suffix=None): ''' publishes amqp message :param exchange_name: name of amqp exchange :param msg: message to be published :param external_binding_suffix: binding suffix. 
suffix is appended to the end of the binding name
        :return:
        '''
        for publisher in self.publishers:
            if exchange_name == publisher.exchange_name:
                await publisher.publish(message_content=msg, external_binding_suffix=external_binding_suffix)
                logger.debug(f'Pub: exchange: {exchange_name} msg {msg}')

    async def connect(self):
        """
        connects amqp publishers and subscribers
        :return:
        """
        for publisher in self.publishers:
            await publisher.connect()
        for subscriber in self.subscribers:
            await subscriber.connect(mode="subscriber")

    async def update(self):
        """
        update walk generator.
        Note: this function needs to be called in a loop every update cycle
        :return:
        """
        result = dict()
        if self.interval >= 0:
            all_result, plm_result = await self._update3d()
            result.update(all_result)
            await self.publish(exchange_name='generator_personnel', msg=json.dumps(result).encode())

        # sleep until it's time for the next sample
        if self.interval >= 0:
            await asyncio.sleep(delay=self.interval)
        else:
            await asyncio.sleep(delay=0)

    def get_states(self):
        return {"x_ref_pos": self.pos['x'], "y_ref_pos": self.pos['y'], "z_ref_pos": self.pos['z']}

    def get_area_information(self, ref):
        for data_aggregator in self.data_aggregators:
            if data_aggregator.locate(point=[ref[0], ref[1]]):
                return data_aggregator.id
        return None
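
# Illustrative driver (assumption: `walker` stands for an instance of the generator
# class above, constructed elsewhere with a valid event loop and AMQP protocol config;
# this helper is only a sketch and is not part of the original module).
async def run_walker(walker):
    await walker.connect()      # open AMQP publisher/subscriber connections once
    while True:
        await walker.update()   # compute the next step, publish it, then sleep for walker.interval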
python
from tkinter import* from tkinter import messagebox from PIL import ImageTk import sqlite3 root=Tk() root.geometry("1196x600") root.title("Hotel Management System") #bg=PhotoImage(file ="D:\Python\HotelManagement\Background.png") #bglabel=Label(root,image=bg) #bglabel.place(x=0,y=0) backimage=PhotoImage("D:\Python\HotelManagement\Back.png") #====database conn=sqlite3.connect('Hotel_Management.db') c=conn.cursor() # c.execute("""CREATE TABLE room( # Name varchar, # Phone_number varchar, # address varchar, # adhar varchar, # occupants varchar, # category varchar)""") conn.commit() conn.close() class BookingPage: global root global backimage global confirm_function def __init__(self,root): self.root=root root.geometry("1196x600") root.title("Room Booking") #self.bag=PhotoImage(file ="D:\Python\HotelManagement\Background.png") #self.bglabel=Label(root,image=self.bag) #self.bglabel.place(x=0,y=0) self.pane=Canvas(root,bg="White",height=1000,width=800) self.pane.place(relx=0.5,y=500,anchor=CENTER) self.label=Label(root,text="Availability",bg="White",font=("Product Sans",20)).place(relx=0.5,rely=0.05,anchor=CENTER) #==================================================================================================================================================================================================== # Getting the number of occupants self.Occupants=StringVar() OccupantLabel=Label(root,text="Select Number of Occupants",bg="White",font=("Product Sans",12)).place(relx=0.3,rely=0.55) self.OccupantSelect=OptionMenu(root,self.Occupants,*["1","2","3"],command=self.NumOfOcc()) self.OccupantSelect.config(indicatoron=0) self.OccupantSelect.configure(bg="White",highlightthickness=0,highlightbackground="White",borderwidth=0) self.OccupantSelect.place(relx=0.7,rely=0.55,anchor=CENTER) self.Occupants.set("1") #==================================================================================================================================================================================================== # choosing the category of the room self.Category=StringVar() self.CategoryLabel=Label(root,text="Select Category",bg="White",font=("Product Sans",12)).place(relx=0.3,rely=0.65) self.CategorySelect=OptionMenu(root,self.Category,*["A/C","Non A/C","Presidential Suite"]) self.CategorySelect.config(indicatoron=0) self.CategorySelect.configure(bg="White",highlightthickness=0,highlightbackground="Grey",borderwidth=0) self.CategorySelect.place(relx=0.6,rely=0.65) self.Category.set("A/C") #==================================================================================================================================================================================================== # Info label self.InfoLabel=Label(root,bg="White",font=("Product Sans",12),text="") self.InfoLabel.place(relx=0.5,rely=0.5,anchor=CENTER) # Price Lablel self.PriceLabel=Label(root,bg="White",font=("Product Sans",12)) self.PriceLabel.place(relx=0.5,rely=0.6,anchor=CENTER) #==================================================================================================================================================================================================== # Buttons self.IDProof=StringVar() self.IDProof.set("Aadhar") self.label=Label(root,text="Enter Customer Details",bg="White",font=("Product Sans",20)).place(relx=0.5,rely=0.05,anchor=CENTER) self.name=Label(root,text="Name",font=("Product Sans",12),bg="White").place(relx=0.3,rely=0.1) self.Number=Label(root,text="Phone Number",font=("Product 
Sans",12),bg="White").place(relx=0.3,rely=0.2) self.Address=Label(root,text="Address",font=("Product Sans",12),bg="White").place(relx=0.3,rely=0.3) self.ID=OptionMenu(root,self.IDProof,*["Aadhar","Driving Licence","Other"]) self.ID.config(indicatoron=0,font=("Product Sans",12)) self.ID.configure(bg="White",highlightthickness=0,highlightbackground="Grey",borderwidth=0) self.ID.place(relx=0.6,rely=0.4) self.IDLabel=Label(root,text="ID Proof",bg="White",font=("Product Sans",12)).place(relx=0.3,rely=0.4) self.EnterName=Entry(root,font=("Product Sans",12)).place(relx=0.6,rely=0.1) self.EnterNumber=Entry(root,font=("Product Sans",12)).place(relx=0.6,rely=0.2) self.EnterAddress=Entry(root,font=("Product Sans",12)).place(relx=0.6,rely=0.3) self.EnterIdProof=Entry(root,font=("Product Sans",12)).place(relx=0.6,rely=0.45) self.bookbutton=Button(root,text="Confirm",command=self.confirm_function) self.bookbutton.place(relx=0.5,rely=0.95,anchor=CENTER) self.Days=Label(root,text="No of days",font=("Product Sans",12),bg="White").place(relx=0.3,rely=0.75) self.Day=IntVar() self.DaysSelect=OptionMenu(root,self.Day,*[1,2,3,4]) self.DaysSelect.config(indicatoron=0) self.DaysSelect.configure(bg="White",highlightthickness=0,highlightbackground="Grey",borderwidth=0) self.DaysSelect.place(relx=0.6,rely=0.75) self.Day.set(1) self.subtotal=Label(root,bg="white",font=("Product Sans",12)) self.subtotal.place(relx=0.5,rely=0.85,anchor=CENTER) # def Book(self): #Book Button Command # self.RoomCategory=self.Category.get() # self.days=self.Day.get() # if self.RoomCategory=="Non A/C": # price=1000 # elif self.RoomCategory=="A/C": # price=1500 # elif self.RoomCategory=="Presidential Suite": # price=2000 # self.totalPrice=price*self.days # self.totalPrice=str(self.totalPrice) # self.TXT=("Your subtotal will be "+self.totalPrice ) # self.subtotal.config(text=self.TXT) def ShowInfo(self): self.InfoLabel.config(text="Info will be shown") self.ShowBook() def NumOfOcc(self): NumberOfOccupants=self.Occupants.get() return NumberOfOccupants def RoomCategoryFun(self,Category): RoomCategory=self.Category.get() if RoomCategory=="Non A/C": self.PriceLabel.config(text="Price: 1000") elif RoomCategory=="A/C": self.PriceLabel.config(text="Price: 1500") elif RoomCategory=="Presidential Suite": self.PriceLabel.config(text="Price: 2000") def Back(self): for widget in root.winfo_children(): widget.destroy() SplashScreen(root) def FinalPage(self): for widget in root.winfo_children(): widget.destroy() UserInfo(root) # def BillingPage(self): # self.newWindow = Toplevel(self.root) # self.app = BillingPage(self.newWindow) def confirm_function(self): conn=sqlite3.connect('Hotel_Management.db') c=conn.cursor() c.execute("INSERT INTO room VALUES(:Name,:Phone_number,:address,:adhar,:occupants,:category)", { 'Name':self.EnterName.get(), 'Phone_number':self.EnterNumber.get(), 'address':self.EnterAddress.get(), 'adhar':self.EnterIdProof.get(), 'occupants':self.Occupants.get(), 'category':self.Category.get() }) conn.commit() conn.close() def delete(self): self.EnterName.delete(0,END) self.EnterAddress.delete(0,END) self.EnterIdProof.delete(0,END) self.En.delete(0,END) class BillingPage: global root global backimage def __init__(self,root): self.root=root #self.bg=PhotoImage(file ="D:\Python\HotelManagement\Background.png") #self.bglabel=Label(root,image=bg) #self.bglabel.place(x=0,y=0) 
#=========================================================================================================================================================================================================================== self.label5=Label(root,text='BILL PAYMENT',borderwidth=1,relief='solid',width=12,height=3) self.label5.pack() self.label5.place(x=460,y=30) self.label6 = Label(root, borderwidth=5, relief='solid', width=50, height=20) self.label6.pack() self.label6.place(x=500, y=120) self.pay=StringVar() self.payno=IntVar() self.r1=Radiobutton(root,text='PAY WITH CREDIT CARD',variable=self.payno,value=1) self.r1.pack() self.r1.place(x=20,y=100) self.r2 = Radiobutton(root, text='CASH', variable=self.payno, value=2) self.r2.pack() self.r2.place(x=20,y=170) self.r3 = Radiobutton(root, text='ONLINE PAYMENT', variable=self.payno, value=3) self.r3.pack() self.r3.place(x=20,y=240) def fun_pay(self): self.messagebox.showinfo('Hello','THANKS FOR CHOOSING\nOUR HOTEL\n\n\nPAYMENT DONE SUCCESSFULLY') self.b = Label(root, text="PAY NOW", foreground="blue", bg='pink', activebackground="red", width=10, height=2) self.b.pack() self.b.place(x=50,y=420) self.backbutton=Button(root,text="Back",image=backimage,command=self.Back,compound=LEFT) self.backbutton.place(relx=0.1,rely=0.1,anchor=CENTER) def Back(self): for widget in root.winfo_children(): widget.destroy() SplashScreen(root) class Login: def WelcomePage(self): for widget in root.winfo_children(): widget.destroy() SplashScreen(root) def __init__(self,root): self.root=root self.root.title("Admin login") self.root.geometry("1169x600") #====login frame==== root=Frame(self.root,bg="white") root.place(x=100,y=150,height=400,width=500) title=Label(root,text="Admin Login",font=("Impact",35,"bold"),fg="gray",bg="white").place(x=90,y=40) desc=Label(root,text="Fill username and password here",font=("Goudy old style",15,"bold"),fg="gray",bg="white").place(x=90,y=100) #====Username module==== lbl_username=Label(root,text="Username",font=("Impact",15),fg="gray",bg="white").place(x=90,y=140) self.txt_user=Entry(root,font=("times new roman",15),bg="lightgray") self.txt_user.place(x=90,y=170, width=350, height=35) #====Password module==== lbl_password=Label(root,text="Password",font=("Impact",15),fg="gray",bg="white").place(x=90,y=210) self.txt_pass=Entry(root,show="*",font=("times new roman",15),bg="lightgray") self.txt_pass.place(x=90,y=240, width=350, height=35) #====Button==== forget_btn=Button(root,text="Forgot password?",bg="white",fg="gray",bd=0,font=("times new roman",12)).place(x=90,y=280) login_btn=Button(root,command=self.login_function,text="login",bg="white",fg="gray",font=("times new roman",15)).place(x=90,y=320) def login_function(self): if self.txt_user.get()=="" or self.txt_pass.get()=="": messagebox.showerror("Error","All fields are required", parent=self.root) elif self.txt_user.get()!="Admin" or self.txt_pass.get()!="1234": messagebox.showerror("Error","Invalid Username/password", parent=self.root) else: messagebox.showinfo("Welcome","Welcome Admin") self.WelcomePage() class SplashScreen: global root def Booking(self): for widget in root.winfo_children(): widget.destroy() BookingPage(root) def Billing(self): for widget in root.winfo_children(): widget.destroy() BillingPage(root) def Cab(self): for widget in root.winfo_children(): widget.destroy() SplashScreen(root) def LogOut(self): for widget in root.winfo_children(): widget.destroy() Login(root) def __init__(self,root): self.root=root #self.root.title("Login Page") #self.bag=PhotoImage(file 
="D:\Python\HotelManagement\Background.png") #self.bglabel=Label(root,image=self.bag) #self.bglabel.place(x=0,y=0) #frames code #Labels title=Label(root,text="MAIN MENU",font=("Arial black",45,"bold"),fg="blue",bg="sky blue").place(x=220,y=0) # label_roomAvail = Label(root, text="ROOM AVAILABILITY",font=("Goudy old style",20,"bold"),fg="black",bg="white").place(x=30,y=120) # label_checkOUT = Label(root,text="CHECK OUT",font=("Goudy old style",20,"bold"),fg="black",bg="white").place(x=550,y=120) # label_cabBook = Label(root,text="BOOK A CAB",font=("Goudy old style",20,"bold"),fg="black",bg="white").place(x=30,y=350) # label_billing = Label(root,text="BILLING",font=("Goudy old style",20,"bold"),fg="black",bg="white").place(x=550,y=350) #BUTTONS CODE roomAvail=Button(root,text="ROOM AVAILABILITY",bg="white",fg="black",bd=0,font=("Goudy old style",28),command=self.Booking).place(x=30,y=140) checkOut = Button(root,text="CHECK-OUT",bg="white",fg="black",bd=0,font=("Goudy old style",28),command=self.Billing).place(x=530,y=140) #cabBook = Button(root,text="CAB BOOK",bg="white",fg="black",bd=0,font=("Goudy old style",28),command=self.Cab).place(x=100,y=310) billing = Button(root,text="Log Out",bg="white",fg="black",bd=0,font=("Goudy old style",28),command=self.LogOut).place(x=560,y=310) class UserInfo: global root global backimage def __init__(self,root): self.root=root root.geometry("1196x600") root.title("Room Booking") #self.bag=PhotoImage(file ="D:\Python\HotelManagement\Background.png") #self.bglabel=Label(root,image=self.bag) #self.bglabel.place(x=0,y=0) self.pane=Canvas(root,bg="White",height=1000,width=800) self.pane.place(relx=0.5,y=500,anchor=CENTER) self.IDProof=StringVar() self.IDProof.set("Aadhar") self.label=Label(root,text="Enter Customer Details",bg="White",font=("Product Sans",20)).place(relx=0.5,rely=0.05,anchor=CENTER) self.name=Label(root,text="Name",font=("Product Sans",14),bg="White").place(relx=0.3,rely=0.2) self.Number=Label(root,text="Phone Number",font=("Product Sans",14),bg="White").place(relx=0.3,rely=0.3) self.Address=Label(root,text="Address",font=("Product Sans",14),bg="White").place(relx=0.3,rely=0.4) self.ID=OptionMenu(root,self.IDProof,*["Aadhar","Driving Licence","Other"]) self.ID.config(indicatoron=0,font=("Product Sans",12)) self.ID.configure(bg="White",highlightthickness=0,highlightbackground="Grey",borderwidth=0) self.ID.place(relx=0.6,rely=0.5) self.IDLabel=Label(root,text="ID Proof",bg="White",font=("Product Sans",14)).place(relx=0.3,rely=0.5) self.EnterName=Entry(root,font=("Product Sans",12)).place(relx=0.6,rely=0.2) self.EnterNumber=Entry(root,font=("Product Sans",12)).place(relx=0.6,rely=0.3) self.EnterAddress=Entry(root,font=("Product Sans",12)).place(relx=0.6,rely=0.4) self.EnterIdProof=Entry(root,font=("Product Sans",12)).place(relx=0.6,rely=0.6) self.bookbutton=Button(root,text="Confirm",command=self.Book) self.bookbutton.place(relx=0.5,rely=0.9,anchor=CENTER) def Book(self): #Book Button Command pass # self.name=Label(root,text="Name",font=("Product Sans",14),bg="White").place(relx=0.3,rely=0.3,anchor=CENTER) # class Cab: # global root # def __init__(self): # root=Tk() # root.geometry("1200x600") # self.f1=Frame(root,bg="black",borderwidth=6,relief=RIDGE) # self.f1.pack(side=TOP,fill="y",pady=20) # self.l1=Label(self.f1,text="WELCOME TO OUR CAB SERVICES",fg="red",padx=13,pady=13,font="comicsansms 25 bold",borderwidth=3) # self.l1.pack(fill="x") # self.f2=Frame(root,bg="PINK",borderwidth=6,relief=RIDGE) # self.f2.pack(side=LEFT,fill=Y,pady=20) # 
self.l2=Label(f2,text="CUSTOMER DETAILS ",fg="RED",padx=30,pady=30,font="comicsansms 19 bold",borderwidth=3) # self.l2.grid(row=0,column=0) # self.f3=Frame(root,bg="PINK",borderwidth=6,relief=RIDGE) # self.f3.pack(fill=Y,side=LEFT,padx=30,pady=20) # self.l3=Label(f3,text="BOOKING DETAILS",fg="RED",padx=30,pady=30,font="comicsansms 19 bold",borderwidth=3) # self.l3.grid(row=0,column=0) # self.f4=Frame(root,bg="pink",borderwidth=6,relief=RIDGE) # self.f4.pack(fill=Y,side=LEFT,pady=20) # self.l4=Label(f4,text="RECEIPT",fg="RED",padx=30,pady=30,font="comicsansms 19 bold",borderwidth=3) # self.l4.grid() # #text for 2nd frame # self.name=Label(f2,text="NAME",font="comicsansma 15 bold") # self.gender=Label(f2,text="GENDER",font="comicsansma 15 bold") # self.address=Label(f2,text="ADDRESS",font="comicsansma 15 bold") # self.mobile=Label(f2,text="MOBILE",font="comicsansma 15 bold") # self.email=Label(f2,text="EMAIL",font="comicsansma 15 bold") # #pack text for 2nd frame # self.name.grid(row=1,column=0,sticky=W,pady=2,padx=2) # self.gender.grid(row=2,column=0,sticky=W,pady=6,padx=2) # self.address.grid(row=3,column=0,sticky=W,pady=6,padx=2) # self.mobile.grid(row=4,column=0,sticky=W,pady=6,padx=2) # self.email.grid(row=5,column=0,sticky=W,pady=6,padx=2) # #variables for 2nd frame # """namevalue=StringVar() # gendervalue=StringVar() # addressvalue=StringVar() # mobilevalue=StringVar() # emailvalue=StringVar()""" # #entries for 2nd frame # self.nameentry=Entry(f2) # self.genderentry=Entry(f2) # self.addressentry=Entry(f2) # self.mobileentry=Entry(f2) # self.emailentry=Entry(f2) # #packing entries for 2nd frame # self.nameentry.grid(row=1,column=0,pady=2) # self.genderentry.grid(row=2,column=0,pady=6) # self.addressentry.grid(row=3,column=0,pady=6) # self.mobileentry.grid(row=4,column=0,pady=6,padx=4) # self.emailentry.grid(row=5,column=0,pady=6) # #buttons for 2nd frame # self.b1=Button(f2, text="SUBMIT", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2) # self.b1.grid() # self.b1.place(x=50,y=410,anchor=S) # self.b2=Button(f2, text="CANCEL", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2) # self.b2.grid() # self.b2.place(x=270,y=410,anchor=S) # #text for 3rd frame # self.pickup=Label(f3,text="PICKUP",font="comicsansma 12 bold") # self.drop=Label(f3,text="DROP",font="comicsansma 12 bold") # self.pooling=Label(f3,text="POOLING",font="comicsansma 12 bold") # self.luggage=Label(f3,text="LUGGAGE",font="comicsansma 12 bold") # self.car=Label(f3,text="CAR TYPE",font="comicsansma 12 bold") # #pack text for 3RD frame # self.pickup.grid(row=1,column=0,sticky=W,pady=6,padx=2) # self.drop.grid(row=2,column=0,sticky=W,pady=6,padx=2) # self.pooling.grid(row=3,column=0,sticky=W,pady=6,padx=2) # self.luggage.grid(row=4,column=0,sticky=W,pady=6,padx=2) # self.car.grid(row=5,column=0,sticky=W,pady=6,padx=2) # #entries for 3RD frame # self.pickupentry=Entry(f3) # self.dropentry=Entry(f3) # self.poolingentry=Entry(f3) # self.luggageentry=Entry(f3) # self.carentry=Entry(f3) # #packing entries for 3RD frame # self.pickupentry.grid(row=1,column=0,pady=2) # self.dropentry.grid(row=2,column=0,pady=6) # self.poolingentry.grid(row=3,column=0,pady=16,padx=16) # self.luggageentry.grid(row=4,column=0,pady=6,padx=4) # self.carentry.grid(row=5,column=0,pady=6) # #buttons for 3rd frame # self.b1=Button(f3, text="SUBMIT", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2) # self.b1.grid() # self.b1.place(x=50,y=410,anchor=S) # 
self.b2=Button(f3, text="CANCEL", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2) # self.b2.grid() # self.b2.place(x=240,y=410,anchor=S) # #buttons for 4th frame # self.b1=Button(f4, text="TOTAL", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2) # self.b1.grid() # self.b1.place(x=50,y=250,anchor=S) # self.b2=Button(f4, text="RECIEPT", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2) # self.b2.grid() # self.b2.place(x=50,y=300,anchor=S) # self.b3=Button(f4, text="RESET", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2) # self.b3.grid() # self.b3.place(x=50,y=350,anchor=S) # self.b4=Button(f4, text="EXIT", foreground="RED", bg='sky blue', activebackground="YELLOW", width=12, height=2) # self.b4.grid() # self.b4.place(x=50,y=400,anchor=S) Login(root) root.mainloop()
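
# Side note on the form fields used above: Tkinter geometry calls such as .place()
# return None, so an Entry that later needs .get() is normally created and placed in
# two separate statements. A small illustrative helper (names are invented, not part
# of the original program):
def make_entry(parent, relx, rely):
    entry = Entry(parent, font=("Product Sans", 12))  # keep the widget reference
    entry.place(relx=relx, rely=rely)                 # place() itself returns None
    return entry                                      # the caller can use entry.get() later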
python
import os import pytest from cctbx import sgtbx, uctbx from dxtbx.serialize import load import dials.command_line.cosym as dials_cosym from dials.algorithms.symmetry.cosym._generate_test_data import ( generate_experiments_reflections, ) from dials.array_family import flex from dials.util import Sorry @pytest.mark.parametrize( "space_group,engine", [(None, "scitbx"), ("P 1", "scipy"), ("P 4", "scipy")] ) def test_cosym(dials_data, run_in_tmpdir, space_group, engine): mcp = dials_data("multi_crystal_proteinase_k") args = ["space_group=" + str(space_group), "seed=0", f"engine={engine}"] for i in [1, 2, 3, 4, 5, 7, 8, 10]: args.append(mcp.join("experiments_%d.json" % i).strpath) args.append(mcp.join("reflections_%d.pickle" % i).strpath) dials_cosym.run(args=args) assert os.path.isfile("symmetrized.refl") assert os.path.isfile("symmetrized.expt") experiments = load.experiment_list("symmetrized.expt", check_format=False) if space_group is None: assert ( experiments[0].crystal.get_space_group().type().lookup_symbol() == "P 4 2 2" ) else: assert ( experiments[0].crystal.get_space_group().type().lookup_symbol() == space_group ) joint_reflections = flex.reflection_table.from_file("symmetrized.refl") # check that there are 8 unique id and imageset_ids, and that these # correctly correspond to each experiment assert len(set(joint_reflections["id"])) == 8 assert len(set(joint_reflections["imageset_id"])) == 8 for id_ in range(8): sel = joint_reflections["id"] == id_ assert set(joint_reflections["imageset_id"].select(sel)) == {id_} def test_cosym_partial_dataset(dials_data, run_in_tmpdir): """Test how cosym handles partial/bad datasets.""" mcp = dials_data("multi_crystal_proteinase_k") args = [] for i in [1, 2]: args.append(mcp.join("experiments_%d.json" % i).strpath) args.append(mcp.join("reflections_%d.pickle" % i).strpath) # Make one dataset that will be removed in prefiltering r = flex.reflection_table.from_file(mcp.join("reflections_8.pickle").strpath) r["partiality"] = flex.double(r.size(), 0.1) r.as_file("renamed.refl") args.append("renamed.refl") args.append(mcp.join("experiments_8.json").strpath) # Add another good dataset at the end of the input list args.append(mcp.join("experiments_10.json").strpath) args.append(mcp.join("reflections_10.pickle").strpath) dials_cosym.run(args=args) assert os.path.exists("symmetrized.refl") assert os.path.exists("symmetrized.expt") experiments = load.experiment_list("symmetrized.expt", check_format=False) assert len(experiments) == 3 def test_cosym_partial_dataset_raises_sorry(dials_data, run_in_tmpdir, capsys): """Test how cosym handles partial/bad datasets.""" mcp = dials_data("multi_crystal_proteinase_k") args = ["renamed.refl", mcp.join("experiments_8.json").strpath] r2 = flex.reflection_table.from_file(mcp.join("reflections_10.pickle").strpath) r2["partiality"] = flex.double(r2.size(), 0.1) r2.as_file("renamed2.refl") args.append("renamed2.refl") args.append(mcp.join("experiments_10.json").strpath) with pytest.raises(Sorry): dials_cosym.run(args=args) @pytest.mark.parametrize( ( "space_group", "unit_cell", "dimensions", "sample_size", "use_known_space_group", "use_known_lattice_group", ), [ ("P2", None, None, 10, False, False), ("P3", None, None, 20, False, False), ("I23", None, 2, 10, False, False), ("P422", (79, 79, 37, 90, 90, 90), None, 10, True, False), ("P321", (59.39, 59.39, 28.35, 90, 90, 120), None, 5, False, False), ], ) def test_synthetic( space_group, unit_cell, dimensions, sample_size, use_known_space_group, use_known_lattice_group, 
run_in_tmpdir, ): space_group = sgtbx.space_group_info(space_group).group() if unit_cell is not None: unit_cell = uctbx.unit_cell(unit_cell) experiments, reflections, _ = generate_experiments_reflections( space_group=space_group, unit_cell=unit_cell, unit_cell_volume=10000, sample_size=sample_size, map_to_p1=True, d_min=1.5, ) experiments.as_json("tmp.expt") expt_file = "tmp.expt" joint_table = flex.reflection_table() for r in reflections: joint_table.extend(r) joint_table.as_file("tmp.refl") refl_file = "tmp.refl" args = [ expt_file, refl_file, "output.experiments=symmetrized.expt", "output.reflections=symmetrized.refl", "output.html=cosym.html", "output.json=cosym.json", ] if use_known_space_group: args.append(f"space_group={space_group.info()}") if use_known_lattice_group: args.append(f"lattice_group={space_group.info()}") if dimensions is not None: args.append(f"dimensions={dimensions}") dials_cosym.run(args=args) assert os.path.isfile("symmetrized.refl") assert os.path.isfile("symmetrized.expt") assert os.path.isfile("cosym.html") assert os.path.isfile("cosym.json") cosym_expts = load.experiment_list("symmetrized.expt", check_format=False) assert len(cosym_expts) == len(experiments) for expt in cosym_expts: if unit_cell is not None: assert expt.crystal.get_unit_cell().parameters() == pytest.approx( unit_cell.parameters() ) if ( str(expt.crystal.get_space_group().info()) == "P 6 2 2" and str(space_group.info()) == "P 3 2 1" ): # This is fine continue assert str(expt.crystal.get_space_group().info()) == str(space_group.info()) assert expt.crystal.get_space_group() == space_group
python
def print_section_header(header: str) -> None:
    print("========================================================================")
    print(f"=== {header} ")


def print_section_finish() -> None:
    print("=== SUCCESS\n")
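
# Quick illustration of the expected output (the section name is arbitrary):
if __name__ == "__main__":
    print_section_header("load configuration")  # prints the "====" banner plus "=== load configuration"
    print_section_finish()                      # prints "=== SUCCESS" followed by a blank line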
python
names = []
posx = []
posy = []
caps = []

with open('sink_cap.txt') as f:
    for line in f:
        tokens = line.split()
        names.append(tokens[0])
        posx.append(float(tokens[1]))
        posy.append(float(tokens[2]))
        caps.append(float(tokens[3]))

minx = min(posx)
miny = min(posy)
maxx = max(posx)
maxy = max(posy)

#print(" - minx = " + str(minx))
#print(" - miny = " + str(miny))
#print(" - maxx = " + str(maxx))
#print(" - maxy = " + str(maxy))

with open('sink_cap.txt', 'w') as f:
    for i in range(len(posx)):
        f.write(names[i] + " " + str(posx[i]-minx) + " " + str(posy[i]-miny) + " " + str(caps[i]) + "\n")

with open('blks_tmp2.txt') as f1, open('blks.txt', 'w') as f2:
    for line in f1:
        tokens = line.split()
        x1 = float(tokens[0]) - minx
        y1 = float(tokens[1]) - miny
        x2 = float(tokens[2]) - minx
        y2 = float(tokens[3]) - miny
        f2.write(str(x1) + " " + str(y1) + " " + str(x2) + " " + str(y2) + "\n")

with open('die-size.txt', 'w') as f:
    f.write(str(maxx-minx) + " " + str(maxy-miny) + " " + str(minx) + " " + str(miny))

#print("../bin/genHtree -w " + str(maxx-minx) + " -h " + str(maxy-miny) + " -n 256 -s 20 -tech 16")
python
from block_model.controller.block_model import BlockModel from drillhole.controller.composites import Composites from geometry.controller.ellipsoid import Ellipsoid from kriging.controller.search_ellipsoid import SearchEllipsoid from kriging.controller.point_kriging import PointKriging from variogram.controller.model import Model from variogram.controller.structure import Structure from common.discretize import * from common.rotation import * blockPath = '../../GP_Data/cy17_spc_assays_pvo_entry_ug.csv' # blockPath = '../../GP_Data/test_kriging.csv' ugVarBlock = 'ugcutPVO' blockColumns = [(ugVarBlock, int)] var = 'cut' ugVarComp = 'ugcut' # columna que contiene ug de los datos de sondaje compColumns = [(var, float), (ugVarComp, float)] compPath = '../../GP_Data/cy17_spc_assays_rl6_entry.csv' # compPath = '../../GP_Data/cy17_spc_au_rl6_entry.csv' def run(): blockModel, composites, ellipsoid = getObjects() ugs = [10, 20, 30, 40, 50, 51, 60, 70, 71, 80] for ug in ugs: model = getModel(ug) if model is not None: blocks = blockModel.applyFilter('"%s" == %d' % (ugVarBlock, ug)) comps = composites.applyFilter('"%s" == %d' % (ugVarComp, ug)) estimate(blocks, comps, ellipsoid, model) exportBlockModel(blockModel) def getModel(ug): # modelo de variograma if ug == 10: nugget = 0.250 s1 = Structure(Structure.EXPONENTIAL, 0.480, Ellipsoid(19, 19, 19, 0, 0, 0)) s2 = Structure(Structure.EXPONENTIAL, 0.270, Ellipsoid(436, 436, 436, 0, 0, 0)) structures = [s1, s2] return Model(nugget, structures) elif ug == 20: nugget = 0.250 s1 = Structure(Structure.EXPONENTIAL, 0.370, Ellipsoid(16, 22, 5, 20, 0, 0)) s2 = Structure(Structure.EXPONENTIAL, 0.380, Ellipsoid(177, 97, 27, 20, 0, 0)) structures = [s1, s2] return Model(nugget, structures) elif ug == 30: nugget = 0.290 s1 = Structure(Structure.SPHERIC, 0.320, Ellipsoid(47, 103, 20, 30, 0, 0)) s2 = Structure(Structure.SPHERIC, 0.390, Ellipsoid(601, 500, 32, 30, 0, 0)) structures = [s1, s2] return Model(nugget, structures) elif ug == 40: nugget = 0.220 s1 = Structure(Structure.SPHERIC, 0.420, Ellipsoid(55, 20, 11, 40, 0, 0)) s2 = Structure(Structure.SPHERIC, 0.360, Ellipsoid(447, 183, 26, 40, 0, 0)) structures = [s1, s2] return Model(nugget, structures) elif ug == 50: nugget = 0.180 s1 = Structure(Structure.SPHERIC, 0.390, Ellipsoid(16, 29, 11, 40, 0, 0)) s2 = Structure(Structure.SPHERIC, 0.430, Ellipsoid(144, 93, 145, 40, 0, 0)) structures = [s1, s2] return Model(nugget, structures) elif ug == 51: nugget = 0.140 s1 = Structure(Structure.SPHERIC, 0.390, Ellipsoid(14, 37, 28, 35, 0, 0)) s2 = Structure(Structure.SPHERIC, 0.470, Ellipsoid(343, 183, 125, 35, 0, 0)) structures = [s1, s2] return Model(nugget, structures) elif ug == 60: nugget = 0.150 s1 = Structure(Structure.SPHERIC, 0.550, Ellipsoid(14.8, 10.3, 11.9, 10, 0, 0)) s2 = Structure(Structure.SPHERIC, 0.300, Ellipsoid(954.5, 98.9, 16337.9, 10, 0, 0)) structures = [s1, s2] return Model(nugget, structures) elif ug == 70: nugget = 0.150 s1 = Structure(Structure.EXPONENTIAL, 0.444, Ellipsoid(18.6, 15.1, 18.1, 10, 0, 0)) s2 = Structure(Structure.EXPONENTIAL, 0.406, Ellipsoid(18.8, 14.9, 208.9, 10, 0, 0)) structures = [s1, s2] return Model(nugget, structures) elif ug == 71: nugget = 0.200 s1 = Structure(Structure.EXPONENTIAL, 0.441, Ellipsoid(11.1, 7.9, 9.8, 20, 0, 0)) s2 = Structure(Structure.EXPONENTIAL, 0.359, Ellipsoid(143.7, 161.0, 3777.8, 20, 0, 0)) structures = [s1, s2] return Model(nugget, structures) return None def estimate(blocks, composites, ellipsoid, model): # se rotan los compósitos rotatedPoints = 
rotateComposites(composites, ellipsoid.rotationMatrix) # se crea un diccionario para acceder a las muestras según su coordenada rotada compositesByRotatedPoint = dict([(tuple(rotatedPoints[i]), composites[i]) for i in range(len(rotatedPoints))]) # se discretiza el espacio discretizedPoints = discretizePoints(rotatedPoints, ellipsoid.major, ellipsoid.medium, ellipsoid.minor) kriging = PointKriging(ellipsoid, model) cap = 2 print('Estimando modelo de bloques:') for block in blocks: # se rota el punto que se quiere estimar rx, ry, rz = rotateBlock(block, ellipsoid.rotationMatrix) # se obtienen los compósitos cercanos al centro del bloque points = ellipsoid.searchPointsInDiscretizedPoints((rx, ry, rz), discretizedPoints) if len(points) > 0: # se ordenan los puntos por distancia al bloque points = sorted(points, key=lambda point: point[0]) inEllipsoid = [] for distance, rotatedPoint, movedPoint, octant in points: composite = compositesByRotatedPoint[rotatedPoint] inEllipsoid.append((distance, composite, octant)) # se seleccionan las muestras que cumplen con los criterios pedidos selectedSamples = ellipsoid.selectSamples(inEllipsoid) if len(selectedSamples) > 0: print('se utilizaron {}'.format(len(selectedSamples))) blockpoint = (block.x, block.y, block.z) weights, variance = kriging.ordinary(selectedSamples, blockpoint) value = 0 for i in range(len(selectedSamples)): _, comp, _ = selectedSamples[i] # capping gradeComp = comp[var] if comp[var] <= cap else cap value = gradeComp * weights[i] block.grade = value def exportBlockModel(blockModel): # Exportación modelo de bloques outfile = 'modelo_estimado_sondaje.csv' outfile = open(outfile, 'w') outfile.write('x,y,z,grade\n') for block in blockModel: if hasattr(block, 'grade'): line = block.x, block.y, block.z, block.grade else: line = block.x, block.y, block.z, -99 outfile.write("%f,%f,%f,%f\n" % line) outfile.close() def getObjects(): # se carga el modelo de bloques, compósitos y script de categoría blockModel = BlockModel(path=blockPath, x='midx', y='midy', z='midz', readBlocks=True) # composites = Composites(path=compPath, holeid='dhid', middlex='midx', middley='midy', middlez='midz', # from_='from', to_='to', columns=compColumns, readComposites=True) composites = Composites(path=compPath, holeid='dhid', middlex='midx', middley='midy', middlez='midz', columns=compColumns, readComposites=True) major, medium, minor = 100, 100, 100 bearing, plunge, dip = 0, 0, 0 minSamples, maxSamples = 10, 100 minSamplesByOctant, maxSamplesByOctant = 1, 100 minOctantWithSamples, maxSamplesByDrillhole = 1, 100 ellipsoid = SearchEllipsoid(major=major, medium=medium, minor=minor, bearing=bearing, plunge=plunge, dip=dip, minSamples=minSamples, maxSamples=maxSamples, minSamplesByOctant=minSamplesByOctant, maxSamplesByOctant=maxSamplesByOctant, minOctantWithSamples=minOctantWithSamples, maxSamplesByDrillhole=maxSamplesByDrillhole) return blockModel, composites, ellipsoid if __name__ == '__main__': run()
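
# Sketch of the ordinary-kriging estimate for one block: the capped composite grades
# are combined as a weighted sum (value = sum_i w_i * grade_i). This stand-alone helper
# mirrors the capping rule used in estimate() above; it is only illustrative and is not
# called by the script:
def weighted_grade(selected_samples, weights, cap=2.0):
    value = 0.0
    for (_, comp, _), w in zip(selected_samples, weights):
        grade = comp[var] if comp[var] <= cap else cap  # same capping as estimate()
        value += w * grade                              # accumulate weight * capped grade
    return value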
python
"""This module demonstrates usage of if-else statements, while loop and break.""" def calculate_grade(grade): """Function that calculates final grades based on points earned.""" if grade >= 90: if grade == 100: return 'A+' return 'A' if grade >= 80: return 'B' if grade >= 70: return 'C' return 'F' if __name__ == '__main__': while True: grade_str = input('Number of points (<ENTER> for END): ') if len(grade_str) == 0: break points = int(grade_str) print(calculate_grade(points)) print('Good Bye!')
python
from pinata.response import PinataResponse
from pinata.session import PinataAPISession


class PinataClient:
    def __init__(self, session: PinataAPISession, api_namespace: str):
        self.session = session
        self._prefix = api_namespace

    def _post(self, uri, *args, **kwargs) -> PinataResponse:
        return self.session.post(self._uri(uri), *args, **kwargs)

    def _get(self, uri, *args, **kwargs) -> PinataResponse:
        return self.session.get(self._uri(uri), *args, **kwargs)

    def _delete(self, uri, *args, **kwargs) -> PinataResponse:
        return self.session.delete(self._uri(uri), *args, **kwargs)

    def _uri(self, uri: str) -> str:
        return f"/{self._prefix}/{uri}"


__all__ = ["PinataClient"]
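
# Sketch of how the api_namespace prefix is meant to be used by a concrete client.
# The "pinning" namespace and "unpin" endpoint below are illustrative assumptions,
# not taken from this module:
class PinningClient(PinataClient):
    def __init__(self, session: PinataAPISession):
        super().__init__(session, api_namespace="pinning")

    def unpin(self, content_hash: str) -> PinataResponse:
        # _uri() turns this into a request against /pinning/unpin/<content_hash>
        return self._delete(f"unpin/{content_hash}")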
python
import music_trees as mt
from music_trees.tree import MusicTree

from copy import deepcopy
import random
from tqdm import tqdm

NUM_TAXONOMIES = 10
NUM_SHUFFLES = 1000

output_dir = mt.ASSETS_DIR / 'taxonomies'
output_dir.mkdir(exist_ok=True)

target_tree = mt.utils.data.load_entry(
    mt.ASSETS_DIR / 'taxonomies' / 'deeper-mdb.yaml', format='yaml')
target_tree = MusicTree.from_taxonomy(target_tree)


def scramble_tree(tree: MusicTree):
    "scramble a class tree"
    # first, copy the tree
    tree = deepcopy(tree)

    # shuffle many times
    for _ in tqdm(list(range(NUM_SHUFFLES))):
        # get all of the leaves twice
        A = tree.leaves()
        B = tree.leaves()

        # scramble one of them
        random.shuffle(B)

        # swap a and b for all A and B
        for an, bn in zip(A, B):
            tree.swap_leaves(an, bn)

    return tree


def export_tree(tree: MusicTree, fp: str):
    mt.utils.data.save_entry(tree._2dict()['root'], fp, format='yaml')


if __name__ == "__main__":
    for i in range(NUM_TAXONOMIES):
        t = scramble_tree(target_tree)
        # breakpoint()
        fp = output_dir / f'scrambled-{i}'
        export_tree(t, fp)
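
# Round-trip sketch using the same helpers as above: read one exported taxonomy back
# and rebuild a MusicTree from it. The index 0 is arbitrary, and the on-disk filename
# produced by save_entry is assumed to match the path passed to export_tree:
def load_scrambled(i: int = 0) -> MusicTree:
    entry = mt.utils.data.load_entry(output_dir / f'scrambled-{i}', format='yaml')
    return MusicTree.from_taxonomy(entry)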
python
from PIL import Image
import sys

im = Image.new("L", (256, 256))
c = 0
with open(sys.argv[1], "rb") as f:
    f.read(8)
    byte = f.read(1)
    while c < 65536:
        #print(c)
        im.putpixel((c % 256, int(c / 256)), ord(byte))
        byte = f.read(1)
        c = c + 1
im.save("fog.png")
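
# Invocation sketch (script and input file names are hypothetical):
#   python fog_dump_to_png.py some_fog_dump.bin
# The script skips an 8-byte header in sys.argv[1], then writes the next 65536 bytes
# row by row into fog.png as a 256x256 8-bit greyscale image.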
python
from ..std.index import *
from .math3d import *
from .math2d import *
from ..df.blizzardj import bj_mapInitialPlayableArea


class TerrainGrid(Rectangle):
    grids = []
    _loc = None

    def __init__(self, r, sampling=8):
        Rectangle.__init__(self, GetRectMinX(r), GetRectMinY(r), GetRectMaxX(r), GetRectMaxY(r))
        TerrainGrid.grids.append(self)
        self.sampling = sampling
        _l = TerrainGrid._loc
        _zgrid = None
        """[[luacode]]
        local _zgrid = {}
        """
        for X in range(math.floor(self.maxx - self.minx) / sampling):
            """[[luacode]]
            _zgrid[X] = {}
            """
            for Y in range(math.floor(self.maxy - self.miny) / sampling):
                MoveLocation(_l, X * sampling + self.minx, Y * sampling + self.miny)
                """[[luacode]]
                _zgrid[X][Y] = GetLocationZ(_l)
                """
        self.grid = _zgrid

    def get_z(self, x, y):
        X = math.floor((x - self.minx) / self.sampling)
        Y = math.floor((y - self.miny) / self.sampling)
        return self.grid[X][Y]

    @staticmethod
    def z(x, y):
        for g in TerrainGrid.grids:
            if Vector2(x, y, True) in g:
                return g.get_z(x, y)
        MoveLocation(TerrainGrid._loc, x, y)
        return GetLocationZ(TerrainGrid._loc)

    @staticmethod
    def _init():
        TerrainGrid._loc = Location(0, 0)


AddScriptHook(TerrainGrid._init, MAIN_BEFORE)


def _ft(x, y, temp=False):
    z = TerrainGrid.z(x, y)
    if IsTerrainPathable(x, y, PATHING_TYPE_WALKABILITY):
        z += 2000.0
    return Vector3(x, y, z, temp)


Vector3.from_terrain = _ft
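
# Usage sketch (assumption: called from map-initialization code after the script hooks
# have run; the coordinates are made up):
#   TerrainGrid(bj_mapInitialPlayableArea, sampling=8)   # pre-sample terrain Z over the playable area
#   z = TerrainGrid.z(1024.0, -512.0)                    # cached lookup; falls back to GetLocationZ outside any grid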
python
from collections.abc import Callable


def update(                               # <1>
    probe: Callable[[], float],           # <2>
    display: Callable[[float], None]      # <3>
) -> None:
    temperature = probe()
    # imagine lots of control code here
    display(temperature)


def probe_ok() -> int:                    # <4>
    return 42


def display_wrong(temperature: int) -> None:  # <5>
    print(hex(temperature))


update(probe_ok, display_wrong)           # type error  # <6>


def display_ok(temperature: complex) -> None:  # <7>
    print(temperature)


update(probe_ok, display_ok)              # OK  # <8>
python
# pylint: disable=invalid-name # pylint: disable=too-many-locals # pylint: disable=too-many-arguments # pylint: disable=too-many-statements # pylint: disable=unbalanced-tuple-unpacking # pylint: disable=consider-using-f-string) # pylint: disable=too-many-lines """ A module for finding M² values for a laser beam. Full documentation is available at <https://laserbeamsize.readthedocs.io> Start with necessary imports:: >>>> import numpy as np >>>> import laserbeamsize as lbs Finding the beam waist size, location, and M² for a beam is straightforward:: >>>> lambda0 = 632.8e-9 # m >>>> z = np.array([168, 210, 280, 348, 414, 480, 495, 510, 520, 580, 666, 770]) >>>> r = np.array([597, 572, 547, 554, 479, 403, 415, 400, 377, 391, 326, 397]) >>>> lbs.M2_report(z * 1e-3, 2 * r * 1e-6, lambda0) A graphic of the fit to diameters can be created by:: >>>> lbs.M2_diameter_plot(z * 1e-3, 2 * r * 1e-6, lambda0) >>>> plt.show() A graphic of the radial fit can be created by:: >>>> lbs.M2_radius_plot(z * 1e-3, 2 * r * 1e-6, lambda0) >>>> plt.show() """ import scipy.optimize import matplotlib.gridspec import matplotlib.pyplot as plt import numpy as np __all__ = ('z_rayleigh', 'beam_radius', 'magnification', 'image_distance', 'curvature', 'divergence', 'gouy_phase', 'focused_diameter', 'beam_parameter_product', 'artificial_to_original', 'M2_fit', 'M2_report', 'M2_diameter_plot', 'M2_radius_plot', 'M2_focus_plot' ) def z_rayleigh(w0, lambda0, M2=1): """ Return the Rayleigh distance for a Gaussian beam. Args: w0: minimum beam radius [m] lambda0: wavelength of light [m] Returns: z: axial distance from focus that irradiance has dropped 50% [m] """ return np.pi * w0**2 / lambda0 / M2 def beam_radius(w0, lambda0, z, z0=0, M2=1): """ Return the beam radius at an axial location. Args: w0: minimum beam radius [m] lambda0: wavelength of light [m] z: axial location of desired beam radius [m] z0: axial location of beam waist [m] M2: beam propagation factor [-] Returns: r: beam radius at axial position [m] """ zz = (z - z0) / z_rayleigh(w0, lambda0, M2) return w0 * np.sqrt(1 + zz**2) def magnification(w0, lambda0, s, f, M2=1): """ Return the magnification of a Gaussian beam. If the beam waist is before the lens, then the distance s will be negative, i.e. if it is at the front focus of the lens (s=-f). The new beam waist will be `m * w0` and the new Rayleigh distance will be `m**2 * zR` Args: f: focal distance of lens [m] zR: Rayleigh distance [m] s: distance of beam waist to lens [m] Returns: m: magnification [-] """ zR2 = z_rayleigh(w0, lambda0, M2)**2 return f / np.sqrt((s + f)**2 + zR2) def curvature(w0, lambda0, z, z0=0, M2=1): """ Calculate the radius of curvature of a Gaussian beam. The curvature will be a maximum at the Rayleigh distance and it will be infinite at the beam waist. Args: w0: minimum beam radius [m] lambda0: wavelength of light [m] z axial position along beam [m] z0 axial position of the beam waist [m] M2: beam propagation factor [-] Returns: R: radius of curvature of field at z [m] """ zR2 = z_rayleigh(w0, lambda0, M2)**2 return (z - z0) + zR2 / (z - z0) def divergence(w0, lambda0, M2=1): """ Calculate the full angle of divergence of a Gaussian beam. Args: w0: minimum beam radius [m] lambda0: wavelength of light [m] M2: beam propagation factor [-] Returns: theta: divergence of beam [radians] """ return 2 * w0 / z_rayleigh(w0, lambda0, M2) def gouy_phase(w0, lambda0, z, z0=0): """ Calculate the Gouy phase of a Gaussian beam. 
Args: w0: minimum beam radius [m] lambda0: wavelength of light [m] z: axial position along beam [m] z0: axial position of beam waist [m] Returns: phase: Gouy phase at axial position [radians] """ zR = z_rayleigh(w0, lambda0) return -np.arctan2(z - z0, zR) def focused_diameter(f, lambda0, d, M2=1): """ Diameter of diffraction-limited focused beam. see eq 6b from Roundy, "Current Technology of Beam Profile Measurements" in Laser Beam Shaping: Theory and Techniques by Dickey, 2000 Args: f: focal length of lens [m] lambda0: wavelength of light [m] d: diameter of limiting aperture [m] M2: beam propagation factor [-] Returns: d: diffraction-limited beam diameter [m] """ return 4 * M2**2 * lambda0 * f / (np.pi * d) def beam_parameter_product(Theta, d0, Theta_std=0, d0_std=0): """ Find the beam parameter product (BPP). Better beam quality is associated with the lower BPP values. The best (smallest) BPP is λ / π and corresponds to a diffraction-limited Gaussian beam. Args: Theta: full beam divergence angle [radians] d0: beam waist diameter [m] Theta_std: std. dev. of full beam divergence angle [radians] d0_std: std. dev. of beam waist diameter [m] Returns: BPP: Beam parameter product [m * radian] BPP_std: standard deviation of beam parameter product [m * radian] """ BPP = Theta * d0 / 4 BPP_std = BPP * np.sqrt((Theta_std / Theta)**2 + (d0_std / d0)**2) return BPP, BPP_std def image_distance(w0, lambda0, s, f, M2=1): """ Return the image location of a Gaussian beam. The default case is when the beam waist is located at the front focus of the lens (s=-f). Args: s: distance of beam waist to lens [m] f: focal distance of lens [m] w0: minimum beam radius [m] lambda0: wavelength of light [m] M2: beam propagation factor [-] Returns: z: location of new beam waist [m] """ zR2 = z_rayleigh(w0, lambda0, M2)**2 return f * (s * f + s * s + zR2) / ((f + s)**2 + zR2) def _abc_fit(z, d, lambda0): """ Return beam parameters for beam diameter measurements. Follows ISO 11146-1 section 9 and uses the standard `polyfit` routine in `numpy` to find the coefficients `a`, `b`, and `c`. d(z)**2 = a + b * z + c * z**2 These coefficients are used to determine the beam parameters using equations 25-29 from ISO 11146-1. Unfortunately, standard error propagation fails to accurately determine the standard deviations of these parameters. Therefore the error calculation lines are commented out and only the beam parameters are returned. 
Args: z: axial position of beam measurement [m] d: beam diameter [m] Returns: d0: beam waist diameter [m] z0: axial location of beam waist [m] M2: beam propagation parameter [-] Theta: full beam divergence angle [radians] zR: Rayleigh distance [m] """ nlfit, _nlpcov = np.polyfit(z, d**2, 2, cov=True) # unpack fitting parameters c, b, a = nlfit z0 = -b / (2 * c) Theta = np.sqrt(c) disc = np.sqrt(4 * a * c - b * b) / 2 M2 = np.pi / 4 / lambda0 * disc d0 = disc / np.sqrt(c) zR = disc / c params = [d0, z0, Theta, M2, zR] # unpack uncertainties in fitting parameters from diagonal of covariance matrix # c_std, b_std, a_std = [np.sqrt(_nlpcov[j, j]) for j in range(nlfit.size)] # z0_std = z0 * np.sqrt(b_std**2/b**2 + c_std**2/c**2) # d0_std = np.sqrt((4 * c**2 * a_std)**2 + (2 * b * c * b_std)**2 \ # + (b**2 * c_std)**2) / (8 * c**2 * d0) # Theta_std = c_std/2/np.sqrt(c) # zR_std = np.sqrt(4 * c**4 * a_std**2 + b**2 * c**2 * b_std**2 + \ # (b**2-2 * a * c)**2 * c_std**2)/(4 * c**3) / zR # M2_std = np.pi**2 * np.sqrt(4 * c**2 * a_std**2 + b**2 * b_std**2 + \ # 4 * a**2 * c_std**2)/(64 * lambda0**2) / M2 # errors = [d0_std, z0_std, M2_std, Theta_std, zR_std] return params def _beam_fit_fn_(z, d0, z0, Theta): """Fitting function for d0, z0, and Theta.""" return np.sqrt(d0**2 + (Theta * (z - z0))**2) def _beam_fit_fn_2(z, d0, Theta): """Fitting function for d0 and Theta.""" return np.sqrt(d0**2 + (Theta * z)**2) def _beam_fit_fn_3(z, z0, Theta): """Fitting function for z0 and Theta.""" return np.abs(Theta * (z - z0)) def _beam_fit_fn_4(z, Theta): """Fitting function for just Theta.""" return np.abs(Theta * z) def basic_beam_fit(z, d, lambda0, z0=None, d0=None): """ Return the hyperbolic fit to the supplied diameters. Follows ISO 11146-1 section 9 but `a`, `b`, and `c` have been replaced by beam parameters `d0`, `z0`, and Theta. The equation for the beam diameter `d(z)` is d(z)**2 = d0**2 + Theta**2 * (z-z0)**2 A non-linear curve fit is done to determine the beam parameters and the standard deviations of those parameters. 
The beam parameters are returned in one array and the errors in a separate array:: d0: beam waist diameter [m] z0: axial location of beam waist [m] Theta: full beam divergence angle [radians] M2: beam propagation parameter [-] zR: Rayleigh distance [m] Args: z: array of axial position of beam measurements [m] d: array of beam diameters [m] lambda0: wavelength of the laser [m] Returns: params: [d0, z0, Theta, M2, zR] errors: array with standard deviations of above values """ # approximate answer i = np.argmin(d) d0_guess = d[i] z0_guess = z[i] # fit data using SciPy's curve_fit() algorithm if z0 is None: if d0 is None: i = np.argmax(abs(z - z0_guess)) theta_guess = abs(d[i] / (z[i] - z0_guess)) p0 = [d0_guess, z0_guess, theta_guess] nlfit, nlpcov = scipy.optimize.curve_fit(_beam_fit_fn_, z, d, p0=p0) d0, z0, Theta = nlfit d0_std, z0_std, Theta_std = [np.sqrt(nlpcov[j, j]) for j in range(nlfit.size)] else: i = np.argmax(abs(z - z0_guess)) theta_guess = abs(d[i] / (z[i] - z0_guess)) p0 = [z0_guess, theta_guess] dd = np.sqrt(d**2 - d0**2) nlfit, nlpcov = scipy.optimize.curve_fit(_beam_fit_fn_3, z, dd, p0=p0) z0, Theta = nlfit z0_std, Theta_std = [np.sqrt(nlpcov[j, j]) for j in range(nlfit.size)] d0_std = 0 else: i = np.argmax(abs(z - z0)) theta_guess = abs(d[i] / (z[i] - z0)) if d0 is None: p0 = [d0_guess, theta_guess] nlfit, nlpcov = scipy.optimize.curve_fit(_beam_fit_fn_2, z - z0, d, p0=p0) d0, Theta = nlfit d0_std, Theta_std = [np.sqrt(nlpcov[j, j]) for j in range(nlfit.size)] z0_std = 0 else: p0 = [theta_guess] dd = np.sqrt(d**2 - d0**2) nlfit, nlpcov = scipy.optimize.curve_fit(_beam_fit_fn_4, z - z0, dd, p0=p0) Theta = nlfit[0] Theta_std = np.sqrt(nlpcov[0, 0]) z0_std = 0 d0_std = 0 # divergence and Rayleigh range of Gaussian beam Theta0 = 4 * lambda0 / (np.pi * d0) zR = np.pi * d0**2 / (4 * lambda0) M2 = Theta / Theta0 zR = np.pi * d0**2 / (4 * lambda0 * M2) M2_std = M2 * np.sqrt((Theta_std / Theta)**2 + (d0_std / d0)**2) zR_std = zR * np.sqrt((M2_std / M2)**2 + (2 * d0_std / d0)**2) params = [d0, z0, Theta, M2, zR] errors = [d0_std, z0_std, Theta_std, M2_std, zR_std] return params, errors def max_index_in_focal_zone(z, zone): """Return index farthest from focus in inner zone.""" _max = -1e32 imax = None for i, zz in enumerate(z): if zone[i] == 1: if _max < zz: _max = zz imax = i return imax def min_index_in_outer_zone(z, zone): """Return index of measurement closest to focus in outer zone.""" _min = 1e32 imin = None for i, zz in enumerate(z): if zone[i] == 2: if zz < _min: _min = zz imin = i return imin def M2_fit(z, d, lambda0, strict=False, z0=None, d0=None): """ Return the hyperbolic fit to the supplied diameters. Follows ISO 11146-1 section 9 but `a`, `b`, and `c` have been replaced by beam parameters `d0`, `z0`, and Theta. The equation for the beam diameter `d(z)` is d(z)**2 = d0**2 + Theta**2 * (z - z0)**2 A non-linear curve fit is done to determine the beam parameters and the standard deviations of those parameters. The beam parameters are returned in one array and the errors in a separate array:: d0: beam waist diameter [m] z0: axial location of beam waist [m] Theta: full beam divergence angle [radians] M2: beam propagation parameter [-] zR: Rayleigh distance [m] When `strict==True`, an estimate is made for the location of the beam focus and the Rayleigh distance. These values are then used to divide the measurements into three zones:: * those within one Rayleigh distance of the focus, * those between 1 and 2 Rayleigh distances, and * those beyond two Rayleigh distances. 
values are used or unused depending on whether they comply with a strict reading of the ISO 11146-1 standard which requires:: ... measurements at at least 10 different z positions shall be taken. Approximately half of the measurements shall be distributed within one Rayleigh length on either side of the beam waist, and approximately half of them shall be distributed beyond two Rayleigh lengths from the beam waist. Args: z: array of axial position of beam measurements [m] d: array of beam diameters [m] lambda0: wavelength of the laser [m] strict: (optional) boolean for strict usage of ISO 11146 z0: (optional) location of beam waist [m] d0: (optional) diameter of beam waist [m] Returns: params: [d0, z0, Theta, M2, zR] errors: [d0_std, z0_std, Theta_std, M2_std, zR_std] used: boolean array indicating if data point is used """ used = np.full_like(z, True, dtype=bool) params, errors = basic_beam_fit(z, d, lambda0, z0=z0, d0=d0) if not strict: return params, errors, used z0 = params[1] zR = params[4] # identify zones (0=unused, 1=focal region, 2=outer region) zone = np.zeros_like(z) for i, zz in enumerate(z): if abs(zz - z0) <= 1.01 * zR: zone[i] = 1 if 1.99 * zR <= abs(zz - z0): zone[i] = 2 # count points in each zone n_focal = np.sum(zone == 1) n_outer = np.sum(zone == 2) if n_focal + n_outer < 10 or n_focal < 4 or n_outer < 4: print("Invalid distribution of measurements for ISO 11146") print("%d points within 1 Rayleigh distance" % n_focal) print("%d points greater than 2 Rayleigh distances" % n_outer) return params, errors, used # mark extra points in outer zone closest to focus as unused extra = n_outer - n_focal if n_focal == 4: extra = n_outer - 6 for _ in range(extra): zone[min_index_in_outer_zone(abs(z - z0), zone)] = 0 # mark extra points in focal zone farthest from focus as unused extra = n_outer - n_focal if n_outer == 4: extra = n_focal - 6 for _ in range(n_focal - n_outer): zone[max_index_in_focal_zone(abs(z - z0), zone)] = 0 # now find beam parameters with 50% focal and 50% outer zone values used = zone != 0 dd = d[used] zz = z[used] params, errors = basic_beam_fit(zz, dd, lambda0, z0=z0, d0=d0) return params, errors, used def M2_string(params, errors): """ Return string describing a single set of beam measurements. Args: z: array of axial position of beam measurements [m] d: array of beam diameters [m] lambda0: wavelength of the laser [m] Returns: s: formatted string suitable for printing. """ d0, z0, Theta, M2, zR = params d0_std, z0_std, Theta_std, M2_std, zR_std = errors BPP, BPP_std = beam_parameter_product(Theta, d0, Theta_std, d0_std) s = '' s += " M^2 = %.2f ± %.2f\n" % (M2, M2_std) s += "\n" s += " d_0 = %.0f ± %.0f µm\n" % (d0 * 1e6, d0_std * 1e6) s += " w_0 = %.0f ± %.0f µm\n" % (d0 / 2 * 1e6, d0_std / 2 * 1e6) s += "\n" s += " z_0 = %.0f ± %.0f mm\n" % (z0 * 1e3, z0_std * 1e3) s += " z_R = %.0f ± %.0f mm\n" % (zR * 1e3, zR_std * 1e3) s += "\n" s += " Theta = %.2f ± %.2f mrad\n" % (Theta * 1e3, Theta_std * 1e3) s += "\n" s += " BPP = %.2f ± %.2f mm mrad\n" % (BPP * 1e6, BPP_std * 1e6) return s def artificial_to_original(params, errors, f, hiatus=0): """ Convert artificial beam parameters to original beam parameters. ISO 11146-1 section 9 equations are used to retrieve the original beam parameters from parameters measured for an artificial waist created by focusing the beam with a lens. M2 does not change. 
Ideally, the waist position would be relative to the rear principal plane of the lens and the original beam waist position would be corrected by the hiatus between the principal planes of the lens. The beam parameters are in an array `[d0,z0,Theta,M2,zR]` :: d0: beam waist diameter [m] z0: axial location of beam waist [m] Theta: full beam divergence angle [radians] M2: beam propagation parameter [-] zR: Rayleigh distance [m] The errors that are returned are not quite right at the moment. Args: params: array of artificial beam parameters errors: array with std dev of above parameters f: focal length of lens [m] hiatus: distance between principal planes of focusing lens [m] Returns: params: array of original beam parameters (without lens) errors: array of std deviations of above parameters """ art_d0, art_z0, art_Theta, M2, art_zR = params art_d0_std, art_z0_std, art_Theta_std, M2_std, art_zR_std = errors x2 = art_z0 - f V = f / np.sqrt(art_zR**2 + x2**2) orig_d0 = V * art_d0 orig_d0_std = V * art_d0_std orig_z0 = V**2 * x2 + f - hiatus orig_z0_std = V**2 * art_z0_std orig_zR = V**2 * art_zR orig_zR_std = V**2 * art_zR_std orig_Theta = art_Theta / V orig_Theta_std = art_Theta_std / V o_params = [orig_d0, orig_z0, orig_Theta, M2, orig_zR] o_errors = [orig_d0_std, orig_z0_std, orig_Theta_std, M2_std, orig_zR_std] return o_params, o_errors def _M2_report(z, d, lambda0, f=None, strict=False, z0=None, d0=None): """ Return string describing a single set of beam measurements. Args: z: array of axial position of beam measurements [m] d: array of beam diameters [m] lambda0: wavelength of the laser [m] Returns: s: formatted string suitable for printing. """ params, errors, _ = M2_fit(z, d, lambda0, strict, z0=z0, d0=d0) if f is None: s = "Beam propagation parameters\n" s += M2_string(params, errors) return s s = "Beam propagation parameters for the focused beam\n" s += M2_string(params, errors) o_params, o_errors = artificial_to_original(params, errors, f) s += "\nBeam propagation parameters for the laser beam\n" s += M2_string(o_params, o_errors) return s def M2_report(z, dx, lambda0, dy=None, f=None, strict=False, z0=None, d0=None): """ Return string describing a one or more sets of beam measurements. Example:: >>>> import numpy as np >>>> import laserbeamsize as lbs >>>> lambda0 = 632.8e-9 # meters >>>> z = np.array([168, 210, 280, 348, 414, 480, 495, 510, 520, 580, 666, 770]) >>>> r = np.array([597, 572, 547, 554, 479, 403, 415, 400, 377, 391, 326, 397]) >>>> s = lbs.M2_report(z * 1e-3, 2 * r * 1e-6, lambda0) >>>> print(s) Args: z: array of axial position of beam measurements [m] dx: array of beam diameters for semi-major axis [m] lambda0: wavelength of the laser [m] dy: (optional) array of beam diameters for semi-minor axis [m] f: (optional) focal length of lens [m] strict: (optional) boolean for strict usage of ISO 11146 z0: (optional) location of beam waist [m] d0: (optional) diameter of beam waist [m] Returns: s: formatted string suitable for printing. 
""" if dy is None: s = _M2_report(z, dx, lambda0, f=f, strict=strict, z0=z0, d0=d0) return s params, errors, _ = M2_fit(z, dx, lambda0, strict=strict, z0=z0, d0=d0) d0x, z0x, Thetax, M2x, zRx = params d0x_std, z0x_std, Thetax_std, M2x_std, zRx_std = errors params, errors, _ = M2_fit(z, dy, lambda0, strict=strict, z0=z0, d0=d0) d0y, z0y, Thetay, M2y, zRy = params d0y_std, z0y_std, Thetay_std, M2y_std, zRy_std = errors z0 = (z0x + z0y) / 2 z0_std = np.sqrt(z0x_std**2 + z0y_std**2) d0 = (d0x + d0y) / 2 d0_std = np.sqrt(d0x_std**2 + d0y_std**2) zR = (zRx + zRy) / 2 zR_std = np.sqrt(zRx_std**2 + zRy_std**2) Theta = (Thetax + Thetay) / 2 Theta_std = np.sqrt(Thetax_std**2 + Thetay_std**2) M2 = np.sqrt(M2x * M2y) M2_std = np.sqrt(M2x_std**2 + M2y_std**2) BPP, BPP_std = beam_parameter_product(Theta, d0, Theta_std, d0_std) BPPx, BPPx_std = beam_parameter_product(Thetax, d0x, Thetax_std, d0x_std) BPPy, BPPy_std = beam_parameter_product(Thetay, d0y, Thetay_std, d0y_std) tag = '' if f is not None: tag = " of the focused beam" s = "Beam propagation parameters derived from hyperbolic fit\n" s += "Beam Propagation Ratio%s\n" % tag s += " M2 = %.2f ± %.2f\n" % (M2, M2_std) s += " M2x = %.2f ± %.2f\n" % (M2x, M2x_std) s += " M2y = %.2f ± %.2f\n" % (M2y, M2y_std) s += "Beam waist diameter%s\n" % tag s += " d0 = %.0f ± %.0f µm\n" % (d0 * 1e6, d0_std * 1e6) s += " d0x = %.0f ± %.0f µm\n" % (d0x * 1e6, d0x_std * 1e6) s += " d0y = %.0f ± %.0f µm\n" % (d0y * 1e6, d0y_std * 1e6) s += "Beam waist location%s\n" % tag s += " z0 = %.0f ± %.0f mm\n" % (z0 * 1e3, z0_std * 1e3) s += " z0x = %.0f ± %.0f mm\n" % (z0x * 1e3, z0x_std * 1e3) s += " z0y = %.0f ± %.0f mm\n" % (z0y * 1e3, z0y_std * 1e3) s += "Rayleigh Length%s\n" % tag s += " zR = %.0f ± %.0f mm\n" % (zR * 1e3, zR_std * 1e3) s += " zRx = %.0f ± %.0f mm\n" % (zRx * 1e3, zRx_std * 1e3) s += " zRy = %.0f ± %.0f mm\n" % (zRy * 1e3, zRy_std * 1e3) s += "Divergence Angle%s\n" % tag s += " theta = %.2f ± %.2f milliradians\n" % (Theta * 1e3, Theta_std * 1e3) s += " theta_x = %.2f ± %.2f milliradians\n" % (Thetax * 1e3, Thetax_std * 1e3) s += " theta_y = %.2f ± %.2f milliradians\n" % (Thetay * 1e3, Thetay_std * 1e3) s += "Beam parameter product%s\n" % tag s += " BPP = %.2f ± %.2f mm * mrad\n" % (BPP * 1e6, BPP_std * 1e6) s += " BPP_x = %.2f ± %.2f mm * mrad\n" % (BPPx * 1e6, BPPx_std * 1e6) s += " BPP_y = %.2f ± %.2f mm * mrad\n" % (BPPy * 1e6, BPPy_std * 1e6) if f is None: return s # needs to be completed x2 = z0x - f y2 = z0y - f r2 = z0 - f Vx = f / np.sqrt(zRx**2 + x2**2) Vy = f / np.sqrt(zRy**2 + y2**2) V = f / np.sqrt(zR**2 + r2**2) d0x *= Vx d0y *= Vy d0 *= V z0x = Vx**2 * x2 + f z0y = Vy**2 * y2 + f z0 = V**2 * r2 + f return s def _fit_plot(z, d, lambda0, strict=False, z0=None, d0=None): """ Plot beam diameters and ISO 11146 fit. 
Args: z: array of axial position of beam measurements [m] d: array of beam diameters [m] lambda0: wavelength of the laser [m] Returns: residuals: array with differences between fit and data z0: location of focus zR: Rayleigh distance for beam """ params, errors, used = M2_fit(z, d, lambda0, strict=strict, z0=z0, d0=d0) unused = np.logical_not(used) d0, z0, Theta, M2, zR = params d0_std, z0_std, Theta_std, M2_std, zR_std = errors # fitted line zmin = min(np.min(z), z0 - 4 * zR) zmax = max(np.max(z), z0 + 4 * zR) # plt.xlim(zmin, zmax) z_fit = np.linspace(zmin, zmax) # d_fit = np.sqrt(d0**2 + (Theta * (z_fit - z0))**2) # plt.plot(z_fit * 1e3, d_fit * 1e6, ':k') d_fit_lo = np.sqrt((d0 - d0_std)**2 + ((Theta - Theta_std) * (z_fit - z0))**2) d_fit_hi = np.sqrt((d0 + d0_std)**2 + ((Theta + Theta_std) * (z_fit - z0))**2) plt.fill_between(z_fit * 1e3, d_fit_lo * 1e6, d_fit_hi * 1e6, color='red', alpha=0.5) # show perfect gaussian caustic when unphysical M2 arises if M2 < 1: Theta00 = 4 * lambda0 / (np.pi * d0) d_00 = np.sqrt(d0**2 + (Theta00 * (z_fit - z0))**2) plt.plot(z_fit * 1e3, d_00 * 1e6, ':k', lw=2, label="M²=1") plt.legend(loc="lower right") plt.fill_between(z_fit * 1e3, d_fit_lo * 1e6, d_fit_hi * 1e6, color='red', alpha=0.5) # data points plt.plot(z[used] * 1e3, d[used] * 1e6, 'o', color='black', label='used') plt.plot(z[unused] * 1e3, d[unused] * 1e6, 'ok', mfc='none', label='unused') plt.xlabel('') plt.ylabel('') tax = plt.gca().transAxes plt.text(0.05, 0.30, '$M^2$ = %.2f±%.2f ' % (M2, M2_std), transform=tax) plt.text(0.05, 0.25, '$d_0$ = %.0f±%.0f µm' % (d0 * 1e6, d0_std * 1e6), transform=tax) plt.text(0.05, 0.15, '$z_0$ = %.0f±%.0f mm' % (z0 * 1e3, z0_std * 1e3), transform=tax) plt.text(0.05, 0.10, '$z_R$ = %.0f±%.0f mm' % (zR * 1e3, zR_std * 1e3), transform=tax) Theta_ = Theta * 1e3 Theta_std_ = Theta_std * 1e3 plt.text(0.05, 0.05, r'$\Theta$ = %.2f±%.2f mrad' % (Theta_, Theta_std_), transform=tax) plt.axvline(z0 * 1e3, color='black', lw=1) plt.axvspan((z0 - zR) * 1e3, (z0 + zR) * 1e3, color='cyan', alpha=0.3) plt.axvspan((z0 - 2 * zR) * 1e3, (zmin) * 1e3, color='cyan', alpha=0.3) plt.axvspan((z0 + 2 * zR) * 1e3, (zmax) * 1e3, color='cyan', alpha=0.3) # plt.axhline(d0 * 1e6, color='black', lw=1) # plt.axhspan((d0 + d0_std) * 1e6, (d0 - d0_std) * 1e6, color='red', alpha=0.1) plt.title(r'$d^2(z) = d_0^2 + \Theta^2 (z - z_0)^2$') if sum(z[unused]) > 0: plt.legend(loc='upper right') residuals = d - np.sqrt(d0**2 + (Theta * (z - z0))**2) return residuals, z0, zR, used def _M2_diameter_plot(z, d, lambda0, strict=False, z0=None, d0=None): """ Plot the fitted beam and the residuals. 
Args: z: array of axial position of beam measurements [m] d: array of beam diameters [m] lambda0: wavelength of the laser [m] Returns: nothing """ fig = plt.figure(1, figsize=(12, 8)) gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[6, 2]) fig.add_subplot(gs[0]) residualsx, z0, zR, used = _fit_plot(z, d, lambda0, strict=strict, z0=z0, d0=d0) unused = np.logical_not(used) zmin = min(np.min(z), z0 - 4 * zR) zmax = max(np.max(z), z0 + 4 * zR) plt.ylabel('beam diameter (µm)') plt.ylim(0, 1.1 * max(d) * 1e6) fig.add_subplot(gs[1]) plt.plot(z * 1e3, residualsx * 1e6, "ro") plt.plot(z[used] * 1e3, residualsx[used] * 1e6, 'ok', label='used') plt.plot(z[unused] * 1e3, residualsx[unused] * 1e6, 'ok', mfc='none', label='unused') plt.axhline(color="gray", zorder=-1) plt.xlabel('axial position $z$ (mm)') plt.ylabel('residuals (µm)') plt.axvspan((z0 - zR) * 1e3, (z0 + zR) * 1e3, color='cyan', alpha=0.3) plt.axvspan((z0 - 2 * zR) * 1e3, (zmin) * 1e3, color='cyan', alpha=0.3) plt.axvspan((z0 + 2 * zR) * 1e3, (zmax) * 1e3, color='cyan', alpha=0.3) def M2_diameter_plot(z, dx, lambda0, dy=None, strict=False, z0=None, d0=None): """ Plot the semi-major and semi-minor beam fits and residuals. Example:: >>>> import numpy as np >>>> import laserbeamsize as lbs >>>> lambda0 = 632.8e-9 # meters >>>> z = np.array([168, 210, 280, 348, 414, 480, 495, 510, 520, 580, 666, 770]) >>>> r = np.array([597, 572, 547, 554, 479, 403, 415, 400, 377, 391, 326, 397]) >>>> lbs.M2_diameter_plot(z * 1e-3, 2 * r * 1e-6, lambda0) >>>> plt.show() Args: z: array of axial position of beam measurements [m] lambda0: wavelength of the laser [m] dx: array of beam diameters [m] Returns: nothing """ if dy is None: _M2_diameter_plot(z, dx, lambda0, strict=strict, z0=z0, d0=d0) return ymax = 1.1 * max(np.max(dx), np.max(dy)) * 1e6 # Create figure window to plot data fig = plt.figure(1, figsize=(12, 8)) gs = matplotlib.gridspec.GridSpec(2, 2, height_ratios=[6, 2]) # semi-major axis plot fig.add_subplot(gs[0, 0]) residualsx, z0x, zR, used = _fit_plot(z, dx, lambda0, strict=strict, z0=z0, d0=d0) zmin = min(np.min(z), z0x - 4 * zR) zmax = max(np.max(z), z0x + 4 * zR) unused = np.logical_not(used) plt.ylabel('beam diameter (µm)') plt.title('Semi-major Axis Diameters') plt.ylim(0, ymax) # semi-major residuals fig.add_subplot(gs[1, 0]) ax = plt.gca() plt.plot(z[used] * 1e3, residualsx[used] * 1e6, 'ok', label='used') plt.plot(z[unused] * 1e3, residualsx[unused] * 1e6, 'ok', mfc='none', label='unused') plt.axhline(color="gray", zorder=-1) plt.xlabel('axial position $z$ (mm)') plt.ylabel('residuals (µm)') plt.axvspan((z0x - zR) * 1e3, (z0x + zR) * 1e3, color='cyan', alpha=0.3) plt.axvspan((z0x - 2 * zR) * 1e3, (zmin) * 1e3, color='cyan', alpha=0.3) plt.axvspan((z0x + 2 * zR) * 1e3, (zmax) * 1e3, color='cyan', alpha=0.3) # semi-minor axis plot fig.add_subplot(gs[0, 1]) residualsy, z0y, zR, used = _fit_plot(z, dy, lambda0, strict=strict, z0=z0, d0=d0) unused = np.logical_not(used) plt.title('Semi-minor Axis Diameters') plt.ylim(0, ymax) ymax = max(np.max(residualsx), np.max(residualsy)) * 1e6 ymin = min(np.min(residualsx), np.min(residualsy)) * 1e6 ax.set_ylim(ymin, ymax) # semi-minor residuals fig.add_subplot(gs[1, 1]) plt.plot(z[used] * 1e3, residualsy[used] * 1e6, 'ok', label='used') plt.plot(z[unused] * 1e3, residualsy[unused] * 1e6, 'ok', mfc='none', label='unused') plt.axhline(color="gray", zorder=-1) plt.xlabel('axial position $z$ (mm)') plt.ylabel('') plt.axvspan((z0y - zR) * 1e3, (z0y + zR) * 1e3, color='cyan', alpha=0.3) plt.axvspan((z0y - 2 
* zR) * 1e3, (zmin) * 1e3, color='cyan', alpha=0.3) plt.axvspan((z0y + 2 * zR) * 1e3, (zmax) * 1e3, color='cyan', alpha=0.3) plt.ylim(ymin, ymax) def M2_radius_plot(z, d, lambda0, strict=False, z0=None, d0=None): """ Plot radii, beam fits, and asymptotes. Example:: >>>> import numpy as np >>>> import laserbeamsize as lbs >>>> lambda0 = 632.8e-9 # meters >>>> z = np.array([168, 210, 280, 348, 414, 480, 495, 510, 520, 580, 666, 770]) >>>> r = np.array([597, 572, 547, 554, 479, 403, 415, 400, 377, 391, 326, 397]) >>>> lbs.M2_radius_plot(z * 1e-3, 2 * r * 1e-6, lambda0) >>>> plt.show() Args: z: array of axial position of beam measurements [m] d: array of beam diameters [m] lambda0: wavelength of the laser [m] Returns: nothing """ params, errors, used = M2_fit(z, d, lambda0, strict=strict, z0=z0, d0=d0) unused = np.logical_not(used) d0, z0, Theta, M2, zR = params d0_std, _, Theta_std, M2_std, _ = errors plt.figure(1, figsize=(12, 8)) # fitted line zmin = min(np.min(z - z0), -4 * zR) * 1.05 + z0 zmax = max(np.max(z - z0), +4 * zR) * 1.05 + z0 plt.xlim((zmin - z0) * 1e3, (zmax - z0) * 1e3) z_fit = np.linspace(zmin, zmax) d_fit = np.sqrt(d0**2 + (Theta * (z_fit - z0))**2) # plt.plot((z_fit - z0) * 1e3, d_fit * 1e6 / 2, ':r') # plt.plot((z_fit - z0) * 1e3, -d_fit * 1e6 / 2, ':r') d_fit_lo = np.sqrt((d0 - d0_std)**2 + ((Theta - Theta_std) * (z_fit - z0))**2) d_fit_hi = np.sqrt((d0 + d0_std)**2 + ((Theta + Theta_std) * (z_fit - z0))**2) # asymptotes r_left = -(z0 - zmin) * np.tan(Theta / 2) * 1e6 r_right = (zmax - z0) * np.tan(Theta / 2) * 1e6 plt.plot([(zmin - z0) * 1e3, (zmax - z0) * 1e3], [r_left, r_right], '--b') plt.plot([(zmin - z0) * 1e3, (zmax - z0) * 1e3], [-r_left, -r_right], '--b') # xticks along top axis ticks = [(i * zR) * 1e3 for i in range(int((zmin - z0) / zR), int((zmax - z0) / zR) + 1)] ticklabels1 = ["%.0f" % (z + z0 * 1e3) for z in ticks] ticklabels2 = [] for i in range(int((zmin - z0) / zR), int((zmax - z0) / zR) + 1): if i == 0: ticklabels2 = np.append(ticklabels2, "0") elif i == -1: ticklabels2 = np.append(ticklabels2, r"-$z_R$") elif i == 1: ticklabels2 = np.append(ticklabels2, r"$z_R$") else: ticklabels2 = np.append(ticklabels2, r"%d$z_R$" % i) ax1 = plt.gca() ax2 = ax1.twiny() ax1.set_xticks(ticks) if len(ticks) > 10: ax1.set_xticklabels(ticklabels1, fontsize=14, rotation=90) else: ax1.set_xticklabels(ticklabels1, fontsize=14) ax2.set_xbound(ax1.get_xbound()) ax2.set_xticks(ticks) if len(ticks) > 10: ax2.set_xticklabels(ticklabels2, fontsize=14, rotation=90) else: ax2.set_xticklabels(ticklabels2, fontsize=14) # usual labels for graph ax1.set_xlabel('Axial Location (mm)', fontsize=14) ax1.set_ylabel('Beam radius (µm)', fontsize=14) title = r'$w_0=d_0/2$=%.0f±%.0fµm, ' % (d0 / 2 * 1e6, d0_std / 2 * 1e6) title += r'$M^2$ = %.2f±%.2f, ' % (M2, M2_std) title += r'$\lambda$=%.0f nm' % (lambda0 * 1e9) plt.title(title, fontsize=16) # show the divergence angle s = r'$\Theta$ = %.2f±%.2f mrad' % (Theta * 1e3, Theta_std * 1e3) plt.text(2 * zR * 1e3, 0, s, ha='left', va='center', fontsize=16) arc_x = 1.5 * zR * 1e3 arc_y = 1.5 * zR * np.tan(Theta / 2) * 1e6 plt.annotate('', (arc_x, -arc_y), (arc_x, arc_y), arrowprops=dict(arrowstyle="<->", connectionstyle="arc3, rad=-0.2")) # show the Rayleigh ranges ymin = max(max(d_fit), max(d)) ymin *= -1 / 2 * 1e6 plt.text(0, ymin, '$-z_R<z-z_0<z_R$', ha='center', va='bottom', fontsize=16) x = (zmax - z0 + 2 * zR) / 2 * 1e3 plt.text(x, ymin, '$2z_R < z-z_0$', ha='center', va='bottom', fontsize=16) x = (zmin - z0 - 2 * zR) / 2 * 1e3 plt.text(x, ymin, 
'$z-z_0 < -2z_R$', ha='center', va='bottom', fontsize=16) ax1.axvspan((-zR) * 1e3, (+zR) * 1e3, color='cyan', alpha=0.3) ax1.axvspan((-2 * zR) * 1e3, (zmin - z0) * 1e3, color='cyan', alpha=0.3) ax1.axvspan((+2 * zR) * 1e3, (zmax - z0) * 1e3, color='cyan', alpha=0.3) # show the fit zz = (z_fit - z0) * 1e3 lo = d_fit_lo * 1e6 / 2 hi = d_fit_hi * 1e6 / 2 ax1.fill_between(zz, lo, hi, color='red', alpha=0.5) ax1.fill_between(zz, -lo, -hi, color='red', alpha=0.5) # show perfect gaussian caustic when unphysical M2 arises if M2 < 1: Theta00 = 4 * lambda0 / (np.pi * d0) r_00 = np.sqrt(d0**2 + (Theta00 * zz * 1e-3)**2) / 2 * 1e6 plt.plot(zz, r_00, ':k', lw=2, label="M²=1") plt.plot(zz, -r_00, ':k', lw=2) plt.legend(loc="lower right") # data points ax1.plot((z[used] - z0) * 1e3, d[used] * 1e6 / 2, 'ok', label='used') ax1.plot((z[used] - z0) * 1e3, -d[used] * 1e6 / 2, 'ok') ax1.plot((z[unused] - z0) * 1e3, d[unused] * 1e6 / 2, 'ok', mfc='none', label='unused') ax1.plot((z[unused] - z0) * 1e3, -d[unused] * 1e6 / 2, 'ok', mfc='none') if sum(z[unused]) > 0: ax1.legend(loc='center left') def M2_focus_plot(w0, lambda0, f, z0, M2=1): """ Plot a beam from its waist through a lens to its focus. After calling this, use `plt.show()` to display the plot. The lens is at `z=0` with respect to the beam waist. All distances to the left of the lens are negative and those to the right are positive. The beam has a waist at `z0`. If the beam waist is at the front focal plane of the lens then `z0=-f`. Args: w0: beam radius at waist [m] lambda0: wavelength of beam [m] f: focal length of lens [m] z0: location of beam waist [m] M2: beam propagation factor [-] Returns: nothing. """ # plot the beam from just before the waist to the lens left = 1.1 * z0 z = np.linspace(left, 0) r = beam_radius(w0, lambda0, z, z0=z0, M2=M2) plt.fill_between(z * 1e3, -r * 1e6, r * 1e6, color='red', alpha=0.2) # find the gaussian beam parameters for the beam after the lens w0_after = w0 * magnification(w0, lambda0, z0, f, M2=M2) z0_after = image_distance(w0, lambda0, z0, f, M2=M2) zR_after = z_rayleigh(w0_after, lambda0, M2) # plot the beam after the lens right = max(2 * f, z0_after + 4 * zR_after) z_after = np.linspace(0, right) r_after = beam_radius(w0_after, lambda0, z_after, z0=z0_after, M2=M2) # plt.axhline(w0_after * 1.41e6) plt.fill_between(z_after * 1e3, -r_after * 1e6, r_after * 1e6, color='red', alpha=0.2) # locate the lens and the two beam waists plt.axhline(0, color='black', lw=1) plt.axvline(0, color='black') plt.axvline(z0 * 1e3, color='black', linestyle=':') plt.axvline(z0_after * 1e3, color='black', linestyle=':') # finally, show the ±1 Rayleigh distance zRmin = max(0, (z0_after - zR_after)) * 1e3 zRmax = (z0_after + zR_after) * 1e3 plt.axvspan(zRmin, zRmax, color='blue', alpha=0.1) plt.xlabel('Axial Position Relative to Lens (mm)') plt.ylabel('Beam Radius (microns)') title = "$w_0$=%.0fµm, $z_0$=%.0fmm, " % (w0 * 1e6, z0 * 1e3) title += "$w_0'$=%.0fµm, $z_0'$=%.0fmm, " % (w0_after * 1e6, z0_after * 1e3) title += "$z_R'$=%.0fmm" % (zR_after * 1e3) plt.title(title)
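# A hedged numeric sketch of the lens back-propagation performed by
# artificial_to_original() above.  The values are illustrative rather than data
# from this module, only the formulas already shown in that function are used,
# and the hiatus between the lens principal planes is taken as zero.
if __name__ == "__main__":
    f = 0.200                                      # focal length of the lens [m]
    art_d0, art_z0, art_Theta, art_zR = 100e-6, 0.250, 2.0e-3, 0.050

    x2 = art_z0 - f
    V = f / np.sqrt(art_zR**2 + x2**2)             # same factor V as in artificial_to_original()
    orig_d0 = V * art_d0                           # waist diameter of the unfocused beam
    orig_z0 = V**2 * x2 + f                        # waist location of the unfocused beam
    orig_Theta = art_Theta / V                     # divergence of the unfocused beam

    print("d0 = %.1f µm, z0 = %.1f mm, Theta = %.2f mrad"
          % (orig_d0 * 1e6, orig_z0 * 1e3, orig_Theta * 1e3))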
python
''' 09.60 - Use the 8x8 LED Matrix with the max7219 driver using SPI This sketch shows how to control the 8x8 LED Matrix to draw random pixels. Components ---------- - ESP32 - One or more 8x8 LED matrix displays with the max7219 driver - GND --> GND - VCC --> 5V - CS --> GPIO 5 (SPI SS) - CLK --> GPIO 18 (SPI SCK) - DIN --> GPIO 23 (SPI MOSI) - Wires - Breadboard Documentation: * Pins and GPIO: https://micropython-docs-esp32.readthedocs.io/en/esp32_doc/esp32/quickref.html#pins-and-gpio * sleep: http://docs.micropython.org/en/latest/library/utime.html?highlight=utime%20sleep#utime.sleep * SPI (hardware): https://docs.micropython.org/en/latest/esp32/quickref.html#hardware-spi-bus * max7219: https://github.com/mcauser/micropython-max7219 * random function: https://docs.python.org/3/library/random.html Course: MicroPython with the ESP32 https://techexplorations.com ''' import max7219 from machine import Pin, SPI from utime import sleep_ms from random import * #spi = SPI(2, baudrate=10000000, polarity=1, phase=0, sck=Pin(18), mosi=Pin(23)) spi = SPI(2, 10000000, sck=Pin(18), mosi=Pin(23)) ss = Pin(5, Pin.OUT) display = max7219.Matrix8x8(spi, ss, 4) display.fill(0) display.brightness(5) while True: for x in range(10): display.pixel(randint(0, 31), randint(0, 7),1) display.show() sleep_ms(15) display.fill(0)
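# A hedged follow-on sketch: an alternative main loop that scrolls a message
# across the four chained modules instead of drawing random pixels.  Use it in
# place of the random-pixel loop above.  It assumes the mcauser max7219 driver
# exposes the framebuf-style text() method; message and delay are illustrative.
message = "HELLO ESP32"
width = len(message) * 8                  # each character is 8 pixels wide
while True:
    for offset in range(32, -width, -1):  # slide the text right to left across 32 columns
        display.fill(0)
        display.text(message, offset, 0, 1)
        display.show()
        sleep_ms(30)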
python
#!/usr/bin/python from setuptools import setup, find_packages version = '0.9.4' setup(name='workerpool', version=version, description="Module for distributing jobs to a pool of worker threads.", long_description="""\ Performing tasks in many threads made fun! This module facilitates distributing simple operations into jobs that are sent to worker threads, maintained by a pool object. It consists of these components: 1. Jobs, which are single units of work that need to be performed. 2. Workers, who grab jobs from a queue and perform them. 3. Worker pool, which keeps track of workers and the job queue. """, # Strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ 'Intended Audience :: Developers', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Software Development :: Libraries :: Python Modules', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', ], keywords='pooling threading jobs', author='Andrey Petrov', author_email='[email protected]', url='https://github.com/shazow/workerpool', license='MIT', packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), include_package_data=True, zip_safe=False, install_requires=[ # -*- Extra requirements: -*- "six" ], entry_points=""" # -*- Entry points: -*- """, test_suite='nose.collector' )
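# A hedged usage sketch for the package this setup script installs.  The
# WorkerPool/map/shutdown/wait names follow the project's documented example
# rather than anything defined in this file, so treat them as assumptions.
import workerpool

def fetch(url):
    print("fetching", url)                 # stand-in for a real unit of work

urls = ["http://example.com/a", "http://example.com/b"]

pool = workerpool.WorkerPool(size=5)       # keep five worker threads alive
pool.map(fetch, urls)                      # each call becomes a job on the shared queue
pool.shutdown()                            # ask the workers to exit once idle
pool.wait()                                # block until the queue is drained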
python
""" For more details, see the class documentation. """ from django.db.models import Q from map_annotate_app.dto import CrimeDTO from map_annotate_app.extras import Location from map_annotate_app.models import Crime class CrimeDAO: """ This class represents the data access layer for a crime record. """ def __init__(self): pass @staticmethod def get_crime_list(crime_filter): """ Gets crimes which pass the filter from database. C{crime_filter} is an object of class C{CrimeFilter} which is used to filter out the crimes. Returns a list of C{CrimeDTO} objects which satisfy the C{crime_filter} """ return_list = [] crime_obj = Crime.objects if len(crime_filter.type_id_list) > 0: filter_type_parameter = Q(type_id=crime_filter.type_id_list[0]) for type_id in crime_filter.type_id_list[1:]: filter_type_parameter = filter_type_parameter | Q(type_id=type_id) crime_obj = crime_obj.filter(filter_type_parameter) if crime_filter.north_east and crime_filter.south_west: # TODO: May cause errors when longitude varies from +180 to -180 crime_obj = crime_obj.filter(location__lat__lte=crime_filter.north_east.lat, location__lat__gte=crime_filter.south_west.lat, location__lng__lte=crime_filter.north_east.lng, location__lng__gte=crime_filter.south_west.lng, ) if crime_filter.dateFrom: crime_obj = crime_obj.filter(timestamp__gte=crime_filter.dateFrom) if crime_filter.dateTo: crime_obj = crime_obj.filter(timestamp__lte=crime_filter.dateTo) result_set = crime_obj.select_related('location', 'type').all() for each in result_set: crime_data_dto = CrimeDTO.CrimeDTO() crime_data_dto.type = str(each.type.crime_type) # crime_data_dto.type = "mobile theft" crime_data_dto.fir_no = "\"" + str(each.fir_number) + "\"" crime_data_dto.location = Location.Location(each.location.lat, each.location.lng) # crime_data_dto.location = Location.Location(23, 45) crime_data_dto.timestamp = each.timestamp.strftime("%d %B, %Y, %H:%M") crime_data_dto.url_link = "http://www.zipnet.in" return_list.append(crime_data_dto) # return_list.append(Pin.Pin(crime_data_dto.location, [crime_data_dto], [], [])) return return_list
python
""" $url mediavitrina.ru $type live $region Russia """ import logging import re from urllib.parse import urlparse from streamlink.plugin import Plugin, pluginmatcher from streamlink.plugin.api import validate from streamlink.stream.hls import HLSStream from streamlink.utils.url import update_qsd log = logging.getLogger(__name__) @pluginmatcher(re.compile(r"""https?://(?:www\.)?(?: 5-tv | chetv | ctc(?:love)? | domashniy )\.ru/(?:live|online)""", re.VERBOSE)) @pluginmatcher(re.compile(r"https?://ren\.tv/live")) @pluginmatcher(re.compile(r"https?://player\.mediavitrina\.ru/.+/player\.html")) class MediaVitrina(Plugin): _re_url_json = re.compile(r"https://media\.mediavitrina\.ru/(?:proxy)?api/v2/\w+/playlist/[\w-]+_as_array\.json") def _get_streams(self): self.session.http.headers.update({"Referer": self.url}) p_netloc = urlparse(self.url).netloc if p_netloc == "player.mediavitrina.ru": # https://player.mediavitrina.ru/ url_player = self.url elif p_netloc.endswith("ctc.ru"): # https://ctc.ru/online/ url_player = self.session.http.get( "https://ctc.ru/api/page/v1/online/", schema=validate.Schema( validate.parse_json(), {"content": validate.all( [dict], validate.filter(lambda n: n.get("type") == "on-air"), [{"onAirLink": validate.url(netloc="player.mediavitrina.ru")}], validate.get((0, "onAirLink")) )}, validate.get("content") ) ) else: # https://chetv.ru/online/ # https://ctclove.ru/online/ # https://domashniy.ru/online/ # https://ren.tv/live # https://www.5-tv.ru/online/ url_player = self.session.http.get(self.url, schema=validate.Schema( validate.parse_html(), validate.xml_xpath_string(".//iframe[starts-with(@src,'https://player.mediavitrina.ru/')]/@src"), )) if not url_player: return log.debug(f"url_player={url_player}") script_data = self.session.http.get(url_player, schema=validate.Schema( validate.parse_html(), validate.xml_xpath_string(".//script[contains(text(),'media.mediavitrina.ru/')]/text()"), )) if not script_data: log.debug("invalid script_data") return m = self._re_url_json.search(script_data) if not m: log.debug("invalid url_json") return url_json = m.group(0) log.debug(f"url_json={url_json}") res_token = self.session.http.get( "https://media.mediavitrina.ru/get_token", schema=validate.Schema( validate.parse_json(), {"result": {"token": str}}, validate.get("result"), )) url = self.session.http.get( update_qsd(url_json, qsd=res_token), schema=validate.Schema( validate.parse_json(), {"hls": [validate.url()]}, validate.get(("hls", 0)), )) if not url: return if "georestrictions" in url: log.error("Stream is geo-restricted") return return HLSStream.parse_variant_playlist(self.session, url, name_fmt="{pixels}_{bitrate}") __plugin__ = MediaVitrina
python
#!/usr/bin/env python from __future__ import absolute_import, division, print_function import sys from socket import * from time import strftime import datetime def main(): if len(sys.argv) < 4: print("completion_logger_server.py <listen address> <listen port> <log file>") exit(1) host = sys.argv[1] port = int(sys.argv[2]) buf = 1024 * 8 addr = (host,port) # Create socket and bind to address UDPSock = socket(AF_INET,SOCK_DGRAM) UDPSock.bind(addr) print("Listing on {0}:{1} and logging to '{2}'".format(host, port, sys.argv[3])) # Open the logging file. f = open(sys.argv[3], "a") # Receive messages while 1: data,addr = UDPSock.recvfrom(buf) if not data: break else: f.write("{ "); f.write("\"time\": \"{0}\"".format(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))) f.write(", \"sender\": \"{0}\" ".format(addr[0])) f.write(", \"data\": ") f.write(data) f.write(" }\n") f.flush() # Close socket UDPSock.close() if __name__ == '__main__': main()
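# A hedged companion sketch: send one test datagram to the server above so a
# JSON line lands in the log file.  The address must match the <listen address>
# and <listen port> the server was started with; the payload is illustrative
# and is written into the "data" field verbatim.
from socket import socket, AF_INET, SOCK_DGRAM

client = socket(AF_INET, SOCK_DGRAM)
client.sendto(b'{"event": "completion", "status": "ok"}', ("127.0.0.1", 50007))
client.close()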
python
#!/usr/bin/python """ python 1.5.2 lacks some networking routines. This module implements them (as I don't want to drop 1.5.2 compatibility atm) """ # $Id: net.py,v 1.2 2001/11/19 00:47:49 ivo Exp $ from string import split import socket, fcntl, FCNTL def inet_aton(str): """ convert quated dot notation to a int python 2.x's inet_aton returns a string containing the network representation. This is according to the C inet_aton """ n = 0 quads = split(str, ".") if len(quads) != 4: raise socket.error, "illegal IP address string passed to inet_aton" for i in quads: try: j = int(i) if not(0 <= j <= 255): raise socket.error, \ "illegal IP address string passed to inet_aton" except ValueError: raise socket.error, "illegal IP address string passed to inet_aton" n = (int(quads[0]) << 24) + (int(quads[1]) << 16) + \ (int(quads[2]) << 8) + int(quads[3]) return n def inet_ntoa(addr): """ Do the reverse of inet_aton, return the quad notation of 'addr' which is a long containing the network address """ quad = [0,0,0,0] for i in (0,1,2,3): quad[i] = (addr >> (8*(3-i))) & 0xFF return "%u.%u.%u.%u" % tuple(quad) def make_nonblocking(fd): fl = fcntl.fcntl(fd, FCNTL.F_GETFL) try: fcntl.fcntl(fd, FCNTL.F_SETFL, fl | FCNTL.O_NDELAY) except AttributeError: fcntl.fcntl(fd, FCNTL.F_SETFL, fl | FCNTL.FNDELAY) if __name__ == '__main__': print "Testing inet_aton" for i in ('0.0.0.0', '127.0.0.1', '255.255.255.255', '10.0.0.1'): print "%s -> %lu" % (i, inet_aton(i)) print "The following wil fail" for i in ('0.0.0.0.0', '127.0.0', '256.255.255.255', 'www.amaze.nl'): try: print "%s -> %lu" % (i, inet_aton(i)) except socket.error: print "Could not translate %s" % i print "Testing inet_ntoa" for i in ('0.0.0.0', '127.0.0.1', '255.255.255.255', '10.0.0.1'): print "%s -> %s" % (i, inet_ntoa(inet_aton(i)))
python
""" this is for pytest to import everything smoothly """ import os import sys sys.path.append(os.path.dirname(os.path.realpath(__file__)))
python
from fastapi import FastAPI
from fastapi.responses import JSONResponse

app = FastAPI()

store = {
    'demo': 'this is important data!'
}


@app.get('/')  # Return all key-value pairs
def read_keys():
    return store


@app.post('/')  # Create a new key-value pair
def create_key(key: str, value: str):
    store[key] = value
    return {key: store[key]}
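# A hedged usage sketch for the two endpoints above, exercised in-process with
# FastAPI's TestClient.  Because key and value are plain str parameters they
# travel as query parameters; the example key/value pair is illustrative.
from fastapi.testclient import TestClient

client = TestClient(app)
print(client.post("/", params={"key": "answer", "value": "42"}).json())  # {'answer': '42'}
print(client.get("/").json())  # the whole store, including the original 'demo' entry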
python
import pytest import logging import tempfile from lindh import jsondb # Logging FORMAT = '%(asctime)s [%(threadName)s] %(filename)s +%(levelno)s ' + \ '%(funcName)s %(levelname)s %(message)s' logging.basicConfig(format=FORMAT, level=logging.DEBUG) @pytest.fixture(scope='function') def db(): db = jsondb.Database(root=tempfile.mkdtemp(prefix='jsondb-')) yield db db.destroy() def test_init(db): assert db is not None def test_save(db): o = db.save({'a': 1}) assert '_id' in o.keys() assert o['_id'] is not None assert db.has(o['_id']) def test_get(db): o = db.save({'a': 1}) new_id = o['_id'] assert new_id is not None o = db.get(new_id) assert o is not None assert o['a'] == 1 assert '_id' in o.keys() assert o['_id'] == new_id assert '_rev' in o.keys() def test_get_2(db): o1 = db.save({'a': 1}) new_id_1 = o1['_id'] assert new_id_1 is not None o2 = db.save({'b': 2}) new_id_2 = o2['_id'] assert new_id_2 is not None o1 = db.get(new_id_1) assert o1 is not None assert o1['a'] == 1 assert '_id' in o1.keys() assert o1['_id'] == new_id_1 assert '_rev' in o1.keys() o2 = db.get(new_id_2) assert o2 is not None assert o2['b'] == 2 assert '_id' in o2.keys() assert o2['_id'] == new_id_2 assert '_rev' in o2.keys() def test_delete(db): o = db.save({'a': 1}) new_id = o['_id'] assert new_id is not None db.delete(new_id) assert not db.has(new_id) def test_update(db): o = db.save({'a': 1}) new_id = o['_id'] first_rev = o['_rev'] assert first_rev is not None assert new_id is not None o['a'] = 2 o = db.save(o) assert o['a'] == 2 second_rev = o['_rev'] assert second_rev is not None assert first_rev != second_rev o = db.get(new_id) assert o['a'] == 2 assert o['_rev'] == second_rev def test_view_just_save(db): db.define('b_by_a', lambda o: (o['a'], o['b'])) db.save({'a': 2, 'b': 22}) db.save({'a': 3, 'b': 33}) db.save({'a': 1, 'b': 11}) r = db.view('b_by_a') r = list(r) assert len(r) == 3 assert r[0] == {'id': 2, 'key': 1, 'value': 11} assert r[1] == {'id': 0, 'key': 2, 'value': 22} assert r[2] == {'id': 1, 'key': 3, 'value': 33} def test_view_save_and_update_value(db): db.define('b_by_a', lambda o: (o['a'], o['b'])) db.save({'a': 2, 'b': 22}) db.save({'a': 3, 'b': 33}) o1 = db.save({'a': 1, 'b': 11}) o1['b'] = 1111 db.save(o1) r = db.view('b_by_a') r = list(r) assert len(r) == 3 assert r[0] == {'id': 2, 'key': 1, 'value': 1111} assert r[1] == {'id': 0, 'key': 2, 'value': 22} assert r[2] == {'id': 1, 'key': 3, 'value': 33} def test_view_save_and_delete(db): db.define('b_by_a', lambda o: (o['a'], o['b'])) o2 = db.save({'a': 2, 'b': 22}) db.save({'a': 3, 'b': 33}) db.save({'a': 1, 'b': 11}) db.delete(o2['_id']) r = db.view('b_by_a') r = list(r) assert len(r) == 2 assert r[0] == {'id': 2, 'key': 1, 'value': 11} assert r[1] == {'id': 1, 'key': 3, 'value': 33} def test_view_kickstart(db): db.save({'a': 2, 'b': 22}) db.save({'a': 3, 'b': 33}) db.save({'a': 1, 'b': 11}) db.define('b_by_a', lambda o: (o['a'], o['b'])) r = db.view('b_by_a') r = list(r) assert len(r) == 3 assert r[0] == {'id': 2, 'key': 1, 'value': 11} assert r[1] == {'id': 0, 'key': 2, 'value': 22} assert r[2] == {'id': 1, 'key': 3, 'value': 33} def test_view_by_key(db): db.save({'a': 2, 'b': 22}) db.save({'a': 3, 'b': 33}) db.save({'a': 1, 'b': 11}) db.define('b_by_a', lambda o: (o['a'], o['b'])) r = list(db.view('b_by_a', key=2)) assert len(r) == 1 assert r[0] == {'id': 0, 'key': 2, 'value': 22} def test_view_by_key_string(db): db.save({'a': '2', 'b': 22}) db.save({'a': '3', 'b': 33}) db.save({'a': '1', 'b': 11}) db.define('b_by_a', lambda o: (o['a'], o['b'])) r 
= list(db.view('b_by_a', key='2')) assert len(r) == 1 assert r[0] == {'id': 0, 'key': '2', 'value': 22} def test_view_by_key_two_values_same_key_before(db): db.define('b_by_a', lambda o: (o['a'], o['b'])) db.save({'a': 2, 'b': 22}) db.save({'a': 3, 'b': 33}) db.save({'a': 1, 'b': 11}) db.save({'a': 2, 'b': 44}) r = list(db.view('b_by_a', key=2)) assert len(r) == 2 assert r[0] == {'id': 0, 'key': 2, 'value': 22} assert r[1] == {'id': 3, 'key': 2, 'value': 44} def test_view_by_key_two_values_same_key_after(db): db.save({'a': 2, 'b': 22}) db.save({'a': 3, 'b': 33}) db.save({'a': 1, 'b': 11}) db.save({'a': 2, 'b': 44}) db.define('b_by_a', lambda o: (o['a'], o['b'])) r = list(db.view('b_by_a', key=2)) assert len(r) == 2 assert r[0] == {'id': 0, 'key': 2, 'value': 22} assert r[1] == {'id': 3, 'key': 2, 'value': 44} def test_view_by_startkey(db): db.save({'a': 2, 'b': 22}) db.save({'a': 3, 'b': 33}) db.save({'a': 1, 'b': 11}) db.define('b_by_a', lambda o: (o['a'], o['b'])) r = list(db.view('b_by_a', startkey=2)) assert len(r) == 2 assert r[0] == {'id': 0, 'key': 2, 'value': 22} assert r[1] == {'id': 1, 'key': 3, 'value': 33} def test_view_by_startkey_after(db): db.save({'a': 3, 'b': 33}) db.save({'a': 4, 'b': 44}) db.save({'a': 1, 'b': 11}) db.define('b_by_a', lambda o: (o['a'], o['b'])) r = list(db.view('b_by_a', startkey=2)) assert len(r) == 2 assert r[0] == {'id': 0, 'key': 3, 'value': 33} assert r[1] == {'id': 1, 'key': 4, 'value': 44} def test_view_by_endkey(db): db.save({'a': 2, 'b': 22}) db.save({'a': 3, 'b': 33}) db.save({'a': 1, 'b': 11}) db.define('b_by_a', lambda o: (o['a'], o['b'])) r = list(db.view('b_by_a', endkey=2)) assert len(r) == 2 assert r[0] == {'id': 2, 'key': 1, 'value': 11} assert r[1] == {'id': 0, 'key': 2, 'value': 22} def test_view_by_endkey_after(db): db.save({'a': 2, 'b': 22}) db.save({'a': 4, 'b': 44}) db.save({'a': 1, 'b': 11}) db.define('b_by_a', lambda o: (o['a'], o['b'])) r = list(db.view('b_by_a', endkey=3)) assert len(r) == 2 assert r[0] == {'id': 2, 'key': 1, 'value': 11} assert r[1] == {'id': 0, 'key': 2, 'value': 22} def test_add_with_custom_keys(db): db['a'] = {'a': 2, 'b': 22} db[1] = {'a': 3, 'b': 33} db[('a', 1)] = {'a': 1, 'b': 11} assert db['a'] == {'_id': 'a', '_rev': 0, 'a': 2, 'b': 22} assert db[1] == {'_id': 1, '_rev': 0, 'a': 3, 'b': 33} assert db[('a', 1)] == {'_id': ['a', 1], '_rev': 0, 'a': 1, 'b': 11} def test_include_docs(db): db.define('by_id', lambda o: (o['_id'], 1)) db[1] = {1: 11} db[2] = {2: 12} db[5] = {5: 15} db[7] = {7: 17} r = list(db.view('by_id', include_docs=True)) assert r[0] == {'id': 1, 'key': 1, 'value': 1, 'doc': {'_id': 1, '_rev': 0, '1': 11}} assert r[1] == {'id': 2, 'key': 2, 'value': 1, 'doc': {'_id': 2, '_rev': 0, '2': 12}} assert r[2] == {'id': 5, 'key': 5, 'value': 1, 'doc': {'_id': 5, '_rev': 0, '5': 15}} assert r[3] == {'id': 7, 'key': 7, 'value': 1, 'doc': {'_id': 7, '_rev': 0, '7': 17}} def test_yielding_mapping_function(db): def yielder(o): yield (o['a'], 1), o['b'] yield (o['a'], 2), o['b'] * 2 yield (o['a'], 3), o['b'] * 3 db.save({'a': 2, 'b': 22}) db.save({'a': 3, 'b': 33}) db.save({'a': 1, 'b': 11}) db.define('b_by_a', yielder) r = db.view('b_by_a') r = list(r) assert len(r) == 9 assert r[0] == {'id': 2, 'key': (1, 1), 'value': 11} assert r[1] == {'id': 2, 'key': (1, 2), 'value': 22} assert r[2] == {'id': 2, 'key': (1, 3), 'value': 33} assert r[3] == {'id': 0, 'key': (2, 1), 'value': 22} assert r[4] == {'id': 0, 'key': (2, 2), 'value': 44} assert r[5] == {'id': 0, 'key': (2, 3), 'value': 66} assert r[6] == 
{'id': 1, 'key': (3, 1), 'value': 33} assert r[7] == {'id': 1, 'key': (3, 2), 'value': 66} assert r[8] == {'id': 1, 'key': (3, 3), 'value': 99} def test_reduce_by_group(db): def sum_per(field, values): result = {} for value in values: v = value.get(field) if v in result: result[v] += 1 else: result[v] = 1 return result db.define('test', lambda o: (o['category'], {'state': o['state']}), lambda keys, values, rereduce: sum_per('state', values)) db.save({'category': 'a', 'state': 'new'}) db.save({'category': 'b', 'state': 'new'}) db.save({'category': 'a', 'state': 'old'}) db.save({'category': 'b', 'state': 'new'}) db.save({'category': 'a', 'state': 'old'}) db.save({'category': 'a', 'state': 'new'}) db.save({'category': 'c', 'state': 'new'}) db.save({'category': 'c', 'state': 'old'}) db.save({'category': 'a', 'state': 'new'}) db.save({'category': 'a', 'state': 'new'}) r = list(db.view('test', group=True)) print(r) assert r[0] == {'key': 'a', 'value': {'new': 4, 'old': 2}} assert r[1] == {'key': 'b', 'value': {'new': 2}} assert r[2] == {'key': 'c', 'value': {'new': 1, 'old': 1}} def test_skip(db): db.define('by_id', lambda o: (o['_id'], 1)) db[1] = {1: 11} db[2] = {2: 12} db[5] = {5: 15} db[7] = {7: 17} r = list(db.view('by_id', include_docs=True, skip=2)) assert r[0] == {'id': 5, 'key': 5, 'value': 1, 'doc': {'_id': 5, '_rev': 0, '5': 15}} assert r[1] == {'id': 7, 'key': 7, 'value': 1, 'doc': {'_id': 7, '_rev': 0, '7': 17}} def test_limit(db): db.define('by_id', lambda o: (o['_id'], 1)) db[1] = {1: 11} db[2] = {2: 12} db[5] = {5: 15} db[7] = {7: 17} r = list(db.view('by_id', include_docs=True, limit=2)) assert r[0] == {'id': 1, 'key': 1, 'value': 1, 'doc': {'_id': 1, '_rev': 0, '1': 11}} assert r[1] == {'id': 2, 'key': 2, 'value': 1, 'doc': {'_id': 2, '_rev': 0, '2': 12}} def test_skip_and_limit(db): db.define('by_id', lambda o: (o['_id'], 1)) db[1] = {1: 11} db[2] = {2: 12} db[5] = {5: 15} db[7] = {7: 17} r = list(db.view('by_id', include_docs=True, skip=1, limit=2)) assert r[0] == {'id': 2, 'key': 2, 'value': 1, 'doc': {'_id': 2, '_rev': 0, '2': 12}} assert r[1] == {'id': 5, 'key': 5, 'value': 1, 'doc': {'_id': 5, '_rev': 0, '5': 15}}
python
import sys import os from PIL import Image, ImageDraw # Add scripts dir to python search path sys.path.append(os.path.dirname(os.path.abspath(sys.argv[0]))) from maps_def import maps as MAPS BORDERS = True IDS = True def bake_map(tiles, info): size = tiles[0].size[0] res = Image.new("RGB", (len(info[0]) * size, len(info) * size)) z_d = ImageDraw.Draw(res) for y, line in enumerate(info): for x, tile in enumerate(line): res.paste(tiles[tile[0]].rotate(-90 * tile[1]), (x * size, (len(info) - 1) * size - y * size)) # naming if IDS: z_d.text((x * size + 10, (len(info) - 1) * size - y * size + 2), str(tile[0]), fill=(255, 0, 0)) # Tiles borders if BORDERS: for i in range(len(info)): z_d.line((0, i * size, len(info[0]) * size, i * size), fill=(0, 0, 100)) # vertical for i in range(len(info[0])): z_d.line((i * size, 0, i * size, len(info) * size), fill=(0, 0, 100)) return res def read_info(map_name): atls_cnt, y, x = MAPS[map_name.lower()][0] tmp = MAPS[map_name.lower()][1:] res = [tmp[i*x:(i+1)*x] for i in range(y)] return atls_cnt, res def read_tiles(tiles_path, map_name, tilesets_count): res = [] for i in range(tilesets_count): if not os.path.isfile(os.path.join(tiles_path, "{:}{:03d}.png".format(map_name, i))): print("No such file:", os.path.join(tiles_path, "{:}{:03d}.png".format(map_name, i))) sys.exit(-2) atlas = Image.open(os.path.join(tiles_path, "{:}{:03d}.png".format(map_name, i))).transpose(Image.FLIP_TOP_BOTTOM) t_size = atlas.size[0] // 8 frame = t_size // 8 usful = t_size * 3 // 4 for y in range(8): for x in range(8): res.append(atlas.crop((x * t_size + frame, y * t_size + frame, x * t_size + frame + usful, y * t_size + frame + usful)).transpose(Image.FLIP_TOP_BOTTOM)) return res if __name__ == "__main__": if os.environ.get("DONT_CHANGE_CWD", "0").lower() not in ("1", "yes", "true", "on"): os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) if len(sys.argv) != 3: print("Usage: check_map map_name tiles_dir") sys.exit(0) map_name = sys.argv[1] tiles_path = sys.argv[2] if map_name.lower() not in MAPS.keys() or \ map_name not in ["BaseGipat", "bz2g", "bz3g", "bz4g", "bz5g", "bz6g", "Zone1", "Zone2", "Zone3Obr", "Zone4", "Zone6", "Zone6_2", "Zone7", "Zone8", "zone9", "ZoneMainMenuNew", "bz10k", "bz8k", "bz9k", "Zone11", "Zone12", "Zone13", "bz11k", "Zone14", "bz13h", "bz16h", "Zone15", "Zone18", "Zone19", "bz14h", "bz15h", "bz18h", "Bz7g", "Zone16", "Zone17", "Zone20", "Zone5_1", "Zone10"]: print("Unknown map:", map_name) sys.exit(-1) tilesets_count, info = read_info(map_name) tiles = read_tiles(tiles_path, map_name, tilesets_count) res = bake_map(tiles, info) res.save("map_checker.png")
python
#@+leo-ver=5-thin #@+node:edream.110203113231.741: * @file ../plugins/add_directives.py """Allows users to define new @direcives.""" from leo.core import leoGlobals as g directives = ("markup",) # A tuple with one string. #@+others #@+node:ekr.20070725103420: ** init def init(): """Return True if the plugin has loaded successfully.""" g.registerHandler("start1", addPluginDirectives) return True #@+node:edream.110203113231.742: ** addPluginDirectives def addPluginDirectives(tag, keywords): """Add all new directives to g.globalDirectiveList""" global directives for s in directives: if s.startswith('@'): s = s[1:] if s not in g.globalDirectiveList: g.globalDirectiveList.append(s) #@-others #@@language python #@@tabwidth -4 #@-leo
python
# Microsoft API results index & search features generator """ Copyright 2016 Fabric S.P.A, Emmanuel Benazera, Alexandre Girard Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os, sys import json import numpy as np import shelve import hashlib from feature_generator import FeatureGenerator from index_search import Indexer, Searcher import logging logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class MAPIGenerator(FeatureGenerator): def __init__(self,image_files,json_files,json_emo_files,index_repo,name,description,tate=False,meta_in='',meta_out='',captions_in='',captions_out=''): self.name = name self.description = description self.tate = tate self.meta_in = meta_in self.meta_out = meta_out self.captions_in = captions_in self.captions_out = captions_out self.image_files = image_files self.json_files = json_files self.json_emo_files = json_emo_files self.index_repo = index_repo + '/' + self.name try: os.mkdir(self.index_repo) except: pass if self.captions_in == '': self.captions_in = self.index_repo + '/in_captions.bin' if self.captions_out == '': self.captions_out = self.index_repo + '/out_captions.bin' self.mapi_dominant_colors = {} self.mapi_tags = {} self.mapi_categories = {} self.mapi_people = {} self.mapi_faces = {} # face + gender + age + emotion self.mapi_captions = {} self.stm = {} self.st = shelve.open(self.index_repo + '/all_tags.bin') self.scm = {} self.sc = shelve.open(self.index_repo + '/all_cats.bin') self.emotions={'anger':0,'contempt':1,'disgust':2,'fear':3,'happiness':4,'neutral':5,'sadness':6,'surprise':7} return def __del__(self): for i,t in self.stm.iteritems(): self.st[i] = t self.st.close() for i,c in self.stm.iteritems(): self.sc[i] = t self.sc.close() # fuzzy matching of rectangles since M$ API do not return the same exact face rectangles with Vision and Emotion API... def equal_box(self,box1,box2): rtol = 0.05 if np.isclose(box1['height'],box2['height'],rtol=rtol) and np.isclose(box1['left'],box2['left'],rtol=rtol) and np.isclose(box1['top'],box2['top'],rtol=rtol) and np.isclose(box1['width'],box2['width'],rtol=rtol): return True else: return False def has_box(self,newbox,boxes): n = 0 for b in boxes: if self.equal_box(newbox['faceRectangle'],b['faceRectangle']): return n n = n + 1 return -1 def face_vector(self,fv): vec = [0.0] * 10 vec[0] = fv.get('age',-1) gender = -1 g = fv.get('gender',None) if g: if g == 'Male': gender = 1 else: gender = 2 vec[1] = gender v_emos = fv.get('emotions',None) if v_emos: for e,pos in self.emotions.iteritems(): if v_emos.get(e,None): vec[2+pos] = v_emos[e] return vec def box_hash(self,box): m = hashlib.md5() for c,v in box.iteritems(): m.update(str(v)) ha = m.hexdigest() return ha def preproc(self): ## prepare fields to be indexed: # - dominantColors # - tags (no scores) -> too generic... 
take top 5 and attach uniform scores # - categories + scores -> keep scores > 0.3 # - faces + age + gender + emotion (from emotion JSON / API) -> encode age + gender + emotion (8 categories) into vector if self.tate: ext = '.jpg' else: ext = '' img_bn = '' for jf in self.json_files: with open(jf,'r') as jfile: json_data = json.load(jfile) if not json_data: continue if not img_bn: jf = jf.replace('//','/') img_bn = os.path.dirname(os.path.dirname(jf)) img_name = img_bn + '/' + os.path.basename(jf).replace('_mapi.json',ext) if not img_name in self.image_files: continue if json_data.get('color',None): self.mapi_dominant_colors[img_name] = [] for c in json_data['color']['dominantColors']: self.mapi_dominant_colors[img_name].append({'cat':c,'prob':0.1}) if json_data.get('description',None): self.mapi_tags[img_name] = [] for t in json_data['description']['tags'][:5]: self.mapi_tags[img_name].append({'cat':t.replace('_',' '),'prob':0.2}) if json_data.get('categories',None): jd_cats = json_data['categories'] for c in jd_cats: self.mapi_categories[img_name] = [] if c['score'] >= 0.3: self.mapi_categories[img_name].append({'cat':c['name'].replace('_',' '),'prob':c['score']}) if json_data.get('faces',None): npeople = 0 nmales = 0 nfemales = 0 self.mapi_faces[img_name] = [] jd_faces = json_data['faces'] for jf in jd_faces: self.mapi_faces[img_name].append(jf) npeople += 1 gender = jf.get('gender',None) if gender == 'Male': nmales += 1 else: nfemales += 1 self.mapi_people[img_name] = [npeople,nmales,nfemales] #print self.mapi_people[img_name] if json_data.get('description',None): caption = json_data['description'].get('captions',None) if caption: caption = caption[0]['text'] self.mapi_captions[img_name] = caption for jf in self.json_emo_files: with open(jf,'r') as jfile: json_data = json.load(jfile) img_name = img_bn + '/' + os.path.basename(jf).replace('_mapi.json','.jpg') if not img_name in self.image_files: continue if len(json_data) == 0: continue if self.mapi_faces.get(img_name,None) == None: #print 'face detected with emotion API but not with Vision API...' self.mapi_faces[img_name] = json_data continue npeople = 0 emosum = [0.0]*len(self.emotions) for r in json_data: n = self.has_box(r,self.mapi_faces[img_name]) if n == -1: continue emo_scores = r['scores'] has_emo = False for e,c in self.emotions.iteritems(): emosum[c] += emo_scores[e] if emo_scores[e] > 0.5: if not has_emo: self.mapi_faces[img_name][n]['emotions'] = {} has_emo = True self.mapi_faces[img_name][n]['emotions'][e] = emo_scores[e] npeople = npeople + 1 if img_name in self.mapi_people: self.mapi_people[img_name] = self.mapi_people[img_name] + emosum else: self.mapi_people[img_name] = [npeople,0.0,0.0] + emosum return def index(self): ## index every variable type # - dominant colors (XXX: let's not match based on this, DNN does much better) #with Indexer(dim=1,repository=self.index_repo,db_name='colors.bin') as indexer: # for c,v in self.mapi_dominant_colors.iteritems(): # indexer.index_tags_single(v,c) # - tags #print 'indexing mapi tags...' if self.tate: with Indexer(dim=1,repository=self.index_repo,db_name='tags.bin') as indexer: for t,v in self.mapi_tags.iteritems(): indexer.index_tags_single(v,t) self.stm[t] = [] for tc in v: self.stm[t].append(tc['cat']) # - categories #print 'indexing mapi categories...' 
if self.tate: with Indexer(dim=1,repository=self.index_repo,db_name='cats.bin') as indexer: for t,v in self.mapi_categories.iteritems(): indexer.index_tags_single(v,t) self.scm[t] = [] for tc in v: self.scm[t].append(tc['cat']) # - number of people and gender # as a vector [npeople, males, females] if self.tate: with Indexer(dim=11,repository=self.index_repo,index_name='people.ann',db_name='people.bin') as indexer: c = 0 #print 'indexing', len(self.mapi_people),'people' for t,v in self.mapi_people.iteritems(): if len(v) < 11: v = v + [0.0]*len(self.emotions) # if no emotion detected indexer.index_single(c,v,t) c = c + 1 indexer.build_index() indexer.save_index() # - vector for age + gender + emotion + save boxes #print 'indexing mapi age, gender, emotion and boxes...' if self.tate: #c = 0 with Indexer(dim=10,repository=self.index_repo) as indexer: ldb = shelve.open(self.index_repo + '/ldata.bin') for f,v in self.mapi_faces.iteritems(): if len(v) > 0: rec = {'faceRectangles':[],'emotions':[],'genders':[],'ages':[]} for fv in v: vec = self.face_vector(fv) indexer.index_single(c,vec,f) ldb[str(c)] = (fv,f) c = c + 1 if 'age' in fv: rec['ages'].append(fv['age']) if 'emotion' in fv: rec['emotions'].append(fv['emotions']) if 'gender' in fv: rec['genders'].append(fv['gender']) if 'faceRectangle' in fv: rec['faceRectangles'].append(fv['faceRectangle']) ldb[f] = rec ldb.close() indexer.build_index() indexer.save_index() else: ldb = shelve.open(self.index_repo + '/out_ldata.bin') for f,v in self.mapi_faces.iteritems(): rec = {'faceRectangles':[],'emotions':[],'genders':[],'ages':[]} for fv in v: if 'age' in fv: rec['ages'].append(fv['age']) if 'emotions' in fv: rec['emotions'].append(fv['emotions']) if 'gender' in fv: rec['genders'].append(fv['gender']) if 'faceRectangle' in fv: rec['faceRectangles'].append(fv['faceRectangle']) #print 'indexing=',f,fv ldb[f] = rec ldb.close() # save captions dbname = '/out_captions.bin' if self.tate: dbname = '/in_captions.bin' ldb = shelve.open(self.index_repo + dbname) for i,c in self.mapi_captions.iteritems(): ldb[os.path.basename(str(i))] = c.encode('utf8') #print 'indexing',os.path.basename(str(i)),' / ',c.encode('utf8') ldb.close() return def search(self,jdataout={}): results_tags = {} with Searcher(self.index_repo,search_size=1000,db_name='tags.bin') as searcher: searcher.load_index() for t,v in self.mapi_tags.iteritems(): nns =searcher.search_tags_single(v,t) nns['tags_out_all'] = [] for nn in nns['nns_uris']: nns['tags_out_all'].append(self.st.get(str(nn),'')) results_tags[t] = nns results_tags = self.to_json(results_tags,'/img/reuters/','/img/tate/',self.name+'_tags',self.description,jdataout,self.meta_in,self.meta_out,self.captions_in,self.captions_out,mapi_in=self.index_repo + '/ldata.bin',mapi_out=self.index_repo + '/out_ldata.bin') #print 'results_tags=',results_tags results_cats = {} with Searcher(self.index_repo,search_size=1000,db_name='cats.bin') as searcher: searcher.load_index() for t,v in self.mapi_categories.iteritems(): nns =searcher.search_tags_single(v,t) nns['tags_out_all'] = [] for nn in nns['nns_uris']: nns['tags_out_all'].append(self.sc.get(str(nn),'')) results_cats[t] = nns results_tmp = self.to_json(results_cats,'/img/reuters/','/img/tate/',self.name+'_cats',self.description,results_tags,self.meta_in,self.meta_out,self.captions_in,self.captions_out,mapi_in=self.index_repo + '/ldata.bin',mapi_out=self.index_repo + '/out_ldata.bin') if not results_tmp: results_tmp = results_tags #print 'results_tmp=',results_tmp results_cats = 
results_tmp #results_people = {} #with Searcher(self.index_repo,search_size=200,index_name='people.ann',db_name='people.bin') as searcher: # searcher.load_index() # for f,v in self.mapi_people.iteritems(): # if len(v) < 11: # v = v + [0.0]*8 # nns = searcher.search_single(v,f) #print 'nns=',nns # results_people[f] = nns #print 'results_people=',results_people #results_tmp = self.to_json(results_people,'/img/reuters','/img/tate/',self.name+'_people',self.description,results_cats,self.meta_in,self.meta_out,self.captions_in,self.captions_out,mapi_in=self.index_repo + '/ldata.bin',mapi_out=self.index_repo + '/out_ldata.bin') #if not results_people: results_tmp = results_cats results_faces = {} with Searcher(self.index_repo,search_size=5000) as searcher: searcher.load_index() ldb = shelve.open(self.index_repo + '/ldata.bin') for f,v in self.mapi_faces.iteritems(): resi = {} # results for this image for fv in v: vec = self.face_vector(fv) nns = searcher.search_single(vec,f) m = 0 in_face_hash = '' faceR = fv.get('faceRectangle',{}) if faceR: in_face_hash = self.box_hash(faceR) age_in = fv.get('age',-1) #print 'nns scores=',nns['nns'][1] for nuri in nns['nns_uris']: nn = nns['nns'][0][m] nndata = ldb[str(nn)] nndata0 = nndata[0] nndata = ldb[nuri] age_out = nndata0.get('age',-1) if age_in > 0 and age_out > 0 and not age_in-10<=age_out<=age_in+10: # print 'discarding based on age, age_in=',age_in,' / age_out=',age_out continue if not nuri in resi: resi[nuri] = {'mapi_out':{'faceRectangles':[],'emotions':[],'genders':[],'ages':[],'boxids':[]}, 'mapi_in':{'faceRectangles':[],'emotions':[],'genders':[],'ages':[],'boxids':[]}, 'score':0.0} if in_face_hash: if not faceR in resi[nuri]['mapi_in']['faceRectangles']: resi[nuri]['mapi_in']['faceRectangles'].append(faceR) resi[nuri]['mapi_in']['emotions'].append(fv.get('emotions',{})) resi[nuri]['mapi_in']['genders'].append(fv.get('gender',-1)) resi[nuri]['mapi_in']['ages'].append(age_in) resi[nuri]['mapi_in']['boxids'].append([in_face_hash]) else: bidx = resi[nuri]['mapi_in']['faceRectangles'].index(faceR) resi[nuri]['mapi_in']['boxids'][bidx].append(in_face_hash) nnfaceR = nndata0.get('faceRectangle',{}) if nnfaceR: if not nnfaceR in resi[nuri]['mapi_out']['faceRectangles']: resi[nuri]['mapi_out']['faceRectangles'].append(nnfaceR) resi[nuri]['mapi_out']['emotions'].append(nndata0.get('emotions',{})) resi[nuri]['mapi_out']['genders'].append(nndata0.get('gender',-1)) resi[nuri]['mapi_out']['ages'].append(age_out) if in_face_hash: resi[nuri]['mapi_out']['boxids'].append([in_face_hash]) resi[nuri]['score'] += 10.0*nns['nns'][1][m] + 0.5 elif in_face_hash: bidx = resi[nuri]['mapi_out']['faceRectangles'].index(nnfaceR) resi[nuri]['mapi_out']['boxids'][bidx].append(in_face_hash) m = m + 1 # add uri array nnns_uris = [] nnns = [[],[]] for r in resi: if r == 'nns_uris' or r == 'nns': continue nnns_uris.append(r) nnns[0].append('') # dummy array nnns[1].append(resi[r]['score']) del resi[r]['score'] resi['nns_uris'] = nnns_uris resi['nns'] = nnns results_faces[f] = resi ldb.close() results_faces = self.to_json(results_faces,'/img/reuters/','/img/tate/',self.name,self.description,results_tmp,self.meta_in,self.meta_out,self.captions_in,self.captions_out,mapi_in=self.index_repo + '/ldata.bin',mapi_out=self.index_repo + '/out_ldata.bin') if not results_faces: results_faces = results_tmp #print 'results_faces=',results_faces return results_faces
python
""" This module is used to interface with classical HPC queuing systems. """
python
from django.contrib import admin

from .models import SiteToCheck


@admin.register(SiteToCheck)
class SiteToCheckAdmin(admin.ModelAdmin):
    list_display = ['url', 'last_status', 'last_response_time']
python
# Based on https://github.com/petkaantonov/bluebird/blob/master/src/promise.js from .compat import Queue # https://docs.python.org/2/library/queue.html#Queue.Queue LATE_QUEUE_CAPACITY = 0 # The queue size is infinite NORMAL_QUEUE_CAPACITY = 0 # The queue size is infinite class Async(object): def __init__(self, schedule): self.is_tick_used = False self.late_queue = Queue(LATE_QUEUE_CAPACITY) self.normal_queue = Queue(NORMAL_QUEUE_CAPACITY) self.have_drained_queues = False self.trampoline_enabled = True self.schedule = schedule def enable_trampoline(self): self.trampoline_enabled = True def disable_trampoline(self): self.trampoline_enabled = False def have_items_queued(self): return self.is_tick_used or self.have_drained_queues def _async_invoke_later(self, fn, context): self.late_queue.put(fn) self.queue_tick(context) def _async_invoke(self, fn, context): self.normal_queue.put(fn) self.queue_tick(context) def _async_settle_promise(self, promise): self.normal_queue.put(promise) self.queue_tick(context=promise._trace) def invoke_later(self, fn, context): if self.trampoline_enabled: self._async_invoke_later(fn, context) else: self.schedule.call_later(0.1, fn) def invoke(self, fn, context): if self.trampoline_enabled: self._async_invoke(fn, context) else: self.schedule.call( fn ) def settle_promises(self, promise): if self.trampoline_enabled: self._async_settle_promise(promise) else: self.schedule.call( promise._settle_promises ) def throw_later(self, reason): def fn(): raise reason self.schedule.call(fn) fatal_error = throw_later def drain_queue(self, queue): from .promise import Promise while not queue.empty(): fn = queue.get() if (isinstance(fn, Promise)): fn._settle_promises() continue fn() def drain_queues(self): assert self.is_tick_used self.drain_queue(self.normal_queue) self.reset() self.have_drained_queues = True self.drain_queue(self.late_queue) def queue_context_tick(self): if not self.is_tick_used: self.is_tick_used = True self.schedule.call(self.drain_queues) def queue_tick(self, context): if not context: self.queue_context_tick() else: (context._parent or context).on_exit(self.queue_context_tick) def reset(self): self.is_tick_used = False
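# A hedged usage sketch for the Async helper above.  The schedule object only
# needs the call()/call_later() methods that Async itself invokes, so this
# synchronous stand-in is an illustrative assumption, not part of the library.
class ImmediateSchedule(object):
    def call(self, fn):
        fn()

    def call_later(self, delay, fn):
        fn()                        # ignore the delay in this toy scheduler


def greet():
    print("queued work ran")


async_runner = Async(ImmediateSchedule())
async_runner.invoke(greet, context=None)   # enqueue greet, then drain the queue via the schedule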
python
#!/usr/bin/env python3 import sys import os import time # # Generate the master out.grid # Create a 3M point file of lat/lons - and write to ASCII file called out.grd. # This file will be used as input to ucvm_query for medium scale test for images # if not os.path.exists("out.grd"): print("Creating grd.out file.") cmd="/app/ucvm/utilities/makegrid" print(cmd) os.system(cmd) # # valid_model_strings = {"bbp1d":1, "cca":1, "wfcvm":1, "albacore":1, "cvlsu":1, "ivlsu":1, "cvms":1, "cvmh":1, "cvmsi":1, "cvms5":1} # Check model parameter if len (sys.argv) < 2: print("Input format: % make_mesh_model.py cvms") sys.exit() else: model_string = sys.argv[1] # # Check if model is valid print("Model string: {}".format(model_string)) try: valid = valid_model_strings[model_string.strip()] except: print("Unknown model: {}".format(model_string)) for key in valid_model_strings.items(): print(key, valid_model_strings[key]) sys.exit() # # Call each of the installed crustal models and time how # long it takes to populate the models # # # start = time.time() cmd="ucvm_query -f /app/ucvm/conf/ucvm.conf -m %s < out.grd > mesh_%s.out"%(model_string,model_string) print(cmd) os.system(cmd) end = time.time() print("Mesh extraction for model {} : {} seconds".format(model_string,(end-start)))
python
import trcdproc.navigate.raw as nav from trcdproc.core import H5File def test_all_signal_dataset_paths_are_found(organized_faulty_data: H5File): """Ensures that all dataset paths are found """ dataset_paths_found = {path for path in nav.all_signal_dataset_paths(organized_faulty_data)} all_paths = [] organized_faulty_data.visit(lambda path: all_paths.append(path)) dataset_paths_present = {'/' + p for p in all_paths if any(sig in p for sig in ['perp', 'par', 'ref']) and 'faulty' not in p} assert dataset_paths_found == dataset_paths_present def test_all_pump_group_paths_are_found(organized_faulty_data: H5File): """Ensures that all of the pump/nopump groups are found, and that no faulty groups are picked up """ pump_groups_found = {path for path in nav.pump_group_paths(organized_faulty_data)} all_paths = [] organized_faulty_data.visit(lambda path: all_paths.append(path)) pump_groups_present = {'/' + p for p in all_paths if p.endswith('pump')} assert pump_groups_found == pump_groups_present def test_all_wavelength_groups_under_rounds_are_found(organized_faulty_data: H5File): """Ensures that all of the wavelength groups that are subgroups of rounds are found """ wavelength_groups_found = {path for path in nav.wavelengths_under_rounds_paths(organized_faulty_data)} all_paths = [] organized_faulty_data.visit(lambda path: all_paths.append(path)) wavelength_groups_present = {'/' + p for p in all_paths if p.endswith('76487') or p.endswith('76715') or p.endswith('76940')} assert wavelength_groups_found == wavelength_groups_present
python
from nhlscrapi.games.events import EventType as ET, EventFactory as EF from nhlscrapi.scrapr import descparser as dp def __shot_type(**kwargs): skater_ct = kwargs['skater_ct'] if 'skater_ct' in kwargs else 12 period = kwargs['period'] if 'period' in kwargs else 1 if period < 5: return ET.Shot # elif period < 5: # return ET.PenaltyShot else: return ET.ShootOutAtt def __goal_type(**kwargs): skater_ct = kwargs['skater_ct'] if 'skater_ct' in kwargs else 12 period = kwargs['period'] if 'period' in kwargs else 1 gt = kwargs['game_type'] if skater_ct <= 7 and period > 4 and gt < 3: return ET.ShootOutGoal else: return ET.Goal def event_type_mapper(event_str, **kwargs): event_type_map = { "SHOT": __shot_type, "SHOT (!)": __shot_type, "SHOT (*)": __shot_type, "BLOCK": lambda **kwargs: ET.Block, "BLOCKED SHOT": lambda **kwargs: ET.Block, "MISS": lambda **kwargs: ET.Miss, "MISSED SHOT": lambda **kwargs: ET.Miss, "GOAL": __goal_type, "HIT": lambda **kwargs: ET.Hit, "HIT (!)": lambda **kwargs: ET.Hit, "HIT (*)": lambda **kwargs: ET.Hit, "FAC": lambda **kwargs: ET.FaceOff, "FACE-OFF": lambda **kwargs: ET.FaceOff, "GIVE": lambda **kwargs: ET.Giveaway, "GIVEAWAY": lambda **kwargs: ET.Giveaway, "TAKE": lambda **kwargs: ET.Takeaway, "TAKEAWAY": lambda **kwargs: ET.Takeaway, "PENL": lambda **kwargs: ET.Penalty, "PENALTY": lambda **kwargs: ET.Penalty, "STOP": lambda **kwargs: ET.Stoppage, "STOPPAGE": lambda **kwargs: ET.Stoppage, "PEND": lambda **kwargs: ET.PeriodEnd, "GEND": lambda **kwargs: ET.GameEnd, "SOC": lambda **kwargs: ET.ShootOutEnd } e_type = event_type_map[event_str](**kwargs) if event_str in event_type_map else ET.Event return EF.Create(e_type) def parse_event_desc(event, season = 2008): if event.event_type == ET.Shot and season >= 2008: dp.parse_shot_desc_08(event) # elif event.event_type == ET.PenaltyShot: # dp.parse_penalty_shot_desc_08(event) elif event.event_type == ET.Goal and season >= 2008: dp.parse_goal_desc_08(event) elif event.event_type == ET.Miss and season >= 2008: dp.parse_miss_08(event) elif event.event_type == ET.FaceOff and season >= 2008: dp.parse_faceoff_08(event) elif event.event_type == ET.Hit and season >= 2008: dp.parse_hit_08(event) elif event.event_type == ET.Block and season >= 2008: dp.parse_block_08(event) elif event.event_type == ET.Takeaway and season >= 2008: dp.parse_takeaway_08(event) elif event.event_type == ET.Giveaway and season >= 2008: dp.parse_giveaway_08(event) elif event.event_type == ET.ShootOutGoal: dp.parse_shootout(event) else: dp.default_desc_parser(event)
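# A hedged usage sketch for event_type_mapper() above.  The keyword arguments
# mirror the ones the mapper inspects; the values are illustrative.
regulation_shot = event_type_mapper("SHOT", skater_ct=12, period=2, game_type=2)
shootout_goal = event_type_mapper("GOAL", skater_ct=6, period=5, game_type=2)
print(regulation_shot)   # an ET.Shot event built by EventFactory.Create
print(shootout_goal)     # an ET.ShootOutGoal event (few skaters, period > 4, regular season)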
python
from django import VERSION

if VERSION < (3, 2):
    default_app_config = (
        "rest_framework_simplejwt.token_blacklist.apps.TokenBlacklistConfig"
    )
python
# exercise/views.py # Jake Malley # 01/02/15 """ Define all of the routes for the exercise blueprint. """ # Imports from flask import flash, redirect, render_template, \ request, url_for, Blueprint, abort from flask.ext.login import login_required, current_user from forms import AddRunningForm, AddCyclingForm, AddSwimmingForm, CompareMemberForm, EditExerciseForm from traininglog.models import Member, Exercise, Weight, Message, RunningLookUp, CyclingLookUp, SwimmingLookUp from traininglog import db from datetime import datetime, date, timedelta from querying_functions import * from operator import itemgetter from traininglog.weight.views import weight_required # Setup the exercise blueprint. exercise_blueprint = Blueprint( 'exercise', __name__, template_folder='templates' ) # Define the routes @exercise_blueprint.route('/') @login_required def index(): """ Homepage for all the exercise data. Displays forms for adding exercise, options for generating reports then a table for all the exercise. """ # Create all of the forms. add_running_form = AddRunningForm() add_cycling_form = AddCyclingForm() add_swimming_form = AddSwimmingForm() # Get all the exercise data. exercise_data = Exercise.query.filter_by(member=current_user).order_by(Exercise.id.desc()).limit(10).all() # Get all the current members. members = Member.query.all() # Create the choices list for the compare form. choices = [(member.get_id(), member.get_full_name()) for member in members] # Create the form. compare_form = CompareMemberForm() compare_form.compare_member_1.choices = choices compare_form.compare_member_2.choices = choices # Display the exercise home page passing in the forms and recent data etc. return render_template('index.html', add_running_form=add_running_form, add_swimming_form=add_swimming_form, add_cycling_form=add_cycling_form, exercise_data=exercise_data,compare_form=compare_form) @exercise_blueprint.route('/add_running', methods=['GET','POST']) @login_required @weight_required def add_running(): """ Displays a form for users to add running. """ # Create the running form. add_running_form = AddRunningForm() # Create empty message and error. message = None error = None # Make sure the method was post. if request.method == 'POST': # Validate the form. if add_running_form.validate_on_submit(): # Get the current time. now = datetime.utcnow() # Get the all the last posts within the last minute. last_posts = Exercise.query.filter_by(member=current_user).filter(Exercise.date > (now-timedelta(minutes=1))).all() # Make sure they aren't cheating by having more than 24 hours in one day. # And they haven't added a post in the last 30 seconds. i.e. they aren't rapidly clicking the button. if (get_exercise_total(now) + float(add_running_form.duration.data) <= 24) and len(last_posts) < 5: # Look Up the calories burned and commit it. # Get users most recent weight. user_weight = Weight.query.filter_by(member=current_user).order_by(Weight.id.desc()).first().get_weight() calories_burned = (float(RunningLookUp.query.filter_by(id=add_running_form.exercise_level.data).first().calories_burned)/80)*user_weight*float(add_running_form.duration.data) # Add the exercise to the database. db.session.add(Exercise(now, 'running', add_running_form.exercise_level.data, add_running_form.duration.data, calories_burned, current_user.get_id())) # Commit the changes. db.session.commit() # Flash a success message. flash("Exercise successfully added.") # Add a well done message. 
message = "Well Done you burned "+str(calories_burned)+" calories in that session." else: # Make the correct error message. flash("An error occurred adding that exercise.",'error') if (get_exercise_total(now) + float(add_running_form.duration.data) > 24): error = "Exercise has not been added as the current total for today exceeds 24 hours." else: error = "You have tried to add too many events in the last minute, please wait then try again." # Get the last 4 exercises for running. running_data = Exercise.query.filter_by(exercise_type='running',member=current_user).order_by(Exercise.id.desc()).limit(4).all() # Display the add running page. return render_template('add_running.html', add_running_form=add_running_form, message=message,error=error,running_data=running_data) @exercise_blueprint.route('/add_cycling', methods=['GET','POST']) @login_required @weight_required def add_cycling(): """ Displays a form for users to add cycling. """ # Create empty message and error. message = None error = None # Create the cycling form. add_cycling_form = AddCyclingForm() # Make sure the method was post. if request.method == 'POST': # Validate the form. if add_cycling_form.validate_on_submit(): # Get the current time. now = datetime.utcnow() # Get the all the last posts within the last minute. last_posts = Exercise.query.filter_by(member=current_user).filter(Exercise.date > (now-timedelta(minutes=1))).all() # Make sure they aren't cheating by having more than 24 hours in one day. # And they haven't added a post in the last 30 seconds. i.e. they aren't rapidly clicking the button. if (get_exercise_total(now) + float(add_cycling_form.duration.data) <= 24) and len(last_posts) < 5: # Look Up the calories burned and commit it. # Get users most recent weight. user_weight = Weight.query.filter_by(member=current_user).order_by(Weight.id.desc()).first().get_weight() calories_burned = (float(CyclingLookUp.query.filter_by(id=add_cycling_form.exercise_level.data).first().calories_burned)/80)*user_weight*float(add_cycling_form.duration.data) # Add the exercise to the database. db.session.add(Exercise(now, 'cycling', add_cycling_form.exercise_level.data, add_cycling_form.duration.data, calories_burned, current_user.get_id())) # Commit the changes. db.session.commit() # Flash a success message. flash("Exercise successfully added.") # Add a well done message. message = "Well Done you burned "+str(calories_burned)+" calories in that session." else: # Make the correct error message. flash("An error occurred adding that exercise.",'error') if (get_exercise_total(now) + float(add_cycling_form.duration.data) > 24): error = "Exercise has not been added as the current total for today exceeds 24 hours." else: error = "You have tried to add too many events in the last minute, please wait then try again." # Get the last 4 exercises for running. cycling_data = Exercise.query.filter_by(exercise_type='cycling',member=current_user).order_by(Exercise.id.desc()).limit(4).all() # Display the add cycling page. return render_template('add_cycling.html', add_cycling_form=add_cycling_form, message=message,error=error, cycling_data=cycling_data) @exercise_blueprint.route('/add_swimming', methods=['GET','POST']) @login_required @weight_required def add_swimming(): """ Displays a form for users to add swimming. """ # Create empty message and error. message = None error = None # Create the swimming form. add_swimming_form = AddSwimmingForm() # Make sure the method was post. if request.method == 'POST': # Validate the form. 
if add_swimming_form.validate_on_submit(): # Get the current time. now = datetime.utcnow() # Get the all the last posts within the last minute. last_posts = Exercise.query.filter_by(member=current_user).filter(Exercise.date > (now-timedelta(minutes=1))).all() # Make sure they aren't cheating by having more than 24 hours in one day. # And they haven't added a post in the last 30 seconds. i.e. they aren't rapidly clicking the button. if (get_exercise_total(now) + float(add_swimming_form.duration.data) <= 24) and len(last_posts) < 5: # Look Up the calories burned and commit it. # Get users most recent weight. user_weight = Weight.query.filter_by(member=current_user).order_by(Weight.id.desc()).first().get_weight() calories_burned = (float(SwimmingLookUp.query.filter_by(id=add_swimming_form.exercise_level.data).first().calories_burned)/80)*user_weight*float(add_swimming_form.duration.data) # Add the exercise to the database. db.session.add(Exercise(now, 'swimming', add_swimming_form.exercise_level.data, add_swimming_form.duration.data, calories_burned, current_user.get_id())) # Commit the changes. db.session.commit() # Flash a success message. flash("Exercise successfully added.") # Add a well done message. message = "Well Done you burned "+str(calories_burned)+" calories in that session." else: # Make the correct error message. flash("An error occurred adding that exercise.",'error') if (get_exercise_total(now) + float(add_swimming_form.duration.data) > 24): error = "Exercise has not been added as the current total for today exceeds 24 hours." else: error = "You have tried to add too many events in the last minute, please wait then try again." # Get the last 4 exercises for running. swimming_data = Exercise.query.filter_by(exercise_type='swimming',member=current_user).order_by(Exercise.id.desc()).limit(4).all() # Display the add swimming page. return render_template('add_swimming.html', add_swimming_form=add_swimming_form, message=message,error=error, swimming_data=swimming_data) @exercise_blueprint.route('/view') @login_required def view(): """ Page to display a table of all the users exercise. It allows users to then click on specific events, which can then be viewed with view_exercise """ # Select the exercise data for the current member. all_exercise_data = Exercise.query.filter_by(member=current_user).order_by(Exercise.id.desc()).all() # Display the view page passing in all the exercise data. return render_template('view.html',all_exercise_data=all_exercise_data,member=current_user) @exercise_blueprint.route('/view/<exercise_id>') @login_required def view_exercise(exercise_id): """ Page to display a single exercise event. Displays the event with the id = exercise_id """ # Get the exercise object with the given id. exercise = Exercise.query.filter_by(id=exercise_id).first() if exercise is not None: # Create the edit exercise form. edit_exercise_form = EditExerciseForm() if exercise.member != current_user: # If you are viewing another users exercise. db.session.add(Message(datetime.utcnow(), current_user.get_full_name()+" Viewed your exercise", exercise.member.get_id())) # Commit the changes. db.session.commit() # Get all of the exercise for the member of the given exercise. all_exercise_data = Exercise.query.filter_by(member=exercise.member).order_by(Exercise.id.desc()).all() else: # The exercise ID is invalid abort with HTTP 404 abort(404) # Display the view page for a specific exercise event. 
return render_template('view.html',all_exercise_data=all_exercise_data,exercise=exercise,member=exercise.member,edit_exercise_form=edit_exercise_form) @exercise_blueprint.route('/edit_exercise', methods=['POST','GET']) @login_required @weight_required def edit_exercise(): """ Allows users to edit their exercise. """ # Create the edit exercise form. edit_exercise_form = EditExerciseForm() if request.method=='POST' and edit_exercise_form.validate_on_submit(): # The method was post and the form was valid. # Get the exercise object. exercise = Exercise.query.filter_by(id=edit_exercise_form.exercise_id.data).first() # Check the exercise is for the current user. if exercise.member == current_user: # OK lets run the update. # See if the want us to delete it. if bool(edit_exercise_form.delete.data) == True: # Delete that exercise. db.session.delete(exercise) db.session.commit() flash("Exercise has been deleted.") # Send back to all the exercise as this event won't exist anymore. return redirect(url_for('exercise.view')) else: # Calculate the new calories burned. # (We don't want to include the new weight in case they did this when the weight was different etc. # we are only updating the duration and thus calories burned as only a result of this.) new_calories_burned = (exercise.calories_burned/exercise.exercise_duration)*float(edit_exercise_form.duration.data) # Update the duration. exercise.update_duration(float(edit_exercise_form.duration.data), new_calories_burned) flash("Exercise has been updated.") # Send them back to where they came from. return redirect(request.referrer or url_for('exercise.index')) @exercise_blueprint.route('/compare',methods=['POST','GET']) @login_required def compare(): """ Page to compare to users. """ compare_form = CompareMemberForm() # Get all the current members. members = Member.query.all() # Create the choices list for the compare form. choices = [(member.get_id(), member.get_full_name()+' (id='+str(member.get_id())+')') for member in members] # Create the form. compare_form = CompareMemberForm() compare_form.compare_member_1.choices = choices compare_form.compare_member_2.choices = choices # Make sure the method was post. if request.method == 'POST': # Validate the form. if compare_form.validate_on_submit(): # Get data from the compare form. # Get the member objects for both of the members select on the form. compare_member_1 = Member.query.filter_by(id=compare_form.compare_member_1.data).first() compare_member_2 = Member.query.filter_by(id=compare_form.compare_member_2.data).first() # Get todays date. now = datetime.utcnow() # Create compare data for member 1. compare_member_1_data = { "name":compare_member_1.get_full_name(), "total_time":get_exercise_total(datetime(now.year,1,1),member=compare_member_1), "total_cals":get_cals_total(datetime(now.year,1,1),member=compare_member_1), "running_time":get_hours_running(member=compare_member_1), "running_cals":get_cals_running(member=compare_member_1), "cycling_time":get_hours_cycling(member=compare_member_1), "cycling_cals":get_cals_cycling(member=compare_member_1), "swimming_time":get_hours_swimming(member=compare_member_1), "swimming_cals":get_cals_swimming(member=compare_member_1), } # Create compare data for member 2. 
compare_member_2_data = { "name":compare_member_2.get_full_name(), "total_time":get_exercise_total(datetime(now.year,1,1),member=compare_member_2), "total_cals":get_cals_total(datetime(now.year,1,1),member=compare_member_2), "running_time":get_hours_running(member=compare_member_2), "running_cals":get_cals_running(member=compare_member_2), "cycling_time":get_hours_cycling(member=compare_member_2), "cycling_cals":get_cals_cycling(member=compare_member_2), "swimming_time":get_hours_swimming(member=compare_member_2), "swimming_cals":get_cals_swimming(member=compare_member_2), } # Get most recent exercise for the charts compare_member_1_exercise = Exercise.query.filter_by(member=compare_member_1).order_by(Exercise.id.desc()).limit(5).all() compare_member_2_exercise = Exercise.query.filter_by(member=compare_member_2).order_by(Exercise.id.desc()).limit(5).all() # Chart data for time chart_data_time_1 = [ exercise.exercise_duration for exercise in compare_member_1_exercise][::-1] chart_data_time_2 = [ exercise.exercise_duration for exercise in compare_member_2_exercise][::-1] # Chart data for calories chart_data_calories_1 = [ exercise.calories_burned for exercise in compare_member_1_exercise][::-1] chart_data_calories_2 = [ exercise.calories_burned for exercise in compare_member_2_exercise][::-1] return render_template('compare.html',compare_member_1_data=compare_member_1_data,compare_member_2_data=compare_member_2_data, compare_form=compare_form,chart_data_time_1=chart_data_time_1,chart_data_time_2=chart_data_time_2,chart_data_calories_1=chart_data_calories_1,chart_data_calories_2=chart_data_calories_2) # Display the compare page. return render_template('compare.html', compare_form=compare_form) @exercise_blueprint.route('/picktheteam') @login_required def picktheteam(): """ Page to display the team of eight runners. """ # Get all of the members in the database. members = Member.query.all() # Create a datetime object for this year. date = datetime(datetime.utcnow().year,1,1) # Get url argument to see if we need to display all the member or just the top 8. if request.args.get('all') == "true": page_title="All Members" pick_team=False else: page_title="Pick the Team" pick_team=True # Get url argument to see if we are ordering by calories_burned or total hours exercised. if request.args.get('order_by') == "hours": order_by = 2 else: order_by = 1 # Create a new list for the ordered members to be stored in. members_ordered=[] # For each member. for member in members: # Calculate the total calories burned for that member this year. calories_burned = get_cals_total(date=date,member=member) # Calculate the total hours exercised for that member this year. hours_exercised = get_exercise_total(date=date, member=member) # Add a tuple of the member and the calories burned to the ordered members list. members_ordered.append((member, calories_burned, hours_exercised)) # Actually order the list by the second element in each one. (The calories burned.) # (Reversing the list as it orders it in ascending order.) members_ordered = sorted(members_ordered, key=itemgetter(order_by))[::-1] # Display the page to pick the team. return render_template("exercise_picktheteam.html", page_title=page_title,pick_team=pick_team, members_ordered=members_ordered)
python
#!/usr/bin/env python """Tests for `magic_dot` package.""" import pytest from collections import namedtuple from magic_dot import MagicDot from magic_dot import NOT_FOUND from magic_dot.exceptions import NotFound def test_can(): """Test that dict key is accessible as a hash .""" md = MagicDot({"num": 1}) assert md.num.get() == 1 def test_yet(): """Test NOT_FOUND is returned.""" md = MagicDot({"num": 1}) assert md.buba.get() is NOT_FOUND def test_other(): """Test supplied default is returned for NOT_FOUND""" md = MagicDot({"num": 1}) assert md.bubba.get("something") == "something" def test_coat(): """Test that attributes are extracted first.""" class AttrKey(dict): a = 7 ak = AttrKey() ak['a'] = 8 md = MagicDot(ak) assert md.a.get() == 7 def test_ride(): """Test that indexed processing happens by default.""" nt = namedtuple("NT", "x")(1) md = MagicDot([nt, None, nt]) assert md[1].get() is None def test_both(): """Test that exception is enabled with init.""" md = MagicDot({}, exception=True) with pytest.raises(NotFound): md.nonexistent.get() def test_been(): """Test that exception is enabled with exception.""" md = MagicDot({}) with pytest.raises(NotFound): md.exception().nonexistent def test_curve(): """Test that exception does not affect the get after NOT_FOUND is detected.""" md = MagicDot({}) md.nonexistent.exception().get() def test_pie(): """Test that TypeError is raised when iterating over non-data""" md = MagicDot(1) with pytest.raises(TypeError): [x for x in md] def test_cat(): """Tests that TypeError is raised for valid non-iterable when iter_nf_as_empty() is set""" md = MagicDot(1, iter_nf_as_empty=True) with pytest.raises(TypeError): [x for x in md] def test_atom(): """Tests that TypeError is raised for NOT_FOUND by default""" md = MagicDot(1).nonexistent with pytest.raises(TypeError): [x for x in md] def test_lesson(): """Tests that NOT_FOUND returns empty generator with iter_nf_as_empty""" md = MagicDot(1, iter_nf_as_empty=True).nonexistent assert [x for x in md] == [] def test_layers(): """Tests that NOT_FOUND returns empty generator with iter_nf_as_empty()""" md = MagicDot(1).nonexistent.iter_nf_as_empty() assert [x for x in md] == [] def test_trace(): """Tests ability to walk iterable data.""" md = MagicDot([None, 1, 2]) expected = [None, 1, 2] for x in md: assert x.get() == expected.pop(0) def test_sign(): """Tests ability to walk iterable data.""" md = MagicDot([None, 1, 2]) expected = [None, 1, 2] for x in md: assert x.get() == expected.pop(0) def test_sign(): """Tests pluck of attributes and nonexistent data.""" nt = namedtuple("NT", "x")(1) md = MagicDot([nt, None, nt]) assert md.pluck("x").get() == [1, NOT_FOUND, 1] def test_money(): """Tests pluck of keys and nonexistent data.""" d = {"x": 1} md = MagicDot([d, None, d]) assert md.pluck("x").get() == [1, NOT_FOUND, 1] def test_whistle(): """Tests pluck of nonexistent data raises TypeError""" md = MagicDot(1) with pytest.raises(TypeError): md.nonexistent.pluck('z') def test_neighborhood(): """Tests that pluck of nonexistent data with .iter_nf_as_empty returns empty.""" md = MagicDot(1) assert md.nonexistent.iter_nf_as_empty().pluck('whatevs').get() == [] def test_vote(): """Tests that pluck of noniterable gives type_error""" md = MagicDot(1) with pytest.raises(TypeError): md.pluck('z') def test_vote(): """Tests that pluck of noniterable gives type_error even if .iter_nf_as_empty is set.""" md = MagicDot(1) with pytest.raises(TypeError): md.iter_nf_as_empty().pluck('z') def test_yellow(): """Test that a pluck of 
    NOT_FOUND data raises a NotFound exception if .exception is set"""
    nt = namedtuple("NT", "x")(1)
    md = MagicDot([nt, None, nt])
    with pytest.raises(NotFound):
        md.exception().pluck("x")


def test_supply():
    """Test that boolean math is not allowed with magic_dot."""
    md = MagicDot(1)
    with pytest.raises(RuntimeError):
        not md


def test_important():
    """Test that boolean math is not allowed on NOT_FOUND"""
    md = MagicDot(1)
    with pytest.raises(RuntimeError):
        not md.nonexistent.get()


def test_in():
    """Test that repr for NOT_FOUND works nicely (for documentation)."""
    md = MagicDot(1)
    assert repr(md.nonexistent.get()) == "magic_dot.NOT_FOUND"


def test_gate():
    """Test that setting exception creates a new md"""
    md = MagicDot(1)
    assert md is not md.exception()


def test_bowl():
    """Test that setting exception twice does not create a new md"""
    md = MagicDot(1, exception=True)
    assert md is md.exception()


def test_solve():
    """Test that setting iter_nf_as_empty creates a new md"""
    md = MagicDot(1)
    assert md is not md.iter_nf_as_empty()


def test_reader():
    """Test that setting iter_nf_as_empty twice does not create a new md"""
    md = MagicDot(1, iter_nf_as_empty=True)
    assert md is md.iter_nf_as_empty()
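Taken together, these tests pin down a small usage surface. Below is a hedged sketch of that surface, using only behaviors the tests above demonstrate; the package is assumed to be installed as magic_dot, exactly as the imports above use it.

from magic_dot import MagicDot, NOT_FOUND
from magic_dot.exceptions import NotFound

# Attribute-style access into dict keys, with a NOT_FOUND sentinel (or a
# caller-supplied default) for missing data.
md = MagicDot({"num": 1})
assert md.num.get() == 1
assert md.missing.get() is NOT_FOUND
assert md.missing.get("fallback") == "fallback"

# exception=True switches the sentinel behaviour to raising NotFound.
strict = MagicDot({}, exception=True)
try:
    strict.missing.get()
except NotFound:
    pass

# pluck() maps a key/attribute over an iterable, keeping NOT_FOUND holes.
rows = MagicDot([{"x": 1}, None, {"x": 3}])
assert rows.pluck("x").get() == [1, NOT_FOUND, 3]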
python
message = 'This is submodule 1.'


def module_testing():
    print(message)
python
# SISO program G.py

# This function is a placeholder for a generic computable function G.
# This particular choice of G returns the first character of the input
# string.

import utils
from utils import rf


def G(inString):
    if len(inString) >= 1:
        return inString[0]
    else:
        return ""


def testG():
    testvals = [
        ("", ""),
        ("x", "x"),
        ("abcdef", "a"),
    ]
    for (inString, solution) in testvals:
        val = G(inString)
        utils.tprint(inString, ":", val)
        assert val == solution
python
'''
LICENSE: MIT
https://github.com/keras-team/keras/blob/a07253d8269e1b750f0a64767cc9a07da8a3b7ea/LICENSE

Experiment notes:
Removing Dropout did not change the results much.
Switching to SGD made convergence much slower.
Interesting.

There is an idea I want to try, so I am writing my own activation function.
'''
from __future__ import print_function

import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import optimizers
from keras.layers import Activation
from keras import backend
from keras.utils.generic_utils import get_custom_objects

smoothing = 0


def custom_activation(x):
    return smoothing * backend.tanh(x / smoothing)


def replace_intermediate_layer_in_keras(model, layer_id, new_layer):
    from keras.models import Model

    layers = [l for l in model.layers]

    x = layers[0].output
    for i in range(1, len(layers)):
        if i == layer_id:
            x = new_layer(x)
        else:
            x = layers[i](x)

    new_model = Model(input=model.input, output=x)
    return new_model


batch_size = 128
num_classes = 10
epochs = 20

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Dense(512, activation='linear', input_shape=(784,)))
model.add(Activation(custom_activation))
# model.add(Dropout(0.2))
model.add(Dense(512, activation='linear'))
model.add(Activation(custom_activation))
# model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='linear'))
model.add(Activation(custom_activation))
# model.add(Dense(num_classes, activation='softmax'))

model.summary()

sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
rms_prop = optimizers.RMSprop()

# model.compile(loss='categorical_crossentropy',
#               optimizer=sgd,
#               metrics=['accuracy'])

# to create input layer
model = replace_intermediate_layer_in_keras(model, 1, Activation(custom_activation))

for i in range(5):
    smoothing = 0.01 * 1e2**(1.0 * (4 - i) / 4)

    model = replace_intermediate_layer_in_keras(model, 2, Activation(custom_activation))
    # model.summary()
    model = replace_intermediate_layer_in_keras(model, 4, Activation(custom_activation))
    # model.summary()
    model = replace_intermediate_layer_in_keras(model, 6, Activation(custom_activation))
    # model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=1,
                        verbose=1,
                        validation_data=(x_test, y_test))

score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
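Since the custom activation is just a rescaled tanh, a quick NumPy check, independent of Keras and relying only on the standard behaviour of tanh, illustrates what the annealed smoothing constant does.

import numpy as np

# smoothing * tanh(x / smoothing) tracks the identity while |x| << smoothing
# and saturates near +/-smoothing once |x| >> smoothing, which is why the
# training loop above anneals smoothing from 1.0 down to 0.01.
x = np.linspace(-3.0, 3.0, 7)
for s in (100.0, 1.0, 0.01):
    print(s, np.round(s * np.tanh(x / s), 3))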
python
from .tracebackturbo import *
python
from __future__ import unicode_literals import csv import io import json import os import string from collections import OrderedDict from unittest import TestCase import pandas as pd from backports.tempfile import TemporaryDirectory from tempfile import NamedTemporaryFile from hypothesis import ( given, HealthCheck, reproduce_failure, settings, ) from hypothesis.strategies import ( dictionaries, integers, just, lists, text, tuples, ) from mock import patch, Mock from oasislmf.exposures.manager import OasisExposuresManager from oasislmf.exposures.pipeline import OasisFilesPipeline from oasislmf.utils.coverage import ( BUILDING_COVERAGE_CODE, CONTENTS_COVERAGE_CODE, OTHER_STRUCTURES_COVERAGE_CODE, TIME_COVERAGE_CODE, ) from oasislmf.utils.exceptions import OasisException from oasislmf.utils.status import ( KEYS_STATUS_FAIL, KEYS_STATUS_NOMATCH, KEYS_STATUS_SUCCESS, ) from ..models.fakes import fake_model from tests import ( canonical_exposure_data, keys_data, write_input_files, ) class OasisExposureManagerAddModel(TestCase): def test_models_is_empty___model_is_added_to_model_dict(self): model = fake_model('supplier', 'model', 'version') manager = OasisExposuresManager() manager.add_model(model) self.assertEqual({model.key: model}, manager.models) def test_manager_already_contains_a_model_with_the_given_key___model_is_replaced_in_models_dict(self): first = fake_model('supplier', 'model', 'version') second = fake_model('supplier', 'model', 'version') manager = OasisExposuresManager(oasis_models=[first]) manager.add_model(second) self.assertIs(second, manager.models[second.key]) def test_manager_already_contains_a_diferent_model___model_is_added_to_dict(self): first = fake_model('first', 'model', 'version') second = fake_model('second', 'model', 'version') manager = OasisExposuresManager(oasis_models=[first]) manager.add_model(second) self.assertEqual({ first.key: first, second.key: second, }, manager.models) class OasisExposureManagerDeleteModels(TestCase): def test_models_is_not_in_manager___no_model_is_removed(self): manager = OasisExposuresManager([ fake_model('supplier', 'model', 'version'), fake_model('supplier2', 'model2', 'version2'), ]) expected = manager.models manager.delete_models([fake_model('supplier3', 'model3', 'version3')]) self.assertEqual(expected, manager.models) def test_models_exist_in_manager___models_are_removed(self): models = [ fake_model('supplier', 'model', 'version'), fake_model('supplier2', 'model2', 'version2'), fake_model('supplier3', 'model3', 'version3'), ] manager = OasisExposuresManager(models) manager.delete_models(models[1:]) self.assertEqual({models[0].key: models[0]}, manager.models) class OasisExposureManagerLoadCanonicalExposuresProfile(TestCase): def test_model_and_kwargs_are_not_set___result_is_empty_dict(self): profile = OasisExposuresManager().load_canonical_exposures_profile() self.assertEqual(None, profile) @given(dictionaries(text(), text())) def test_model_is_set_with_profile_json___models_profile_is_set_to_expected_json(self, expected): model = fake_model(resources={'canonical_exposures_profile_json': json.dumps(expected)}) profile = OasisExposuresManager().load_canonical_exposures_profile(oasis_model=model) self.assertEqual(expected, profile) self.assertEqual(expected, model.resources['canonical_exposures_profile']) @given(dictionaries(text(), text()), dictionaries(text(), text())) def test_model_is_set_with_profile_json_and_profile_json_is_passed_through_kwargs___kwargs_profile_is_used( self, model_profile, kwargs_profile ): model = 
fake_model(resources={'canonical_exposures_profile_json': json.dumps(model_profile)}) profile = OasisExposuresManager().load_canonical_exposures_profile(oasis_model=model, canonical_exposures_profile_json=json.dumps(kwargs_profile)) self.assertEqual(kwargs_profile, profile) self.assertEqual(kwargs_profile, model.resources['canonical_exposures_profile']) @given(dictionaries(text(), text())) def test_model_is_set_with_profile_json_path___models_profile_is_set_to_expected_json(self, expected): with NamedTemporaryFile('w') as f: json.dump(expected, f) f.flush() model = fake_model(resources={'canonical_exposures_profile_json_path': f.name}) profile = OasisExposuresManager().load_canonical_exposures_profile(oasis_model=model) self.assertEqual(expected, profile) self.assertEqual(expected, model.resources['canonical_exposures_profile']) @settings(suppress_health_check=[HealthCheck.too_slow]) @given(dictionaries(text(), text()), dictionaries(text(), text())) def test_model_is_set_with_profile_json_path_and_profile_json_path_is_passed_through_kwargs___kwargs_profile_is_used( self, model_profile, kwargs_profile ): with NamedTemporaryFile('w') as model_file, NamedTemporaryFile('w') as kwargs_file: json.dump(model_profile, model_file) model_file.flush() json.dump(kwargs_profile, kwargs_file) kwargs_file.flush() model = fake_model(resources={'canonical_exposures_profile_json_path': model_file.name}) profile = OasisExposuresManager().load_canonical_exposures_profile(oasis_model=model, canonical_exposures_profile_json_path=kwargs_file.name) self.assertEqual(kwargs_profile, profile) self.assertEqual(kwargs_profile, model.resources['canonical_exposures_profile']) class OasisExposureManagerGetKeys(TestCase): def create_model( self, lookup='lookup', keys_file_path='key_file_path', keys_errors_file_path='keys_errors_file_path', model_exposures_file_path='model_exposures_file_path' ): model = fake_model(resources={'lookup': lookup}) model.resources['oasis_files_pipeline'].keys_file_path = keys_file_path model.resources['oasis_files_pipeline'].keys_errors_file_path = keys_errors_file_path model.resources['oasis_files_pipeline'].model_exposures_file_path = model_exposures_file_path return model @given( lookup=text(min_size=1, alphabet=string.ascii_letters), keys_file_path=text(min_size=1, alphabet=string.ascii_letters), keys_errors_file_path=text(min_size=1, alphabet=string.ascii_letters), exposures_file_path=text(min_size=1, alphabet=string.ascii_letters) ) def test_model_is_supplied_kwargs_are_not___lookup_keys_files_and_exposures_file_from_model_are_used( self, lookup, keys_file_path, keys_errors_file_path, exposures_file_path ): model = self.create_model(lookup=lookup, keys_file_path=keys_file_path, keys_errors_file_path=keys_errors_file_path, model_exposures_file_path=exposures_file_path) with patch('oasislmf.exposures.manager.OasisLookupFactory.save_results', Mock(return_value=(keys_file_path, 1, keys_errors_file_path, 1))) as oklf_mock: res_keys_file_path, res_keys_errors_file_path = OasisExposuresManager().get_keys(oasis_model=model) oklf_mock.assert_called_once_with( lookup, keys_file_path, errors_fp=keys_errors_file_path, model_exposures_fp=exposures_file_path ) self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_file_path) self.assertEqual(res_keys_file_path, keys_file_path) self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_file_path) self.assertEqual(res_keys_errors_file_path, keys_errors_file_path) @given( 
model_lookup=text(min_size=1, alphabet=string.ascii_letters), model_keys_fp=text(min_size=1, alphabet=string.ascii_letters), model_keys_errors_fp=text(min_size=1, alphabet=string.ascii_letters), model_exposures_fp=text(min_size=1, alphabet=string.ascii_letters), lookup=text(min_size=1, alphabet=string.ascii_letters), keys_fp=text(min_size=1, alphabet=string.ascii_letters), keys_errors_fp=text(min_size=1, alphabet=string.ascii_letters), exposures_fp=text(min_size=1, alphabet=string.ascii_letters) ) def test_model_and_kwargs_are_supplied___lookup_keys_files_and_exposures_file_from_kwargs_are_used( self, model_lookup, model_keys_fp, model_keys_errors_fp, model_exposures_fp, lookup, keys_fp, keys_errors_fp, exposures_fp ): model = self.create_model(lookup=model_lookup, keys_file_path=model_keys_fp, keys_errors_file_path=model_keys_errors_fp, model_exposures_file_path=model_exposures_fp) with patch('oasislmf.exposures.manager.OasisLookupFactory.save_results', Mock(return_value=(keys_fp, 1, keys_errors_fp, 1))) as oklf_mock: res_keys_file_path, res_keys_errors_file_path = OasisExposuresManager().get_keys( oasis_model=model, lookup=lookup, model_exposures_file_path=exposures_fp, keys_file_path=keys_fp, keys_errors_file_path=keys_errors_fp ) oklf_mock.assert_called_once_with( lookup, keys_fp, errors_fp=keys_errors_fp, model_exposures_fp=exposures_fp ) self.assertEqual(model.resources['oasis_files_pipeline'].keys_file_path, keys_fp) self.assertEqual(res_keys_file_path, keys_fp) self.assertEqual(model.resources['oasis_files_pipeline'].keys_errors_file_path, keys_errors_fp) self.assertEqual(res_keys_errors_file_path, keys_errors_fp) class OasisExposureManagerLoadMasterDataframe(TestCase): @settings(suppress_health_check=[HealthCheck.too_slow]) @given( profile_element_name=text(alphabet=string.ascii_letters, min_size=1), keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), size=10), exposures=canonical_exposure_data(10, min_value=1) ) def test_row_in_keys_data_is_missing_from_exposure_data___oasis_exception_is_raised( self, profile_element_name, keys, exposures ): matching_exposures = [e for e in exposures if e[0] in map(lambda k: k['id'], keys)] exposures.pop(exposures.index(matching_exposures[0])) profile = { profile_element_name: {'ProfileElementName': profile_element_name, 'FieldName': 'TIV', 'CoverageTypeID': 1} } with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file: write_input_files(keys, keys_file.name, exposures, exposures_file.name, profile_element_name=profile_element_name) with self.assertRaises(OasisException): OasisExposuresManager().load_master_data_frame(exposures_file.name, keys_file.name, profile) @settings(suppress_health_check=[HealthCheck.too_slow]) @given( profile_element_name=text(alphabet=string.ascii_letters, min_size=1), keys=keys_data(from_coverage_type_ids=just(CONTENTS_COVERAGE_CODE), from_statuses=just(KEYS_STATUS_SUCCESS), size=10), exposures=canonical_exposure_data(10, min_value=1) ) def test_canonical_profile_coverage_types_dont_match_model_defined_coverage_types___oasis_exception_is_raised( self, profile_element_name, keys, exposures ): matching_exposures = [e for e in exposures if e[0] in map(lambda k: k['id'], keys)] exposures.pop(exposures.index(matching_exposures[0])) profile = { profile_element_name: {'ProfileElementName': profile_element_name, 'FieldName': 'TIV', 'CoverageTypeID': BUILDING_COVERAGE_CODE} } with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file: write_input_files(keys, 
keys_file.name, exposures, exposures_file.name, profile_element_name=profile_element_name) with self.assertRaises(OasisException): OasisExposuresManager().load_master_data_frame(exposures_file.name, keys_file.name, profile) @settings(suppress_health_check=[HealthCheck.too_slow]) @given( profile_element_name=text(alphabet=string.ascii_letters, min_size=1), keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), size=10), exposures=canonical_exposure_data(num_rows=10, min_value=1) ) def test_each_row_has_a_single_row_per_element_with_each_row_having_a_positive_value_for_the_profile_element___each_row_is_present( self, profile_element_name, keys, exposures ): profile = { profile_element_name: {'ProfileElementName': profile_element_name, 'FieldName': 'TIV', 'CoverageTypeID': 1} } expected = [] keys_values_tuples = map(lambda li: tuple(filter(lambda v: type(v) == int, li)), [k.values() for k in keys]) for i, zipped_data in enumerate(zip(keys_values_tuples, exposures)): expected.append(( i + 1, zipped_data[0], zipped_data[1][1], )) with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file: write_input_files(keys, keys_file.name, exposures, exposures_file.name, profile_element_name=profile_element_name) result = OasisExposuresManager().load_master_data_frame( exposures_file.name, keys_file.name, profile, ) self.assertEqual(len(expected), len(result)) for i in range(len(result)): row = {k:(int(v) if k != 'tiv' else v) for k, v in result.iloc[i].to_dict().items()} self.assertEqual(i + 1, row['item_id']) self.assertEqual(i + 1, row['coverage_id']) self.assertEqual(exposures[i][1], row['tiv']) self.assertEqual(keys[i]['area_peril_id'], row['areaperil_id']) self.assertEqual(keys[i]['vulnerability_id'], row['vulnerability_id']) self.assertEqual(i + 1, row['group_id']) self.assertEqual(1, row['summary_id']) self.assertEqual(1, row['summaryset_id']) @settings(suppress_health_check=[HealthCheck.too_slow]) @given( profile_element_name=text(alphabet=string.ascii_letters, min_size=1), keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), size=10), exposures=canonical_exposure_data(num_rows=10, min_value=1) ) def test_each_row_has_a_single_row_per_element_with_each_row_having_any_value_for_the_profile_element___rows_with_profile_elements_gt_0_are_present( self, profile_element_name, keys, exposures ): profile = { profile_element_name: {'ProfileElementName': profile_element_name, 'FieldName': 'TIV', 'CoverageTypeID': 1} } expected = [] keys_values_tuples = map(lambda li: tuple(filter(lambda v: type(v) == int, li)), [k.values() for k in keys]) row_id = 0 for zipped_keys, zipped_exposure in zip(keys_values_tuples, exposures): if zipped_exposure[1] > 0: row_id += 1 expected.append(( row_id, zipped_keys, zipped_exposure[1], )) with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file: write_input_files(keys, keys_file.name, exposures, exposures_file.name, profile_element_name=profile_element_name) result = OasisExposuresManager().load_master_data_frame( exposures_file.name, keys_file.name, profile, ) self.assertEqual(len(expected), len(result)) for i in range(len(result)): row = {k:(int(v) if k != 'tiv' else v) for k, v in result.iloc[i].to_dict().items()} self.assertEqual(i + 1, row['item_id']) self.assertEqual(i + 1, row['coverage_id']) self.assertEqual(exposures[i][1], row['tiv']) self.assertEqual(keys[i]['area_peril_id'], row['areaperil_id']) self.assertEqual(keys[i]['vulnerability_id'], row['vulnerability_id']) self.assertEqual(i + 1, row['group_id']) 
self.assertEqual(1, row['summary_id']) self.assertEqual(1, row['summaryset_id']) class FileGenerationTestCase(TestCase): def setUp(self): self.items_filename = 'items.csv' self.coverages_filename = 'coverages.csv' self.gulsummaryxref_filename = 'gulsummaryxref.csv' def check_items_file(self, keys, out_dir): expected = [ { 'item_id': i + 1, 'coverage_id': i + 1, 'areaperil_id': key['area_peril_id'], 'vulnerability_id': key['vulnerability_id'], 'group_id': i + 1, } for i, key in enumerate(keys) ] with io.open(os.path.join(out_dir, self.items_filename), 'r', encoding='utf-8') as f: result = list(pd.read_csv(f).T.to_dict().values()) self.assertEqual(expected, result) def check_coverages_file(self, exposures, out_dir): expected = [ { 'coverage_id': item_id + 1, 'tiv': item[1], } for item_id, item in enumerate(exposures) ] with io.open(os.path.join(out_dir, self.coverages_filename), 'r', encoding='utf-8') as f: result = list(pd.read_csv(f).T.to_dict().values()) self.assertEqual(expected, result) def check_gul_file(self, exposures, out_dir): expected = [ { 'coverage_id': item_id + 1, 'summary_id': 1, 'summaryset_id': 1, } for item_id in range(len(exposures)) ] with io.open(os.path.join(out_dir, self.gulsummaryxref_filename), 'r', encoding='utf-8') as f: result = list(pd.read_csv(f).T.to_dict().values()) self.assertEqual(expected, result) class OasisExposuresManagerGenerateItemsFile(FileGenerationTestCase): @settings(suppress_health_check=[HealthCheck.too_slow]) @given( keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10), exposures=canonical_exposure_data(10, min_value=1) ) def test_paths_are_stored_in_the_model___model_paths_are_used(self, keys, exposures): profile = { 'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1} } with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir: write_input_files(keys, keys_file.name, exposures, exposures_file.name) model = fake_model(resources={'canonical_exposures_profile': profile}) model.resources['oasis_files_pipeline'].keys_file_path = keys_file.name model.resources['oasis_files_pipeline'].canonical_exposures_file_path = exposures_file.name model.resources['oasis_files_pipeline'].items_file_path = os.path.join(out_dir, self.items_filename) OasisExposuresManager().generate_items_file(oasis_model=model) self.check_items_file(keys, out_dir) @settings(suppress_health_check=[HealthCheck.too_slow]) @given( keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10), exposures=canonical_exposure_data(10, min_value=1) ) def test_paths_are_stored_in_the_kwargs___kwarg_paths_are_used(self, keys, exposures): profile = { 'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1} } with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir: write_input_files(keys, keys_file.name, exposures, exposures_file.name) model = fake_model() OasisExposuresManager().generate_items_file( oasis_model=model, canonical_exposures_profile=profile, keys_file_path=keys_file.name, canonical_exposures_file_path=exposures_file.name, items_file_path=os.path.join(out_dir, self.items_filename) ) self.check_items_file(keys, out_dir) class OasisExposuresManagerGenerateCoveragesFile(FileGenerationTestCase): @settings(suppress_health_check=[HealthCheck.too_slow]) @given( keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10), 
exposures=canonical_exposure_data(10, min_value=1) ) def test_paths_are_stored_in_the_model___model_paths_are_used(self, keys, exposures): profile = { 'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1} } with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir: write_input_files(keys, keys_file.name, exposures, exposures_file.name) model = fake_model(resources={'canonical_exposures_profile': profile}) model.resources['oasis_files_pipeline'].keys_file_path = keys_file.name model.resources['oasis_files_pipeline'].canonical_exposures_file_path = exposures_file.name model.resources['oasis_files_pipeline'].coverages_file_path = os.path.join(out_dir, self.coverages_filename) OasisExposuresManager().generate_coverages_file(oasis_model=model) self.check_coverages_file(exposures, out_dir) @settings(suppress_health_check=[HealthCheck.too_slow]) @given( keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10), exposures=canonical_exposure_data(10, min_value=1) ) def test_paths_are_stored_in_the_kwargs___kwarg_paths_are_used(self, keys, exposures): profile = { 'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1} } with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir: write_input_files(keys, keys_file.name, exposures, exposures_file.name) model = fake_model() OasisExposuresManager().generate_coverages_file( oasis_model=model, canonical_exposures_profile=profile, keys_file_path=keys_file.name, canonical_exposures_file_path=exposures_file.name, coverages_file_path=os.path.join(out_dir, self.coverages_filename) ) self.check_coverages_file(exposures, out_dir) class OasisExposuresManagerGenerateGulsummaryxrefFile(FileGenerationTestCase): @settings(suppress_health_check=[HealthCheck.too_slow]) @given( keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10), exposures=canonical_exposure_data(10, min_value=1) ) def test_paths_are_stored_in_the_model___model_paths_are_used(self, keys, exposures): profile = { 'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1} } with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir: write_input_files(keys, keys_file.name, exposures, exposures_file.name) model = fake_model(resources={'canonical_exposures_profile': profile}) model.resources['oasis_files_pipeline'].keys_file_path = keys_file.name model.resources['oasis_files_pipeline'].canonical_exposures_file_path = exposures_file.name model.resources['oasis_files_pipeline'].gulsummaryxref_file_path = os.path.join(out_dir, self.gulsummaryxref_filename) OasisExposuresManager().generate_gulsummaryxref_file(oasis_model=model) self.check_gul_file(exposures, out_dir) @settings(suppress_health_check=[HealthCheck.too_slow]) @given( keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10), exposures=canonical_exposure_data(10, min_value=1) ) def test_paths_are_stored_in_the_kwargs___kwarg_paths_are_used(self, keys, exposures): profile = { 'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1} } with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir: write_input_files(keys, keys_file.name, exposures, exposures_file.name) model = fake_model() 
OasisExposuresManager().generate_gulsummaryxref_file( oasis_model=model, canonical_exposures_profile=profile, keys_file_path=keys_file.name, canonical_exposures_file_path=exposures_file.name, gulsummaryxref_file_path=os.path.join(out_dir, self.gulsummaryxref_filename) ) self.check_gul_file(exposures, out_dir) class OasisExposuresManagerGenerateOasisFiles(FileGenerationTestCase): @settings(suppress_health_check=[HealthCheck.too_slow]) @given( keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10), exposures=canonical_exposure_data(10, min_value=1) ) def test_paths_are_stored_in_the_model___model_paths_are_used(self, keys, exposures): profile = { 'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1} } with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir: write_input_files(keys, keys_file.name, exposures, exposures_file.name) model = fake_model(resources={'canonical_exposures_profile': profile}) model.resources['oasis_files_pipeline'].keys_file_path = keys_file.name model.resources['oasis_files_pipeline'].canonical_exposures_file_path = exposures_file.name model.resources['oasis_files_pipeline'].items_file_path = os.path.join(out_dir, self.items_filename) model.resources['oasis_files_pipeline'].coverages_file_path = os.path.join(out_dir, self.coverages_filename) model.resources['oasis_files_pipeline'].gulsummaryxref_file_path = os.path.join(out_dir, self.gulsummaryxref_filename) OasisExposuresManager().generate_oasis_files(oasis_model=model) self.check_items_file(keys, out_dir) self.check_coverages_file(exposures, out_dir) self.check_gul_file(exposures, out_dir) @settings(suppress_health_check=[HealthCheck.too_slow]) @given( keys=keys_data(from_statuses=just(KEYS_STATUS_SUCCESS), min_size=10), exposures=canonical_exposure_data(10, min_value=1) ) def test_paths_are_stored_in_the_kwargs___kwarg_paths_are_used(self, keys, exposures): profile = { 'profile_element': {'ProfileElementName': 'profile_element', 'FieldName': 'TIV', 'CoverageTypeID': 1} } with NamedTemporaryFile('w') as keys_file, NamedTemporaryFile('w') as exposures_file, TemporaryDirectory() as out_dir: write_input_files(keys, keys_file.name, exposures, exposures_file.name) model = fake_model() OasisExposuresManager().generate_oasis_files( oasis_model=model, canonical_exposures_profile=profile, keys_file_path=keys_file.name, canonical_exposures_file_path=exposures_file.name, items_file_path=os.path.join(out_dir, self.items_filename), coverages_file_path=os.path.join(out_dir, self.coverages_filename), gulsummaryxref_file_path=os.path.join(out_dir, self.gulsummaryxref_filename) ) self.check_items_file(keys, out_dir) self.check_coverages_file(exposures, out_dir) self.check_gul_file(exposures, out_dir) class OasisExposuresTransformSourceToCanonical(TestCase): @given( source_exposures_file_path=text(), source_to_canonical_exposures_transformation_file_path=text(), source_exposures_validation_file_path=text(), canonical_exposures_file_path=text() ) def test_model_is_not_set___parameters_are_taken_from_kwargs( self, source_exposures_file_path, source_to_canonical_exposures_transformation_file_path, source_exposures_validation_file_path, canonical_exposures_file_path ): trans_call_mock = Mock() with patch('oasislmf.exposures.manager.Translator', Mock(return_value=trans_call_mock)) as trans_mock: OasisExposuresManager().transform_source_to_canonical( source_exposures_file_path=source_exposures_file_path, 
source_exposures_validation_file_path=source_exposures_validation_file_path, source_to_canonical_exposures_transformation_file_path=source_to_canonical_exposures_transformation_file_path, canonical_exposures_file_path=canonical_exposures_file_path ) trans_mock.assert_called_once_with( os.path.abspath(source_exposures_file_path), os.path.abspath(canonical_exposures_file_path), os.path.abspath(source_to_canonical_exposures_transformation_file_path), os.path.abspath(source_exposures_validation_file_path), append_row_nums=True, ) trans_call_mock.assert_called_once_with() @given( source_exposures_file_path=text(), source_exposures_validation_file_path=text(), source_to_canonical_exposures_transformation_file_path=text(), canonical_exposures_file_path=text() ) def test_model_is_set___parameters_are_taken_from_model( self, source_exposures_file_path, source_to_canonical_exposures_transformation_file_path, source_exposures_validation_file_path, canonical_exposures_file_path): model = fake_model(resources={ 'source_exposures_file_path': source_exposures_file_path, 'source_exposures_validation_file_path': source_exposures_validation_file_path, 'source_to_canonical_exposures_transformation_file_path': source_to_canonical_exposures_transformation_file_path, }) model.resources['oasis_files_pipeline'].canonical_exposures_path = canonical_exposures_file_path trans_call_mock = Mock() with patch('oasislmf.exposures.manager.Translator', Mock(return_value=trans_call_mock)) as trans_mock: OasisExposuresManager().transform_source_to_canonical( source_exposures_file_path=source_exposures_file_path, source_to_canonical_exposures_transformation_file_path=source_to_canonical_exposures_transformation_file_path, source_exposures_validation_file_path=source_exposures_validation_file_path, canonical_exposures_file_path=canonical_exposures_file_path ) trans_mock.assert_called_once_with( os.path.abspath(source_exposures_file_path), os.path.abspath(canonical_exposures_file_path), os.path.abspath(source_to_canonical_exposures_transformation_file_path), os.path.abspath(source_exposures_validation_file_path), append_row_nums=True, ) trans_call_mock.assert_called_once_with() class OasisExposuresTransformCanonicalToModel(TestCase): @given( canonical_exposures_file_path=text(), canonical_exposures_validation_file_path=text(), canonical_to_model_exposures_transformation_file_path=text(), model_exposures_file_path=text() ) def test_model_is_not_set___parameters_are_taken_from_kwargs( self, canonical_exposures_file_path, canonical_to_model_exposures_transformation_file_path, canonical_exposures_validation_file_path, model_exposures_file_path): trans_call_mock = Mock() with patch('oasislmf.exposures.manager.Translator', Mock(return_value=trans_call_mock)) as trans_mock: OasisExposuresManager().transform_canonical_to_model( canonical_exposures_file_path=canonical_exposures_file_path, canonical_to_model_exposures_transformation_file_path=canonical_to_model_exposures_transformation_file_path, canonical_exposures_validation_file_path=canonical_exposures_validation_file_path, model_exposures_file_path=model_exposures_file_path, ) trans_mock.assert_called_once_with( os.path.abspath(canonical_exposures_file_path), os.path.abspath(model_exposures_file_path), os.path.abspath(canonical_to_model_exposures_transformation_file_path), os.path.abspath(canonical_exposures_validation_file_path), append_row_nums=False, ) trans_call_mock.assert_called_once_with() @given( canonical_exposures_file_path=text(), 
canonical_exposures_validation_file_path=text(), canonical_to_model_exposures_transformation_file_path=text(), model_exposures_file_path=text() ) def test_model_is_set___parameters_are_taken_from_model( self, canonical_exposures_file_path, canonical_to_model_exposures_transformation_file_path, canonical_exposures_validation_file_path, model_exposures_file_path): model = fake_model(resources={ 'canonical_exposures_validation_file_path': canonical_exposures_validation_file_path, 'canonical_to_model_exposures_transformation_file_path': canonical_to_model_exposures_transformation_file_path, }) model.resources['oasis_files_pipeline'].canonical_exposures_path = canonical_exposures_file_path model.resources['oasis_files_pipeline'].model_exposures_file_path = model_exposures_file_path trans_call_mock = Mock() with patch('oasislmf.exposures.manager.Translator', Mock(return_value=trans_call_mock)) as trans_mock: OasisExposuresManager().transform_canonical_to_model( canonical_exposures_file_path=canonical_exposures_file_path, canonical_exposures_validation_file_path=canonical_exposures_validation_file_path, canonical_to_model_exposures_transformation_file_path=canonical_to_model_exposures_transformation_file_path, model_exposures_file_path=model_exposures_file_path, ) trans_mock.assert_called_once_with( os.path.abspath(canonical_exposures_file_path), os.path.abspath(model_exposures_file_path), os.path.abspath(canonical_to_model_exposures_transformation_file_path), os.path.abspath(canonical_exposures_validation_file_path), append_row_nums=False, ) trans_call_mock.assert_called_once_with() class OasisExposureManagerCreate(TestCase): @given(supplier=text(), model_id=text(), version=text()) def test_supplier_model_and_version_are_supplied___correct_key_is_created(self, supplier, model_id, version): model = fake_model(supplier=supplier, model=model_id, version=version) self.assertEqual('{}/{}/{}'.format(supplier, model_id, version), model.key) def test_oasis_file_path_is_given___path_is_stored_as_absolute_path(self): model = fake_model(resources={'oasis_files_path': 'some_path'}) result = model.resources['oasis_files_path'] expected = os.path.abspath('some_path') self.assertEqual(expected, result) def test_oasis_file_path_is_not_given___path_is_abs_path_of_default(self): model = fake_model() result = model.resources['oasis_files_path'] expected = os.path.abspath(os.path.join('Files', model.key.replace('/', '-'))) self.assertEqual(expected, result) def test_file_pipeline_is_not_supplied___default_pipeline_is_set(self): model = fake_model() pipeline = model.resources['oasis_files_pipeline'] self.assertIsInstance(pipeline, OasisFilesPipeline) self.assertEqual(pipeline.model_key, model.key) def test_file_pipeline_is_supplied___pipeline_is_unchanged(self): pipeline = OasisFilesPipeline() model = fake_model(resources={'oasis_files_pipeline': pipeline}) self.assertIs(pipeline, model.resources['oasis_files_pipeline']) def test_pipeline_is_not_a_pipeline_instance___oasis_exception_is_raised(self): class FakePipeline(object): pass pipeline = FakePipeline() with self.assertRaises(OasisException): fake_model(resources={'oasis_files_pipeline': pipeline}) def test_canonical_exposures_profile_not_set___canonical_exposures_profile_in_none(self): model = fake_model() profile = model.resources['canonical_exposures_profile'] self.assertEqual(None, profile) @given(expected=dictionaries(text(), text())) def test_canonical_exposures_profile_json_set___canonical_exposures_profile_matches_json(self, expected): model = 
fake_model(resources={'canonical_exposures_profile_json': json.dumps(expected)}) profile = model.resources['canonical_exposures_profile'] self.assertEqual(expected, profile) @given(expected=dictionaries(text(), text())) def test_canonical_exposures_profile_path_set___canonical_exposures_profile_matches_json(self, expected): with NamedTemporaryFile('w') as f: json.dump(expected, f) f.flush() model = fake_model(resources={'canonical_exposures_profile_json_path': f.name}) profile = model.resources['canonical_exposures_profile'] self.assertEqual(expected, profile) @settings(deadline=None, suppress_health_check=[HealthCheck.too_slow]) @given(expected=dictionaries(text(), text()), new=dictionaries(text(), text())) def test_canonical_exposures_profile_set___profile_is_not_updated(self, expected, new): model = fake_model(resources={ 'canonical_exposures_profile': expected, 'canonical_exposures_profile_json': json.dumps(new), }) profile = model.resources['canonical_exposures_profile'] self.assertEqual(expected, profile)
python
#coding=utf-8
from datetime import datetime

from django.db import models
from django.utils import timezone
from django.core.urlresolvers import reverse

from aops.settings import INT_CHOICES, STATUS_CHOICES
from cmdb import signals
from cmdb.models.ip_record import IpRecord
from cmdb.models.physical_server import PhysicalServer


class Host(models.Model):
    uuid = models.CharField(max_length=255, unique=True)
    roles = models.CharField(max_length=255, null=True)
    physical_server = models.ForeignKey(PhysicalServer, related_name='host_physical_server', null=True)
    salt_id = models.CharField(max_length=255, null=True)
    ip_record = models.ManyToManyField(IpRecord, related_name='host_ip_record', null=True)
    operating_system = models.CharField(max_length=255, null=True)
    os_version = models.CharField(max_length=255, null=True)
    host_name = models.CharField(max_length=255, null=True)
    processor = models.CharField(max_length=255, null=True)
    memory = models.CharField(max_length=255, null=True)
    harddisk = models.CharField(max_length=255, null=True)
    comment = models.CharField(max_length=255, null=True)
    status = models.IntegerField(editable=True, choices=STATUS_CHOICES, default=0)
    is_run = models.IntegerField(editable=True, choices=INT_CHOICES, default=0)
    is_virtual_machine = models.IntegerField(editable=True, choices=INT_CHOICES, default=0)
    is_dynamic = models.IntegerField(editable=True, choices=INT_CHOICES, default=0)
    is_deleted = models.IntegerField(editable=True, choices=INT_CHOICES, default=0)
    create_time = models.DateTimeField(auto_now_add=True)
    update_time = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'host'
        ordering = ['-uuid']
        app_label = 'cmdb'

    def __unicode__(self):
        return self.uuid

    def search_name(self):
        return '%s: %s # %s # %s # %s # %s # %s # %s' % (
            self.__class__.__name__, self.uuid, self.roles,
            self.physical_server.__unicode__(), self.salt_id,
            self.operating_system, self.os_version, self.host_name)

    def get_absolute_url(self):
        return reverse('cmdb:edit_host', args=[self.id])

    # Expose an items() method so the fields can be iterated from template tags
    def items(self):
        return [(field, field.value_to_string(self)) for field in Host._meta.fields]

    def delete(self, *args, **kwargs):
        super(Host, self).delete(*args, **kwargs)

    def save(self, *args, **kwargs):
        if self.id is not None:
            host = Host.objects.get(pk=self.id)
        else:
            print 'Alter'
        super(Host, self).save(*args, **kwargs)
python
# -*- coding:utf-8 -*-
from unittest import TestCase

from simstring.measure.cosine import CosineMeasure


class TestCosine(TestCase):
    measure = CosineMeasure()

    def test_min_feature_size(self):
        self.assertEqual(self.measure.min_feature_size(5, 1.0), 5)
        self.assertEqual(self.measure.min_feature_size(5, 0.5), 2)

    def test_max_feature_size(self):
        self.assertEqual(self.measure.max_feature_size(5, 1.0), 5)
        self.assertEqual(self.measure.max_feature_size(5, 0.5), 20)

    def test_minimum_common_feature_count(self):
        self.assertEqual(self.measure.minimum_common_feature_count(5, 5, 1.0), 5)
        self.assertEqual(self.measure.minimum_common_feature_count(5, 20, 1.0), 10)
        self.assertEqual(self.measure.minimum_common_feature_count(5, 5, 0.5), 3)

    def test_similarity(self):
        x = ["a", "ab", "bc", "c"]
        y = ["a", "ab", "bc", "cd", "e"]
        self.assertEqual(round(self.measure.similarity(x, x), 2), 1.0)
        self.assertEqual(round(self.measure.similarity(x, y), 2), 0.67)

        z = ["a", "ab", "ba", "ab", "a"]
        self.assertEqual(round(self.measure.similarity(z, z), 2), 1.0)
        self.assertEqual(round(self.measure.similarity(x, z), 2), 0.58)
        self.assertEqual(round(self.measure.similarity(x, y), 2), 0.67)

        # Test as per paper: trigrams with quotes of "methyl sulphone" and "methyl sulfone"
        a = [' "m', '"me', 'met', 'eth', 'thy', 'hyl', 'yl ', 'l s', ' su', 'sul', 'ulf', 'lfo', 'fon', 'one', 'ne"', 'e" ']
        b = [' "m', '"me', 'met', 'eth', 'thy', 'hyl', 'yl ', 'l s', ' su', 'sul', 'ulp', 'lph', 'pho', 'hon', 'one', 'ne"', 'e" ']
        self.assertEqual(round(self.measure.similarity(a, b), 3), 0.788)  # BUG? Disagrees with paper that claims should be 0.788
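
# Illustrative aside (not part of the simstring test suite): the expected values
# above are consistent with treating each feature list as a set and computing
#     cosine(X, Y) = |X & Y| / sqrt(|X| * |Y|)
# The helper below is an assumed, set-based approximation useful for checking the
# numbers by hand; the library's real implementation may differ in detail.
import math

def approx_cosine_similarity(x_features, y_features):
    """Set-based cosine similarity between two feature lists (illustrative only)."""
    x, y = set(x_features), set(y_features)
    return len(x & y) / math.sqrt(len(x) * len(y))

# e.g. approx_cosine_similarity(["a", "ab", "bc", "c"], ["a", "ab", "bc", "cd", "e"])
# gives ~0.67, and the two trigram lists above give ~0.788, matching the assertions.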
python
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 15:28:07 2020

@author: ESOL
"""
# Import module
import jpype
# Enable Java imports
import jpype.imports
# Pull in types
from jpype.types import *

jpype.addClassPath('C:/Users/esol/OneDrive - Equinor/programming/neqsim/NeqSim.jar')
# Launch the JVM
#jpype.startJVM()

import neqsim
import neqsim.thermo as thermo

fluid1 = thermo.system.SystemSrkEos(303.15, 35.01325)
fluid1.addComponent("nitrogen", 0.0028941)
fluid1.addComponent("CO2", 0.054069291)
fluid1.addComponent("methane", 0.730570915)
fluid1.addComponent("ethane", 0.109004002)
fluid1.addComponent("propane", 0.061518891)
fluid1.addComponent("n-butane", 0.0164998)
fluid1.addComponent("i-butane", 0.006585)
fluid1.addComponent("n-pentane", 0.005953)
fluid1.addComponent("i-pentane", 0.0040184)
fluid1.addTBPfraction("C6", 0.6178399, 86.17801 / 1000.0, 0.6639999)
fluid1.addComponent("water", 0.27082)
fluid1.createDatabase(True)
fluid1.setMixingRule(2)
fluid1.setMultiPhaseCheck(True)
python
from fontbakery.callable import check from fontbakery.callable import condition from fontbakery.checkrunner import Section, PASS, FAIL, WARN from fontbakery.fonts_profile import profile_factory from tests.test_general import ( is_italic, com_roboto_fonts_check_italic_angle, com_roboto_fonts_check_fs_type, com_roboto_fonts_check_vendorid, com_roboto_fonts_check_digit_widths, com_roboto_fonts_check_charset_coverage, ) profile = profile_factory(default_section=Section("Roboto android v3")) exclude_glyphs = frozenset([0x00A0]) ROBOTO_PROFILE_CHECKS = [ "com.roboto.fonts/check/vertical_metrics", "com.roboto.fonts/check/italic_angle", "com.roboto.fonts/check/fs_type", "com.roboto.fonts/check/vendorid", "com.roboto.fonts/check/digit_widths", "com.roboto.fonts/check/glyph_dont_round_to_grid", "com.roboto.fonts/check/charset_coverage", ] @condition def include_glyphs(): return frozenset([ 0x2117, # SOUND RECORDING COPYRIGHT 0xEE01, 0xEE02, 0xF6C3] ) # legacy PUA @condition def exclude_glyphs(): return frozenset([ 0x20E3, # COMBINING ENCLOSING KEYCAP 0x2191, # UPWARDS ARROW 0x2193, # DOWNWARDS ARROW 0x2072, 0x2073, 0x208F] + # unassigned characters list(range(0xE000, 0xF8FF + 1)) + list(range(0xF0000, 0x10FFFF + 1)) # other PUA ) - include_glyphs() # don't exclude legacy PUA @check( id="com.roboto.fonts/check/glyph_dont_round_to_grid", ) def com_roboto_fonts_check_glyph_dont_round_to_grid(ttFont): """Test certain glyphs don't round to grid""" failed = False glyphset = ttFont.getGlyphSet() for name in ["ellipsis"]: glyph = glyphset[name]._glyph for component in glyph.components: if component.flags & (1 << 2): failed = True yield FAIL, f"Round to grid flag must be disabled for '{name}' components" if not failed: yield PASS, "Glyphs do not have round to grid enabled" # test names @check( id="com.roboto.fonts/check/vertical_metrics", ) def com_roboto_fonts_check_vertical_metrics(ttFont): """Check vertical metrics are correct""" failed = [] expected = { # Android values come from v2.136 android fonts # https://github.com/googlefonts/roboto/releases/tag/v2.136 ("head", "yMin"): -555, ("head", "yMax"): 2163, ("hhea", "descent"): -500, ("hhea", "ascent"): 1900, ("hhea", "lineGap"): 0, ("OS/2", "sTypoDescender"): -555, ("OS/2", "sTypoAscender"): 2146, ("OS/2", "sTypoLineGap"): 0, ("OS/2", "usWinDescent"): 555, ("OS/2", "usWinAscent"): 2146, } for (table, k), v in expected.items(): font_val = getattr(ttFont[table], k) if font_val != v: failed.append((table, k, v, font_val)) if not failed: yield PASS, "Fonts have correct vertical metrics" else: msg = "\n".join( [ f"- {tbl}.{k} is {font_val} it should be {v}" for tbl, k, v, font_val in failed ] ) yield FAIL, f"Fonts have incorrect vertical metrics:\n{msg}" # ligatures profile.auto_register(globals()) profile.test_expected_checks(ROBOTO_PROFILE_CHECKS, exclusive=True)
python
import socket as sk import sys import threading from PyQt4.QtCore import * MAX_THREADS = 50 #def usage(): #print("\npyScan 0.1") #print("usage: pyScan <host> [start port] [end port]") class Scanner(threading.Thread): def __init__(self, host, port): threading.Thread.__init__(self) # host and port self.host = host self.port = port # build up the socket obj self.sd = sk.socket(sk.AF_INET, sk.SOCK_STREAM) def run(self): try: # connect to the given host:port self.sd.connect((self.host, self.port)) print("%s:%d OPEN" % (self.host, self.port)) #self.emit('SIGNAL(QString),OPEN') self.sd.close() except: pass class pyScan: def __init__(self, args=[]): # arguments vector self.args = args # start port and end port self.start, self.stop = 1, 1024 # host name self.host = "" # check the arguments if len(self.args) == 4: self.host = self.args[1] try: self.start = int(self.args[2]) self.stop = int(self.args[3]) except ValueError: #usage() return if self.start > self.stop: #usage() return elif len(self.args) == 2: self.host = self.args[1] else: #usage() return try: sk.gethostbyname(self.host) except: print("hostname '%s' unknown" % self.host) self.scan(self.host, self.start, self.stop) def scan(self, host, start, stop): self.port = start while self.port <= stop: while threading.activeCount() < MAX_THREADS: Scanner(host, self.port).start() self.port += 1 if __name__ == "__main__": pyScan(sys.argv) ''' ############################################################# # a simple portscanner with multithreading # QUEUE BASED VERSION import socket import sys import threading, Queue MAX_THREADS = 50 class Scanner(threading.Thread): def __init__(self, inq, outq): threading.Thread.__init__(self) self.setDaemon(1) # queues for (host, port) self.inq = inq self.outq = outq def run(self): while 1: host, port = self.inq.get() sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: # connect to the given host:port sd.connect((host, port)) except socket.error: # set the CLOSED flag self.outq.put((host, port, 'CLOSED')) else: self.outq.put((host, port, 'OPEN')) sd.close() def scan(host, start, stop, nthreads=MAX_THREADS): toscan = Queue.Queue() scanned = Queue.Queue() scanners = [Scanner(toscan, scanned) for i in range(nthreads)] for scanner in scanners: scanner.start() hostports = [(host, port) for port in xrange(start, stop+1)] for hostport in hostports: toscan.put(hostport) results = {} for host, port in hostports: while (host, port) not in results: nhost, nport, nstatus = scanned.get() results[(nhost, nport)] = nstatus status = results[(host, port)] if status <> 'CLOSED': print '%s:%d %s' % (host, port, status) if __name__ == '__main__': scan('localhost', 0, 1024) ''' None
python
""" Tyson Reimer October 08th, 2020 """ import os import numpy as np import pandas as pd import statsmodels.api as sm from scipy.stats import norm from umbms import get_proj_path, get_script_logger from umbms.loadsave import load_pickle ############################################################################### __DATA_DIR = os.path.join(get_proj_path(), 'output/by-adi-preds/') phant_info_dir = os.path.join(get_proj_path(), 'data/phant-info/') ############################################################################### if __name__ == "__main__": logger = get_script_logger(__file__) # Load metadata lists of correct and incorrect predictions cor_preds = load_pickle(os.path.join(__DATA_DIR, 'byadi_cor_preds.pickle')) incor_preds = load_pickle(os.path.join(__DATA_DIR, 'byadi_incor_preds.pickle')) # Define list of metadata dicts for all predictions all_preds = cor_preds + incor_preds # Define array indicating correct vs incorrect prediction pred_labels = np.zeros([len(all_preds), ]) pred_labels[:len(cor_preds)] = 1 # Load phantom info phant_info = np.genfromtxt(os.path.join(phant_info_dir, 'phant_info.csv'), delimiter=',', dtype=['<U20', '<U20', float, float, float]) # All phantom IDs phant_ids = np.array(['%s%s' % (ii[0], ii[1]) for ii in phant_info]) # Init dicts for phantom density and breast volume phant_densities = dict() phant_vols = dict() for ii in range(len(phant_ids)): # Store the fibroglandular % by volume phant_densities[phant_ids[ii]] = 100 * phant_info[ii][2] # Store the adipose volume in cubic cm phant_vols[phant_ids[ii]] = phant_info[ii][3] / (10 * 10 * 10) tum_presence = np.array([~np.isnan(md['tum_rad']) for md in all_preds]) tum_preds = np.array(all_preds)[tum_presence] tum_labels = pred_labels[tum_presence] healthy_preds = np.array(all_preds)[~tum_presence] healthy_labels = pred_labels[~tum_presence] ########################################################################### logger.info('TUMOUR PREDICTIONS') # Init metadata dataframe md_df = pd.DataFrame() # Get the fibroglandular polar radii fib_polar_rad = np.array([np.sqrt((md['fib_x'] - md['adi_x']) ** 2 + (md['fib_y'] - md['adi_y']) ** 2) for md in tum_preds]) md_df['fib_polar_rad'] = fib_polar_rad # Get the adipose polar radii adi_polar_rad = np.array([np.sqrt(md['adi_x'] ** 2 + md['adi_y'] ** 2) for md in tum_preds]) md_df['adi_polar_rad'] = adi_polar_rad # Get breast density in % by volume from each scan, # include in dataframe density = np.array([phant_densities[md['phant_id']] for md in tum_preds]) md_df['density'] = density # Get Adipose ID from each scan, include in dataframe adi_vols = np.array([phant_vols[md['phant_id']] for md in tum_preds]) md_df['adi_vol'] = adi_vols # Get the tumor radii from each scan, include in dataframe tum_rads = np.array([md['tum_rad'] for md in tum_preds]) tum_rads[np.isnan(tum_rads)] = 0 md_df['tum_rad'] = tum_rads # Get tumor polar radii from each scan, include in dataframe tum_polar_rad = np.array([np.sqrt((md['tum_x'] - md['adi_x']) ** 2 + (md['tum_y'] - md['adi_y']) ** 2) for md in tum_preds]) tum_polar_rad[np.isnan(tum_polar_rad)] = 0 md_df['tum_polar_rad'] = tum_polar_rad # Include tumour z-position in metadata tum_zs = np.array([md['tum_z'] for md in tum_preds]) tum_zs[np.isnan(tum_zs)] = 0 tum_zs = np.abs(tum_zs) tum_zs = np.max(tum_zs) - tum_zs # Convert so that it is the distance from the antenna z-plane md_df['tum_z'] = tum_zs tum_in_fib = np.array([(md['tum_in_fib']) for md in tum_preds]) md_df['tum_in_fib'] = tum_in_fib # Store prediction score in dataframe 
md_df['pred_score'] = tum_labels # Create logistic regression model model = sm.GLM.from_formula("pred_score ~ " " adi_vol " " + density" " + fib_polar_rad" " + adi_polar_rad" " + tum_rad" " + tum_polar_rad" " + tum_z" " + C(tum_in_fib)" , family=sm.families.Binomial(), data=md_df) results = model.fit() # Report results logger.info(results.summary2()) logger.info('\tp-values:') logger.info('\t\t%s' % results.pvalues) # Critical value - look at 95% confidence intervals zstar = norm.ppf(0.95) # Report odds ratio and significance level results for ii in results.params.keys(): logger.info('\t%s' % ii) # Print metadata info coeff = results.params[ii] std_err = results.bse[ii] odds_ratio = np.exp(coeff) # Get odds ratio # Get 95% C.I. for odds ratio or_low = np.exp(coeff - zstar * std_err) or_high = np.exp(coeff + zstar * std_err) # Get p-val pval = results.pvalues[ii] logger.info('\t\tOdds ratio:\t\t\t%.3e\t(%.3e,\t%.3e)' % (odds_ratio, or_low, or_high)) logger.info('\t\tp-value:\t\t\t%.3e' % pval) ########################################################################### print('\n' * 5) logger.info('HEALTHY PREDICTIONS') # Init metadata dataframe md_df = pd.DataFrame() # Get the fibroglandular polar radii fib_polar_rad = np.array([np.sqrt((md['fib_x'] - md['adi_x']) ** 2 + (md['fib_y'] - md['adi_y']) ** 2) for md in healthy_preds]) md_df['fib_polar_rad'] = fib_polar_rad # Get the adipose polar radii adi_polar_rad = np.array([np.sqrt(md['adi_x'] ** 2 + md['adi_y'] ** 2) for md in healthy_preds]) md_df['adi_polar_rad'] = adi_polar_rad # Get breast density in % by volume from each scan, # include in dataframe density = np.array([phant_densities[md['phant_id']] for md in healthy_preds]) md_df['density'] = density # Get Adipose ID from each scan, include in dataframe adi_vols = np.array([phant_vols[md['phant_id']] for md in healthy_preds]) md_df['adi_vol'] = adi_vols # Store prediction score in dataframe md_df['pred_score'] = healthy_labels # Create logistic regression model model = sm.GLM.from_formula("pred_score ~ " " adi_vol " " + density" " + fib_polar_rad" " + adi_polar_rad" , family=sm.families.Binomial(), data=md_df) results = model.fit() # Report results logger.info(results.summary2()) logger.info('\tp-values:') logger.info('\t\t%s' % results.pvalues) # Critical value - look at 95% confidence intervals zstar = norm.ppf(0.95) # Report odds ratio and significance level results for ii in results.params.keys(): logger.info('\t%s' % ii) # Print metadata info coeff = results.params[ii] std_err = results.bse[ii] odds_ratio = np.exp(coeff) # Get odds ratio # Get 95% C.I. for odds ratio or_low = np.exp(coeff - zstar * std_err) or_high = np.exp(coeff + zstar * std_err) # Get p-val pval = results.pvalues[ii] logger.info('\t\tOdds ratio:\t\t\t%.3e\t(%.3e,\t%.3e)' % (odds_ratio, or_low, or_high)) logger.info('\t\tp-value:\t\t\t%.3e' % pval)
python
#encoding=utf-8 import sys #encoding=utf-8 ''' SocialMiner https://github.com/paulyang0125/SocialMiner Copyright (c) 2015 Yao-Nien, Yang Licensed under the MIT license. ''' import re from optparse import OptionParser import nltk #from nltk import * import nltk.cluster import nltk.cluster.kmeans import nltk.cluster.gaac import numpy from nltk.corpus import movie_reviews from nltk.corpus import wordnet #from nltk_contrib.wordnet import * import pickle import time import logging ### TODO ### 1. how to decide which used_classifier should be used - Naive, SVM ??? logger = logging.getLogger('myapp') #logger.setLevel(logging.DEBUG) logger.info('classification.py started') def stripLabels(testFeatures): """ Strips label from a test sentence feature vector """ return [testFeatures[i][0] for i in range(len(testFeatures))] def selectTrainingTestFeatures(featureVectors, cvstart, cvlength, sentences): """ Selects training and test feature subsets. Training set is the contingent sublist from location cvstart to cvlength """ testmappingList = [] trainmappingList = [] test = [] train = [] #test = [featureVectors[i] for i in range(len(featureVectors)) if cvstart <= i < cvstart + cvlength] myindex1 = 0 myindex2 = 0 for i in range(len(featureVectors)): mappingdic = {} if cvstart <= i < cvstart + cvlength: test.append(featureVectors[i]) mappingdic["before_ID"] = i #mappingdic["after_ID"] = test.index(featureVectors[i]) # index only return the first element that matches mappingdic["after_ID"] = myindex1 testmappingList.append(mappingdic) myindex1 += 1 #train = [featureVectors[i] for i in range(len(featureVectors)) if i < cvstart or cvstart + cvlength <= i] for i in range(len(featureVectors)): mappingdic = {} if i < cvstart or cvstart + cvlength <= i: train.append(featureVectors[i]) mappingdic["before_ID"] = i #mappingdic["after_ID"] = train.index(featureVectors[i]) mappingdic["after_ID"] = myindex2 trainmappingList.append(mappingdic) myindex2 += 1 testSents = [sentences[i] for i in range(len(featureVectors)) if cvstart <= i < cvstart + cvlength] assert len(featureVectors) == len(test) + len(train) assert len(testSents) == len(test) logger.debug("testmappingList:") print "testmappingList:" logger.debug(testmappingLis) print testmappingList time.sleep(0.1) #print "trainmappingList:" #print trainmappingList return train, test, testSents, testmappingList def selectPredictionTestFeatures(featureVectors, sentences): testmappingList = [] test = [] #test = [featureVectors[i] for i in range(len(featureVectors))] myindex = 0 for i in range(len(featureVectors)): mappingdic = {} test.append(featureVectors[i]) mappingdic["before_ID"] = i print("exam the feature vector:") print (featureVectors[i]) logger.debug("exam the feature vector:") logger.debug(featureVectors[i]) mappingdic["after_ID"] = myindex testmappingList.append(mappingdic) myindex += 1 testSents = [sentences[i] for i in range(len(featureVectors))] #print "testmappingList:" logger.debug("testmappingList:") #print testmappingList logger.debug(testmappingList) time.sleep(0.1) return test, testSents, testmappingList def classify_prediction(testFeatures, testSentences, messages, opt, used_classifier, testmappingList): #predictedLabelsDic = {} testFeaturesD = stripLabels(testFeatures) assert (testFeatures != None) classifier = used_classifier predictedLabels = classifier.batch_classify(testFeaturesD) print "start to assign the prediction tag into sentence obj" logger.info("start to assign the prediction tag into sentence obj") for msgObj in messages: for senObj in 
msgObj.sentences: for id, label in enumerate(predictedLabels): for test in testmappingList: if test["after_ID"] == id and senObj.vector_id == test["before_ID"]: if label == "Neutr": senObj.predict_opinion = 0 elif label == "Neg": senObj.predict_opinion = -1 elif label == "Pos": senObj.predict_opinion = 1 else: print "no tag, error!!" logger.error("no tag, error!!") #for id, labels in enumerate(predictedLabels) #vectorIDAssign = lambda n: 'http://www.ptt.cc/bbs/' + board_name + '/index' + str(n) + '.html' ## assign result to sentenceObj assert (len(predictedLabels) == len(testSentences)) stats_total = len(predictedLabels) return (stats_total, predictedLabels) def classify_training(trainingFeatures, testFeatures, testSentences, messages, opt, testmappingList): """ Classifies the feature vectos. """ assert (trainingFeatures != None and testFeatures != None) classifier = None; if (opt['cl_naive_bayes']): if opt['verbose']: print "Running NaiveBayes classifier" classifier = nltk.NaiveBayesClassifier.train(trainingFeatures) print "init accuracy for Naive:" logger.info("init accuracy for Naive:") print nltk.classify.accuracy(classifier, testFeatures) logger.info(nltk.classify.accuracy(classifier, testFeatures)) #### TODO ##### elif opt['cl_max_entropy'] != None: if opt['verbose']: logger.info("Running maximum entropy classifier") print "Running maximum entropy classifier" if opt['cl_max_entropy'] == "default": algorithm = None else: algorithm = opt['cl_max_entropy'] traceL=0; if opt['verbose']: traceL=3; elif opt['verbose_all']: traceL=5; classifier = nltk.MaxentClassifier.train(trainingFeatures, algorithm, traceL, max_iter=7) elif opt['cl_decision_tree']: if opt['verbose']: logger.info("Running decision tree classifier") print "Running decision tree classifier" classifier = nltk.DecisionTreeClassifier.train(trainingFeatures, entropy_cutoff=0.5, depth_cutoff=70, support_cutoff=10) if classifier == None: print "No classifier selected! Aborting!" logger.error("No classifier selected! Aborting!") exit(1) testFeaturesD = stripLabels(testFeatures) predictedLabels = classifier.batch_classify(testFeaturesD) ## shit.......... print "start to assign the prediction tag into sentence obj" logger.info("start to assign the prediction tag into sentence obj") for msgObj in messages: for senObj in msgObj.sentences: for id, label in enumerate(predictedLabels): for test in testmappingList: if test["after_ID"] == id and senObj.vector_id == test["before_ID"]: if label == "Neutr": senObj.predict_opinion = 0 elif label == "Neg": senObj.predict_opinion = -1 elif label == "Pos": senObj.predict_opinion = 1 else: print "no tag, error!!" 
logger.error("no tag, error!!") assert (len(predictedLabels) == len(testSentences)) stats_total = 0 stats_correct = 0 for origFV, newLabel in map(None, testFeatures, predictedLabels): origLabel = origFV[1] stats_total = stats_total + 1 if origLabel == newLabel: stats_correct = stats_correct + 1 if opt['verbose']: for l in classifier.labels(): print "'%s'\t" % l, logger.info("'%s'\t" % l,) print "L_orig\tL_new" logger.info("L_orig\tL_new") trainingFeaturesD = stripLabels(trainingFeatures) predLabs2 = classifier.batch_classify(trainingFeaturesD) probcfs = None try: probcfs = classifier.batch_prob_classify(trainingFeaturesD) except Exception: probcfs = ["-" for t in trainingFeaturesD] for pdist, origFV, newLabel in map(None, probcfs, trainingFeatures, predLabs2): origLabel = origFV[1] for l in classifier.labels(): if pdist != "-": print "%.3f\t" % pdist.prob(l), logger.info("%.3f\t" % pdist.prob(l),) else: print "- \t", logger.info("- \t",) print " %s\t%s" % (origLabel, newLabel), logger.info(" %s\t%s" % (origLabel, newLabel),) print "" logger.info("") ##### start to use testset with the text showed probcfs = None try: probcfs = classifier.batch_prob_classify(testFeaturesD) except Exception: probcfs = ["-" for t in testFeaturesD] for pdist, origFV, newLabel, sent in map(None, probcfs, testFeatures, predictedLabels, testSentences): origLabel = origFV[1] for l in classifier.labels(): if pdist != "-": print "%.3f\t" % pdist.prob(l), logger.info("%.3f\t" % pdist.prob(l),) else: print "- \t", logger.info("- \t",) print " %s\t%s" % (origLabel, newLabel), logger.info(" %s\t%s" % (origLabel, newLabel),) if opt['verbose_all']: print "\t%s" % sent.text logger.debug("\t%s" % sent.text) else: print "" logger.info("") stats_perc = 100.0 * stats_correct / stats_total f_measure = evaluateClassificationBCubed([f[1] for f in testFeatures], predictedLabels, opt) if opt['verbose']: if not (opt['cl_naive_bayes'] or not opt['cl_max_entropy']): classifier.show_most_informative_features() return (stats_correct, stats_total, stats_perc, f_measure, classifier, predictedLabels) def evaluateClassificationBCubed(originalLabels, newLabels, opt): label1 = None; label2 = None A = 0; B = 0; C = 0; D = 0; labelPair = map(None, originalLabels, newLabels) precision = 0.0 recall = 0.0 for (e1o, e1n) in labelPair: sameNew = [ (e2o, e2n) for e2o, e2n in labelPair if e1n == e2n ] ## same cluster sameOld = [ (e2o, e2n) for e2o, e2n in labelPair if e1o == e2o ] ## same category sameBoth = [(e2o, e2n) for e2o, e2n in labelPair if e1o == e2o and e1n == e2n] ## same cluster and category precision = precision + 1.0/len(sameNew) * len(sameBoth) recall = recall + 1.0/len(sameOld) * len(sameBoth) precision = precision / len(originalLabels) recall = recall / len(originalLabels) print precision, recall logger.info(precision, recall) Fmeasure = 2 * precision * recall / ( precision + recall ) return Fmeasure def processClassification(mode, featureVectors, allSentences, messages, options, used_classifier = None): if options['training']: print "training mode for Classification!" 
logger.info("training mode for Classification!") ##featureVectors for training : [({'f1':'','f2':''}, 'Subj'), (), () ] crossvalidate = int(1 + 0.01 * len(featureVectors) * float(options['o_crossvalidate'])) crosslen = int(0.01 * float(options['o_testpercentage']) * len(featureVectors) + 1) useCrossvalidation = options['o_crossvalidate'] != -1 cvstart = 0 if not useCrossvalidation: cvstart = len(featureVectors) - crosslen crossvalidate = crosslen results = [] i = 0 while cvstart < len(featureVectors): ## divide features in training and test set featureTraining, featureTest, testSentences, testmappingList = selectTrainingTestFeatures(featureVectors, cvstart, crosslen, allSentences) assert len(featureTraining) > 0 , "There must exist some training features" assert len(featureTest) > 0 , "There must exist some test features" ## perform classification ## res = tuple - (stats_correct, stats_total, stats_perc, f_measure, classifier) res = classify_training(featureTraining, featureTest, testSentences, messages, options, testmappingList) used_classifier = res[4] ## this is classifier, gonna save results.append(res) print "Run %d. Correct: %d / %d (%5.3f %%) [F = %5.3f] "%(i, res[0], res[1], res[2], res[3]) logger.info("Run %d. Correct: %d / %d (%5.3f %%) [F = %5.3f] "%(i, res[0], res[1], res[2], res[3])) cvstart = cvstart + crossvalidate i = i + 1 return evaluateResults(results, used_classifier) else: print "prediction mode for Classification!" logger.info("prediction mode for Classification!") ##featureVectors for predict : [({'f1':'','f2':''}, 'na'), (), () ] featureTest, testSentences, testmappingList = selectPredictionTestFeatures(featureVectors, allSentences) assert len(featureTest) > 0 , "There must exist some test features" res = classify_prediction(featureTest, testSentences, messages, options, used_classifier, testmappingList) stat_all = res[0]; predict_results = res[1] return stat_all , predict_results def evaluateResults(results, used_classifier): avg_correct = 0; avg_all = 0; avg_perc = 0; avg_f = 0 classifiersList = [] for r in results: avg_correct = avg_correct + r[0] avg_all = avg_all + r[1] avg_f = avg_f + r[3] classifiersList.append(r[4]) avg_perc = 100.0 * avg_correct / avg_all total_runs = len(results) avg_correct = avg_correct / total_runs avg_f = avg_f / total_runs avg_all = avg_all / total_runs #saveClassifier(classifiersList) print "RESULTS after %d runs" % total_runs logger.info("RESULTS after %d runs" % total_runs) print "Correct: %d / %d (%5.3f %%) [F = %5.3f]" % (avg_correct, avg_all, avg_perc, avg_f) logger.info("Correct: %d / %d (%5.3f %%) [F = %5.3f]" % (avg_correct, avg_all, avg_perc, avg_f)) # output of process(.) return (avg_correct, avg_all, avg_perc, avg_f, used_classifier), used_classifier
python
#!/usr/bin/python # Copyright 2016 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Allow creation of uart/console interface via usb google serial endpoint.""" import argparse import array import exceptions import os import sys import termios import threading import time import traceback import tty try: import usb except: print "import usb failed" print "try running these commands:" print " sudo apt-get install python-pip" print " sudo pip install --pre pyusb" print "" sys.exit(-1) """Class Susb covers USB device discovery and initialization. It can find a particular endpoint by vid:pid, serial number, and interface number. """ class SusbError(Exception): """Class for exceptions of Susb.""" def __init__(self, msg, value=0): """SusbError constructor. Args: msg: string, message describing error in detail value: integer, value of error when non-zero status returned. Default=0 """ super(SusbError, self).__init__(msg, value) self.msg = msg self.value = value class Susb(): """Provide USB functionality. Instance Variables: _read_ep: pyUSB read endpoint for this interface _write_ep: pyUSB write endpoint for this interface """ READ_ENDPOINT = 0x81 WRITE_ENDPOINT = 0x1 TIMEOUT_MS = 100 def __init__(self, vendor=0x18d1, product=0x500f, interface=1, serialname=None): """Susb constructor. Discovers and connects to USB endpoints. Args: vendor : usb vendor id of device product : usb product id of device interface : interface number ( 1 - 8 ) of device to use serialname: string of device serialnumber. Raises: SusbError: An error accessing Susb object """ # Find the device. dev_list = usb.core.find(idVendor=vendor, idProduct=product, find_all=True) if dev_list is None: raise SusbError("USB device not found") # Check if we have multiple devices. dev = None if serialname: for d in dev_list: dev_serial = "PyUSB doesn't have a stable interface" try: dev_serial = usb.util.get_string(d, 256, d.iSerialNumber) except: dev_serial = usb.util.get_string(d, d.iSerialNumber) if dev_serial == serialname: dev = d break if dev is None: raise SusbError("USB device(%s) not found" % serialname) else: try: dev = dev_list[0] except: try: dev = dev_list.next() except: raise SusbError("USB device %04x:%04x not found" % (vendor, product)) # If we can't set configuration, it's already been set. try: dev.set_configuration() except usb.core.USBError: pass # Get an endpoint instance. cfg = dev.get_active_configuration() intf = usb.util.find_descriptor(cfg, bInterfaceNumber=interface) self._intf = intf if not intf: raise SusbError("Interface not found") # Detach raiden.ko if it is loaded. if dev.is_kernel_driver_active(intf.bInterfaceNumber) is True: dev.detach_kernel_driver(intf.bInterfaceNumber) read_ep_number = intf.bInterfaceNumber + self.READ_ENDPOINT read_ep = usb.util.find_descriptor(intf, bEndpointAddress=read_ep_number) self._read_ep = read_ep write_ep_number = intf.bInterfaceNumber + self.WRITE_ENDPOINT write_ep = usb.util.find_descriptor(intf, bEndpointAddress=write_ep_number) self._write_ep = write_ep """Suart class implements a stream interface, to access Google's USB class. This creates a send and receive thread that monitors USB and console input and forwards them across. This particular class is hardcoded to stdin/out. """ class SuartError(Exception): """Class for exceptions of Suart.""" def __init__(self, msg, value=0): """SuartError constructor. 
Args: msg: string, message describing error in detail value: integer, value of error when non-zero status returned. Default=0 """ super(SuartError, self).__init__(msg, value) self.msg = msg self.value = value class Suart(): """Provide interface to serial usb endpoint.""" def __init__(self, vendor=0x18d1, product=0x501c, interface=0, serialname=None): """Suart contstructor. Initializes USB stream interface. Args: vendor: usb vendor id of device product: usb product id of device interface: interface number of device to use serialname: Defaults to None. Raises: SuartError: If init fails """ self._susb = Susb(vendor=vendor, product=product, interface=interface, serialname=serialname) self._exit = False def exit(self): self._exit = True def running(self): return (not self._exit) def __del__(self): """Suart destructor.""" self.exit() def run_rx_thread(self): while self.running(): try: r = self._susb._read_ep.read(64, self._susb.TIMEOUT_MS) if r: sys.stdout.write(r.tostring()) sys.stdout.flush() except Exception as e: # If we miss some characters on pty disconnect, that's fine. # ep.read() also throws USBError on timeout, which we discard. if type(e) not in [exceptions.OSError, usb.core.USBError]: print "rx %s" % e def run_tx_thread(self): while self.running(): try: r = sys.stdin.read(1) if r == '\x03': self.exit() if r: self._susb._write_ep.write(array.array('B', r), self._susb.TIMEOUT_MS) except Exception as e: print "tx %s" % e def run(self): """Creates pthreads to poll USB & PTY for data. """ self._exit = False self._rx_thread = threading.Thread(target=self.run_rx_thread, args=[]) self._rx_thread.daemon = True self._rx_thread.start() self._tx_thread = threading.Thread(target=self.run_tx_thread, args=[]) self._tx_thread.daemon = True self._tx_thread.start() """Terminal settings cleanup.""" def force_exit(): global old_settings global fd termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) os.system("stty echo") sys.exit(0) """Command line functionality Allows specifying vid:pid, serialnumber, interface. Ctrl-C exits. """ parser = argparse.ArgumentParser(description="Open a console to a USB device") parser.add_argument('-d', '--device', type=str, help="vid:pid of target device", default="18d1:501c") parser.add_argument('-i', '--interface', type=int, help="interface number of console", default=0) parser.add_argument('-s', '--serialno', type=str, help="serial number of device", default="") def main(): args = parser.parse_args() vidstr, pidstr = args.device.split(':') vid = int(vidstr, 16) pid = int(pidstr, 16) serialno = args.serialno interface = args.interface sobj = Suart(vendor=vid, product=pid, interface=interface, serialname=serialno) try: tty.setraw(sys.stdin.fileno()) except: pass sobj.run() # run() is a thread so just busy wait to mimic server while sobj.running(): time.sleep(.1) if __name__ == '__main__': global old_settings global fd try: os.system("stty -echo") fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) except: pass try: main() except KeyboardInterrupt: sobj.exit() except Exception as e: try: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) os.system("stty echo") finally: traceback.print_exc() finally: force_exit()
python
import numpy as np


# general convolve framework
def convframe(input, weight, output=None, init=0, mode='reflect',
              buffertype=None, keeptype=True, func=None):
    if output is None:
        output = np.zeros(input.shape, buffertype or input.dtype)
    output[:] = input if init is None else init
    buf = np.zeros_like(output)
    coreshp = weight.shape
    coremar = np.array(weight.shape) // 2
    padimg = np.pad(input, [(i, i) for i in coremar], mode=mode)
    rcs = np.mgrid[tuple([slice(i) for i in coreshp])]
    rcs = rcs.reshape(input.ndim, -1).T
    for idx, w in zip(rcs, weight.ravel()):
        start, end = idx, idx + input.shape
        s = [slice(i, j) for i, j in zip(start, end)]
        buf[:] = padimg[tuple(s)]
        func(buf, output, w)
    return output.astype(input.dtype) if keeptype else output


# split convolve along each axis
def axisframe(img, core, mode='reflect', f=None):
    dtype = img.dtype
    for i in range(len(core)):
        shape = np.ones(img.ndim, dtype=np.int8)
        shape[i] = -1
        if core[i].size == 1:
            img = img * core[i]
            continue
        c = core[i].reshape(shape)
        print(c.shape)
        img = f(img, c, output=None, mode=mode, keeptype=False)
    return img.astype(dtype)


def convolve(input, weight, output=None, mode='reflect', keeptype=True):
    def f(buf, output, w):
        buf *= w
        output += buf
    return convframe(input, weight, output, 0, mode, 'float32', keeptype, f)


def uniform_filter(img, size=3, mode='reflect'):
    if not hasattr(size, '__len__'):
        size = [size] * img.ndim

    def core(s):
        if s <= 1:
            return np.array([1])
        return np.ones(s).astype('float32') / s

    cores = [core(i) for i in size]
    return axisframe(img, cores, mode, convolve)


def gaussian_filter(img, sig=2, mode='reflect'):
    if not hasattr(sig, '__len__'):
        sig = [sig] * img.ndim

    def core(s):
        if s == 0:
            return np.array([1])
        x = np.arange(-int(s * 2.5 + 0.5), int(s * 2.5 + 0.5) + 1)
        return np.exp(-x ** 2 / 2 / s ** 2) / s / (2 * np.pi) ** 0.5

    cores = [core(i) for i in sig]
    return axisframe(img, cores, mode, convolve)


def _maximum_filter(input, weight=None, output=None, mode='reflect', keeptype=True):
    def f(buf, output, w):
        if w > 0:
            np.maximum(buf, output, out=output)
    return convframe(input, weight, output, None, mode, None, keeptype, f)


def maximum_filter(input, size=None, footprint=None, output=None, mode='reflect', keeptype=True):
    if not footprint is None:
        return _maximum_filter(input, footprint, output, mode)
    if not hasattr(size, '__len__'):
        size = [size] * input.ndim
    cores = [np.ones(i, 'bool') for i in size]
    return axisframe(input, cores, mode, _maximum_filter)


def _minimum_filter(input, weight=None, output=None, mode='reflect', keeptype=True):
    def f(buf, output, w):
        if w > 0:
            np.minimum(buf, output, out=output)
    return convframe(input, weight, output, None, mode, None, keeptype, f)


def minimum_filter(input, size=None, footprint=None, output=None, mode='reflect', keeptype=True):
    if not footprint is None:
        return _minimum_filter(input, footprint, output, mode)
    if not hasattr(size, '__len__'):
        size = [size] * input.ndim
    cores = [np.ones(i, 'bool') for i in size]
    return axisframe(input, cores, mode, _minimum_filter)


if __name__ == '__main__':
    from skimage.data import camera
    import matplotlib.pyplot as plt

    img = camera()
    simg = minimum_filter(img, footprint=np.ones((10, 10)))
    plt.imshow(simg, cmap='gray')
    plt.show()
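
# Illustrative cross-check (an assumption, not part of the original file): for simple
# inputs these filters should track their scipy.ndimage counterparts. Exact equality
# is not expected, because the Gaussian kernel here is truncated at ~2.5 sigma and
# results are cast back to the input dtype.
def _compare_with_scipy(img):
    from scipy import ndimage  # assumes scipy is installed
    ours = gaussian_filter(img, sig=2)
    ref = ndimage.gaussian_filter(img.astype('float32'), sigma=2, mode='reflect')
    return np.abs(ours.astype('float32') - ref).max()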
python
import unittest
from user import User


class UserTest(unittest.TestCase):
    """
    Test class that defines test cases for the User class behaviours.

    Args:
        unittest.TestCase: Inherits the TestCase class that helps in creating test cases
    """

    def setUp(self):
        """
        Set up method to run before each test case.
        """
        self.new_user = User("user100", "1100")

    def test_init(self):
        """
        test_init test case to test if the object is initialized properly
        """
        self.assertEqual(self.new_user.login_name, "user100")
        self.assertEqual(self.new_user.pin, "1100")

    def test_save_user(self):
        """
        test_save_user test case to test if the user object is saved into the user list
        """
        self.new_user.save_user()
        self.assertEqual(len(User.user_list), 1)

    def test_user_auth(self):
        """
        test_user_auth test case to authenticate the user
        """
        self.assertTrue(self.new_user.user_auth("user100", "1100"))


if __name__ == "__main__":
    unittest.main()
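
# For reference, a minimal sketch of the `user` module these tests assume
# (hypothetical -- the real implementation may store or validate users differently;
# kept commented out so it does not shadow the imported User class):
#
# class User:
#     user_list = []
#
#     def __init__(self, login_name, pin):
#         self.login_name = login_name
#         self.pin = pin
#
#     def save_user(self):
#         User.user_list.append(self)
#
#     def user_auth(self, login_name, pin):
#         return self.login_name == login_name and self.pin == pin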
python
# Generated by Django 2.0.8 on 2018-08-12 16:09
from django.db import migrations, models
from django_add_default_value import AddDefaultValue


class Migration(migrations.Migration):

    dependencies = [("dadv", "0001_initial")]

    operations = [
        migrations.CreateModel(
            name="TestTextDefault",
            fields=[
                ("id", models.BigAutoField(primary_key=True, serialize=False)),
                ("description", models.TextField(default="No description provided")),
            ],
        ),
        AddDefaultValue(
            model_name="TestTextDefault",
            name="description",
            value="No description provided",
        ),
    ]
python
import boto3
import json
from datetime import datetime, timedelta
from botocore.client import Config


def handler(event, context):
    s3 = boto3.client('s3', config=Config(signature_version='s3v4'))
    BUCKET_NAME = 'photostorage113550-dev'
    s3_bucket_content = s3.list_objects(Bucket=BUCKET_NAME)['Contents']

    contents = []
    for obj in s3_bucket_content:
        key = obj['Key'].replace('.jpg', '')
        params = {'Bucket': BUCKET_NAME, 'Key': obj['Key']}
        date = obj['LastModified']  # a timezone-aware datetime object
        print(date)
        url = s3.generate_presigned_url('get_object', params, ExpiresIn=600)
        contents.append({
            'key': key,
            'date': date.strftime("%d-%b-%Y %H:%M:%S"),
            'url': url
        })

    return {
        'contents': contents
    }
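
# Illustrative follow-on (assumed, not part of the Lambda handler): a caller can fetch
# one of the returned pre-signed URLs directly, e.g. with the standard library.
import urllib.request

def download_first_photo(handler_result, dest_path='photo.jpg'):
    """Download the first photo listed by handler() using its pre-signed URL."""
    url = handler_result['contents'][0]['url']
    with urllib.request.urlopen(url) as resp, open(dest_path, 'wb') as out:
        out.write(resp.read())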
python
import collections import logging import os import time import suds.xsd.doctor import suds.client from suds.plugin import MessagePlugin from suds import WebFault from . import base logger = logging.getLogger(__name__) # Suds has broken array marshaling. See these links: # http://stackoverflow.com/questions/3519818/suds-incorrect-marshaling-of-array-of-arrays # https://fedorahosted.org/suds/ticket/340 class FixArrayPlugin(MessagePlugin): def marshalled(self, context): command = context.envelope.getChild('Body').getChildren()[0] # TODO: instead of blacklisting the affected types here, check the # actual WSDL and fix up any *ArrayArray types. affected = ('addNodes', 'addDrainingNodes', 'removeNodes', 'removeDrainingNodes', 'disableNodes', 'enableNodes', 'addPool', ) if command.name in affected: context.envelope.addPrefix( 'xsd', 'http://www.w3.org/1999/XMLSchema', ) child_spec = collections.defaultdict( lambda: 'values', addPool='nodes', disableNodes='nodes') values = command.getChild(child_spec[command.name]) values.set('SOAP-ENC:arrayType', 'xsd:list[1]') values.set('xsi:type', 'SOAP-ENC:Array') item = values[0] item.set('SOAP-ENC:arrayType', 'xsd:list[%s]' % len(item.children)) item.set('xsi:type', 'SOAP-ENC:Array') class StingrayBalancer(base.Balancer): def __init__(self, config): self.url = config['URL'] imp = suds.xsd.doctor.Import( 'http://schemas.xmlsoap.org/soap/encoding/') imp.filter.add('http://soap.zeus.com/zxtm/1.0/') doctor = suds.xsd.doctor.ImportDoctor(imp) # zxtm_pool.wsdl must be present in the same directory as this file. here = os.path.dirname(os.path.realpath(__file__)) wsdl = os.path.join(here, 'stingray_pool.wsdl') self.client = suds.client.Client( 'file:' + wsdl, username=config['USER'], password=config['PASSWORD'], location=self.url, plugins=[doctor, FixArrayPlugin()]) # All pool names will be prefixed with this string. self.pool_prefix = config.get('POOL_PREFIX', '') # Stingray has separate calls for disableNodes and removeNodes. The # latter will interrupt current connections. To minimize disruption, # we'll call disableNodes first, wait a configurable amount of time, # and then call removeNodes. self.grace_period = config.get('GRACE_PERIOD', 2) def _call_node_func(self, func, pool, nodes): # Generic function for calling any of the Stingray pool functions that # accept an array of pools, and an arrayarray of nodes. This function # will take a single pool and nodelist and do all the necessary # wrapping. nodes_wrapper = self.client.factory.create('StringArrayArray') nodes_array = self.client.factory.create('StringArray') nodes_array.item = nodes nodes_wrapper.item = [nodes_array] func([self.pool_prefix + pool], nodes_wrapper) def add_nodes(self, pool, nodes): # Stingray will kindly avoid creating duplicates if you submit a node # that is already in the pool. logger.info('Adding nodes %s to pool %s', nodes, pool) try: self._call_node_func(self.client.service.addNodes, pool, nodes) except WebFault as wf: if 'Unknown pool' in wf.message: # If pool doesn't exist, create it. self.add_pool(pool, nodes) else: raise def delete_nodes(self, pool, nodes): existing_nodes = set(self.get_nodes(pool)) nodes = list(existing_nodes.intersection(nodes)) if not nodes: logger.info('No nodes to delete from pool %s', pool) return logger.info('Deleting nodes %s from pool %s', nodes, pool) try: self._call_node_func(self.client.service.disableNodes, pool, nodes) # wait <grace_period> seconds for connections to finish before # zapping nodes completely. 
time.sleep(self.grace_period) self._call_node_func(self.client.service.removeNodes, pool, nodes) except WebFault as wf: if 'Unknown pool' in wf.message: # If you try to delete nodes from a pool, and it doesn't exist, # that's fine. pass else: raise # Clean up pool in StingRay self.delete_pool_if_empty(pool) def add_pool(self, pool, nodes): logger.info('Adding new pool %s', pool) self._call_node_func(self.client.service.addPool, pool, nodes) def delete_pool(self, pool): logger.info('Deleting pool %s', pool) try: self.client.service.deletePool([self.pool_prefix + pool]) except WebFault as wf: if 'Unknown pool' in str(wf): pass else: raise def delete_pool_if_empty(self, pool): nodes = self.get_nodes(pool) if not nodes: logger.info('Pool %s is empty', pool) self.delete_pool(pool) def get_nodes(self, pool): logger.info('Getting nodes for pool %s', pool) try: # get just the first item from the arrayarray nodes = self.client.service.getNodes([self.pool_prefix + pool])[0] except WebFault as wf: if 'Unknown pool' in wf.message: return [] else: raise # convert the sax text things into real strings return [str(n) for n in nodes]
python
import os
import random
import sys, getopt


def getDesiredROMCount():
    # Asks the user how many roms they want selected, loops until it gets a valid input
    asking = True
    numFiles = 0
    while asking:
        try:
            numFiles = int(input("Please enter the number of games you'd like randomly selected for analysis: "))
            asking = False
        except ValueError:
            print("Invalid input, please try again")
    return numFiles


def getAllFilesInRomDirectory(romDirectory):
    # Lists all files in the working directory
    all_files = os.listdir(romDirectory)
    if len(all_files) == 1:
        print("Please put this script in the directory containing your rom files")
        return []
    else:
        return all_files


def pickROMS(rom_list, count):
    # This function does the randomization from the list of roms obtained
    selections = []
    i = 0
    # Ensures that we don't ask for more ROM files than are available in the folder
    lower = min(len(rom_list), count)
    while i in range(0, lower):
        selections.append(random.choice(rom_list))
        i += 1
    return selections


def main(dir, romFormats):
    print("Welcome to the Game Randomizer.")
    print("You can use this small program to pick a specified number of random ROMS from a folder containing a collection of them.")
    numFiles = getDesiredROMCount()
    all_files = getAllFilesInRomDirectory(dir)

    # Filters the rom files from all the files in the directory
    rom_files = list(filter(lambda f: f[-3:] in romFormats, all_files))
    if len(rom_files) == 0:
        print("No valid ROM files found")
        return

    # The main loop of the program - picks roms until the user no longer wants to do that
    picking = True
    while picking:
        selected_files = pickROMS(rom_files, numFiles)
        print("\nThe games that have been chosen for you are: ")
        for count, fileName in enumerate(selected_files):
            print(str(count + 1) + ": " + fileName)
        pickAgain = str(input("\nDo you want to pick again(Y/N)? ")).upper()
        if pickAgain == 'Y':
            picking = True
        else:
            print("Thank you! Goodbye!")
            picking = False


if __name__ == '__main__':
    directory = os.getcwd()
    romformats = ["zip"]

    # Parse the command line arguments
    try:
        options, arguments = getopt.getopt(sys.argv[1:], "hd:f:", ["help", "directory=", "romformat="])
        for opt, arg in options:
            if opt in ('-h', "--help"):
                print("gamerandomizer.py -d <path to search directory> -f <rom file format>")
                print("The default rom file format searched for is zip, but to specify any custom formats, enter them as comma separated values with no spaces eg. 'zip,smc'")
                sys.exit()
            elif opt in ("-d", "--directory"):
                directory = arg
            elif opt in ("-f", "--romformat"):
                romformats = arg.split(',')  # assign to the variable actually passed to main()
            else:
                raise getopt.GetoptError("Unhandled option: " + opt)
    except (getopt.GetoptError, ValueError):
        # If there is an error parsing the arguments, display the error message and quit
        print("You have entered invalid command line arguments. Type 'gamerandomizer.py -h' or 'gamerandomizer.py --help' for usage instructions")
        sys.exit()

    main(directory, romformats)
python