import sys
import argparse
import logging
import textwrap
from pathlib import Path
import contextlib
from tqdm.auto import tqdm
import ase.io
import torch
from nequip.utils import Config
from nequip.data import AtomicData, Collater, dataset_from_config
from nequip.train import Trainer
from nequip.scripts.deploy import load_deployed_model
from nequip.scripts.train import default_config, _set_global_options
from nequip.utils import load_file, instantiate
from nequip.train.loss import Loss
from nequip.train.metrics import Metrics
from nequip.scripts.logger import set_up_script_logger
def main(args=None, running_as_script: bool = True):
# in results dir, do: nequip-deploy build . deployed.pth
parser = argparse.ArgumentParser(
description=textwrap.dedent(
"""Compute the error of a model on a test set using various metrics.
The model, metrics, dataset, etc. can be specified individually, or a training session can be indicated with `--train-dir`.
In order of priority, the global settings (dtype, TensorFloat32, etc.) are taken from:
1. The model config (for a training session)
2. The dataset config (for a deployed model)
3. The defaults
Prints only the final result in `name = num` format to stdout; all other information is logged to stderr.
WARNING: Please note that results of CUDA models are rarely exactly reproducible, and that even CPU models can be nondeterministic.
"""
)
)
parser.add_argument(
"--train-dir",
help="Path to a working directory from a training session.",
type=Path,
default=None,
)
parser.add_argument(
"--model",
help="A deployed or pickled NequIP model to load. If omitted, defaults to `best_model.pth` in `train_dir`.",
type=Path,
default=None,
)
parser.add_argument(
"--dataset-config",
help="A YAML config file specifying the dataset to load test data from. If omitted, `config.yaml` in `train_dir` will be used",
type=Path,
default=None,
)
parser.add_argument(
"--metrics-config",
help="A YAML config file specifying the metrics to compute. If omitted, `config.yaml` in `train_dir` will be used. If the config does not specify `metrics_components`, the default is to logging.debug MAEs and RMSEs for all fields given in the loss function. If the literal string `None`, no metrics will be computed.",
type=str,
default=None,
)
parser.add_argument(
"--test-indexes",
help="Path to a file containing the indexes in the dataset that make up the test set. If omitted, all data frames *not* used as training or validation data in the training session `train_dir` will be used.",
type=Path,
default=None,
)
parser.add_argument(
"--batch-size",
help="Batch size to use. Larger is usually faster on GPU. If you run out of memory, lower this.",
type=int,
default=50,
)
parser.add_argument(
"--device",
help="Device to run the model on. If not provided, defaults to CUDA if available and CPU otherwise.",
type=str,
default=None,
)
parser.add_argument(
"--output",
help="XYZ file to write out the test set and model predicted forces, energies, etc. to.",
type=Path,
default=None,
)
parser.add_argument(
"--log",
help="log file to store all the metrics and screen logging.debug",
type=Path,
default=None,
)
# Something has to be provided
# See https://stackoverflow.com/questions/22368458/how-to-make-argparse-print-usage-when-no-option-is-given-to-the-code
if len(sys.argv) == 1:
parser.print_help()
parser.exit()
# Parse the args
args = parser.parse_args(args=args)
# Do the defaults:
dataset_is_from_training: bool = False
if args.train_dir:
if args.dataset_config is None:
args.dataset_config = args.train_dir / "config.yaml"
dataset_is_from_training = True
if args.metrics_config is None:
args.metrics_config = args.train_dir / "config.yaml"
if args.model is None:
args.model = args.train_dir / "best_model.pth"
if args.test_indexes is None:
# Find the remaining indexes that aren't train or val
trainer = torch.load(
str(args.train_dir / "trainer.pth"), map_location="cpu"
)
train_idcs = set(trainer["train_idcs"].tolist())
val_idcs = set(trainer["val_idcs"].tolist())
else:
train_idcs = val_idcs = None
# update
if args.metrics_config == "None":
args.metrics_config = None
elif args.metrics_config is not None:
args.metrics_config = Path(args.metrics_config)
do_metrics = args.metrics_config is not None
# validate
if args.dataset_config is None:
raise ValueError("--dataset-config or --train-dir must be provided")
if args.metrics_config is None and args.output is None:
raise ValueError(
"Nothing to do! Must provide at least one of --metrics-config, --train-dir (to use training config for metrics), or --output"
)
if args.model is None:
raise ValueError("--model or --train-dir must be provided")
if args.output is not None:
if args.output.suffix != ".xyz":
raise ValueError("Only extxyz format for `--output` is supported.")
if args.device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
device = torch.device(args.device)
if running_as_script:
set_up_script_logger(args.log)
logger = logging.getLogger("nequip-evaluate")
logger.setLevel(logging.INFO)
logger.info(f"Using device: {device}")
if device.type == "cuda":
logger.info(
"WARNING: please note that models running on CUDA are usually nondeterministc and that this manifests in the final test errors; for a _more_ deterministic result, please use `--device cpu`",
)
# Load model:
logger.info("Loading model... ")
model_from_training: bool = False
try:
model, _ = load_deployed_model(
args.model,
device=device,
set_global_options=True,  # don't warn about setting the global options
)
logger.info("loaded deployed model.")
except ValueError:  # it's not a deployed model
model, _ = Trainer.load_model_from_training_session(
traindir=args.model.parent, model_name=args.model.name
)
model_from_training = True
model = model.to(device)
logger.info("loaded model from training session")
model.eval()
# Load a config file
logger.info(
f"Loading {'original ' if dataset_is_from_training else ''}dataset...",
)
config = Config.from_file(str(args.dataset_config))
# set global options
if model_from_training:
# Use the model config, regardless of dataset config
global_config = args.model.parent / "config.yaml"
global_config = Config.from_file(str(global_config), defaults=default_config)
_set_global_options(global_config)
del global_config
else:
# the global settings for a deployed model are set by
# set_global_options in the call to load_deployed_model
# above
pass
dataset_is_validation: bool = False
# Currently, pytorch_geometric prints some status messages to stdout while loading the dataset
# TODO: fix may come soon: https://github.com/rusty1s/pytorch_geometric/pull/2950
# Until it does, just redirect them.
with contextlib.redirect_stdout(sys.stderr):
try:
# Try to get validation dataset
dataset = dataset_from_config(config, prefix="validation_dataset")
dataset_is_validation = True
except KeyError:
# Get shared train + validation dataset
dataset = dataset_from_config(config)
logger.info(
f"Loaded {'validation_' if dataset_is_validation else ''}dataset specified in {args.dataset_config.name}.",
)
c = Collater.for_dataset(dataset, exclude_keys=[])
# Determine the test set
# this makes no sense if a dataset is given separately
if (
args.test_indexes is None
and train_idcs is not None
and dataset_is_from_training
):
# we know the train and val, get the rest
all_idcs = set(range(len(dataset)))
# set operations
if dataset_is_validation:
test_idcs = list(all_idcs - val_idcs)
logger.info(
f"Using origial validation dataset minus validation set frames, yielding a test set size of {len(test_idcs)} frames.",
)
else:
test_idcs = list(all_idcs - train_idcs - val_idcs)
assert set(test_idcs).isdisjoint(train_idcs)
logger.info(
f"Using origial training dataset minus training and validation frames, yielding a test set size of {len(test_idcs)} frames.",
)
# No matter what it should be disjoint from validation:
assert set(test_idcs).isdisjoint(val_idcs)
if not do_metrics:
logger.info(
"WARNING: using the automatic test set ^^^ but not computing metrics, is this really what you wanted to do?",
)
elif args.test_indexes is None:
# Default to all frames
test_idcs = torch.arange(dataset.len())
logger.info(
f"Using all frames from the specified test dataset, yielding a test set size of {len(test_idcs)} frames.",
)
else:
# load from file
test_idcs = load_file(
supported_formats=dict(
torch=["pt", "pth"], yaml=["yaml", "yml"], json=["json"]
),
filename=str(args.test_indexes),
)
logger.info(
f"Using provided test set indexes, yielding a test set size of {len(test_idcs)} frames.",
)
# Figure out what metrics we're actually computing
if do_metrics:
metrics_config = Config.from_file(str(args.metrics_config))
metrics_components = metrics_config.get("metrics_components", None)
# See trainer.py: init() and init_metrics()
# Default to loss functions if no metrics specified:
if metrics_components is None:
loss, _ = instantiate(
builder=Loss,
prefix="loss",
positional_args=dict(coeffs=metrics_config.loss_coeffs),
all_args=metrics_config,
)
metrics_components = []
for key, func in loss.funcs.items():
params = {
"PerSpecies": type(func).__name__.startswith("PerSpecies"),
}
metrics_components.append((key, "mae", params))
metrics_components.append((key, "rmse", params))
metrics, _ = instantiate(
builder=Metrics,
prefix="metrics",
positional_args=dict(components=metrics_components),
all_args=metrics_config,
)
metrics.to(device=device)
batch_i: int = 0
batch_size: int = args.batch_size
logger.info("Starting...")
with contextlib.ExitStack() as context_stack:
# "None" checks if in a TTY and disables if not
prog = context_stack.enter_context(tqdm(total=len(test_idcs), disable=None))
if do_metrics:
display_bar = context_stack.enter_context(
tqdm(
bar_format=""
if prog.disable # prog.ncols doesn't exist if disabled
else ("{desc:." + str(prog.ncols) + "}"),
disable=None,
)
)
if args.output is not None:
output = context_stack.enter_context(open(args.output, "w"))
else:
output = None
while True:
datas = [
dataset[int(idex)]
for idex in test_idcs[batch_i * batch_size : (batch_i + 1) * batch_size]
]
if len(datas) == 0:
break
batch = c.collate(datas)
batch = batch.to(device)
out = model(AtomicData.to_AtomicDataDict(batch))
with torch.no_grad():
# Write output
# TODO: make sure don't keep appending to existing file
if output is not None:
ase.io.write(
output,
AtomicData.from_AtomicDataDict(out)
.to(device="cpu")
.to_ase(type_mapper=dataset.type_mapper),
format="extxyz",
append=True,
)
# Accumulate metrics
if do_metrics:
metrics(out, batch)
display_bar.set_description_str(
" | ".join(
f"{k} = {v:4.4f}"
for k, v in metrics.flatten_metrics(
metrics.current_result()
)[0].items()
)
)
batch_i += 1
prog.update(batch.num_graphs)
prog.close()
if do_metrics:
display_bar.close()
if do_metrics:
logger.info("\n--- Final result: ---")
logger.critical(
"\n".join(
f"{k:>20s} = {v:< 20f}"
for k, v in metrics.flatten_metrics(metrics.current_result())[0].items()
)
)
if __name__ == "__main__":
main(running_as_script=True)
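# A hypothetical invocation of this script (the paths below are illustrative only):
#   nequip-evaluate --train-dir path/to/training/session --batch-size 32 \
#       --output test_predictions.xyz --log evaluate.log
# The final metrics go to stdout in `name = num` format; everything else is
# logged to stderr (and to --log, if given), as described in the help text above.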
|
python
|
#!/usr/bin/python3
import datetime
import inquirer
import requests
import re
import csv
import os
import json
repositories = [
"beagle",
"beagle-web-react",
"beagle-web-core",
"beagle-web-angular",
"charlescd",
"charlescd-docs",
"horusec",
"horusec-engine-docs",
"ritchie-cli",
"ritchie-formulas",
"ritchie-formulas-demo"
]
def run(token):
insights = []
authorization = f"token {token}"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization" : authorization,
}
for repository in repositories:
repo_url = f"https://api.github.com/repos/ZupIT/{repository}"
print(f"🐙 Getting insights for ZupIT's \033[36m{repository}\033[0m repository.")
traffic = requests.get(
url = repo_url + "/traffic/views",
headers = headers,
).json()
clones = requests.get(
url = repo_url + "/traffic/clones",
headers = headers,
).json()
contributors = requests.get(
url = repo_url + "/contributors",
headers = headers,
).json()
repo_stats = requests.get(
url = repo_url,
headers = headers,
).json()
try:
clones = clones["count"]
except (IndexError, KeyError) :
clones = "-"
try:
forks = repo_stats["forks_count"]
except (IndexError, KeyError):
forks = "-"
try:
stars = repo_stats["stargazers_count"]
except (IndexError, KeyError):
stars = "-"
try:
watchers = repo_stats["subscribers_count"]
except (IndexError, KeyError):
watchers = "-"
try:
views = traffic["count"]
except (IndexError, KeyError):
views = "-"
try:
uniques = traffic["uniques"]
except (IndexError, KeyError):
uniques = "-"
insights.append(
{
"repo": repository,
"views": views,
"uniques": uniques,
"clones": clones,
"contributors": len(contributors),
"contributors_list": contributors,
"forks": forks,
"stars": stars,
"watchers": watchers,
}
)
create_csv_file(insights)
def get_repositories(url, headers):
result = []
r = requests.get(
url = url,
headers = headers
)
if "next" in r.links :
result += get_repositories(r.links["next"]["url"], headers)
for data in r.json():
result.append(data["name"])
return result
def create_csv_file(insights):
current_date = datetime.datetime.now()
current_date_format = current_date.strftime("%m-%d-%Y-%Hh%M")
current_date_format_string = str(current_date_format)
filename = "zup-insights-" + current_date_format_string + ".csv"
header = ["Repository", "Views (14d)", "Uniques (14d)", "Clones (14d)", "Contributors", "Forks", "Stars", "Watchers"]
with open(filename, 'w', newline='') as file:
    writer = csv.DictWriter(file, fieldnames=header)
    writer.writeheader()
    row_writer = csv.writer(file)
    for insight in insights:
        row_writer.writerow([insight["repo"], insight["views"], insight["uniques"], insight["clones"], insight["contributors"], insight["forks"], insight["stars"], insight["watchers"]])
print(f"\n\033[1m✅ Successfully generated \033[4m{filename}\033[0m\033[1m file for ZupIT's repositories\033[0m")
|
python
|
# Generated by Django 3.2.4 on 2021-06-18 13:58
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0008_historicalproduct'),
]
operations = [
migrations.AlterField(
model_name='coupon',
name='percent',
field=models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)], verbose_name='Coupon Percentage'),
),
]
|
python
|
import sys, os, copy, time, random
# global_list is all possible guesses
# answer_list is all possible solutions
mode = input("Are you playing Unlimited? ")
if mode[0].lower() == "y":
answer_file = "all_words.txt"
all_file = "all_words.txt"
else:
answer_file = "answers.txt"
all_file = "all_words.txt"
with open(os.path.join(sys.path[0], all_file),"r") as file:
global_list = file.read().splitlines()
with open(os.path.join(sys.path[0], answer_file),"r") as file:
answer_list = file.read().splitlines()
# best_guesses_list is precalculated best guesses for first solution
with open(os.path.join(sys.path[0], "best_words2.txt"),"r") as file:
best_guesses_list = file.read().splitlines()
best_guesses = {}
for i in range(len(best_guesses_list)):
key = best_guesses_list[i][:5]
value = float(best_guesses_list[i][8:])
best_guesses[key] = value
best_guesses = {k: v for k, v in sorted(best_guesses.items(), key = lambda x: x[1], reverse=True)}
# takes a word and outputs its point value, aka, how good of a guess it is
# point value calculated by determining the total number of words it can rule out over every combination of grey, yellow, green,
# then adjusting it based on probability of ruling out such a case
# grey:0, yellow:1, green:2
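# Rough worked example of the weighting below (numbers are illustrative):
# with 2309 possible answers, a guess/clue pair that rules out 1500 of them
# contributes (1500/2309) * (2309 - 1500)/2309 ≈ 0.23 points, so clues that
# are both likely to occur and highly informative score the most.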
def calculate_point_value(_word: str) -> float:
total = len(answer_list)
pts = 0
for first in range(3):
for second in range(3):
for third in range(3):
for fourth in range(3):
for fifth in range(3):
clue = str(first) + str(second) + str(third) + str(fourth) + str(fifth)
if is_valid(_word, clue):
temp = rule_outs(_word, clue)
pts += (temp/total)*(total-temp)/total
return pts
# returns the number of words ruled out given a word and a clue
def rule_outs(_word: str , _clue: str , alist = None) -> int:
if alist is None:
alist = copy.deepcopy(answer_list)
original_len = len(alist)
for i in range(5):
if _clue[i] == "0":
# soft parse when the grey letter also appears elsewhere in the guess
if _word.count(_word[i]) > 1:
    alist = [w for w in alist if _word[i] != w]
# hard parse
else:
alist = [w for w in alist if _word[i] not in w]
elif _clue[i] == "1":
alist = [w for w in alist if _word[i] in w]
alist = [w for w in alist if _word[i] != w[i]]
else:
alist = [w for w in alist if _word[i] == w[i]]
return (original_len-len(alist))
# confirms whether a clue is valid
# a clue is invalid if a letter is marked yellow at one position but grey at another
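# For example, with the guess "eerie": a clue that marks one 'e' yellow but a
# different 'e' grey is treated as invalid here, so calculate_point_value
# skips that grey/yellow/green combination.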
def is_valid(_word: str , _clue: str) -> bool:
yellows = [_word[i] for i in range(5) if _clue[i] == "1"]
for i in range(5):
if _word[i] in yellows and _clue[i] == "0":
return False
return True
# mutates the lists and computes the new best guesses
def compute_guess(_word: str , _clue: str):
global global_list, answer_list, best_guesses
# checks for right answer
if _clue == "22222":
print("Woohoo! We got it!")
done()
return
# checks for number in word
if len(_word) == 1 and _word.isnumeric():
_word = list(best_guesses.keys())[int(_word)-1]
# checks for invalid guesses
if len(_word) != 5 or len(_clue) != 5:
print("Invalid guess or clue, please re-enter.")
return
# first parse guesses and then solutions (simultaneously)
flg = False
for i in range(5):
if _clue[i] == "0":
for j in range(5):
if _clue[j] != "0" and _word[j] == _word[i]:
answer_list = [w for w in answer_list if _word[i] != w]
global_list = [w for w in global_list if _word[i] != w]
flg = True
break
if flg:
continue
answer_list = [w for w in answer_list if _word[i] not in w]
global_list = [w for w in global_list if _word[i] not in w]
elif _clue[i] == "1":
global_list = [w for w in global_list if _word[i] in w]
global_list = [w for w in global_list if _word[i] != w[i]]
answer_list = [w for w in answer_list if _word[i] in w]
answer_list = [w for w in answer_list if _word[i] != w[i]]
else:
global_list = [w for w in global_list if _word[i] == w[i]]
answer_list = [w for w in answer_list if _word[i] == w[i]]
toProcess = len(global_list)
print("Processing rating: "+str(toProcess*len(answer_list)))
if input("Enter to process, any key to overide") != "":
return
if len(answer_list) < 15:
global_list = answer_list
# recalculate best guesses
best_guesses = {}
for word in range(len(global_list)):
print(str(word)+"/"+str(toProcess))
best_guesses.update({global_list[word]:calculate_point_value(global_list[word])})
# sort best_guesses by values
best_guesses = {k: v for k, v in sorted(best_guesses.items(), key = lambda x: x[1], reverse=True)}
if len(best_guesses) > 1:
print("I think these word(s) would be a good next guess...")
print(list(best_guesses.keys())[:10])
elif len(best_guesses) == 1:
print("I've got the answer! It's "+list(best_guesses.keys())[0])
done()
return
else:
print("Uh oh, something went wrong...")
sys.exit()
def done():
global global_list, answer_list
more = input("Wanna go again? ")
if more[0].lower() == "y":
with open(os.path.join(sys.path[0], all_file),"r") as file:
global_list = file.read().splitlines()
with open(os.path.join(sys.path[0], answer_file),"r") as file:
answer_list = file.read().splitlines()
return
else:
print("Cya next time!")
sys.exit()
# progress = 0
# maxProgress = len(global_list)
# start = time.perf_counter()
# for word in global_list:
# stop = time.perf_counter()
# progress += 1
# with open(os.path.join(sys.path[0], "best_words.txt"),"a") as file:
# file.write(word+" : "+str(calculate_point_value(word))+"\n")
# print(str(progress)+"/"+str(maxProgress)+"\tETA: "+str(round((stop-start)*(maxProgress/progress)-(stop-start)))+" s")
# aesthetics~
print("\n"+"-"*50)
print(" "*5+"Hi~ I'm RIN, I'm very good at wordle...")
print(" "*9+"But I can't play unfortunately-")
print(" "*5+"At least not without someone to help me")
print(" "*5+"That's where you come in, I think we'll")
print(" "*3+"make a great team! You play, I'll give tips!")
print("-"*50+"\n")
print("Maybe start out with ",end="")
print(list(best_guesses.keys())[:5])
while True:
guess = input("Enter guess: ")
clue = input("Enter clue: ")
print("One sec...")
compute_guess(guess,clue)
|
python
|
from django.urls import path
from .views import TodoListView, TodoDetailView, TodoCreateView
app_name = 'todos'
urlpatterns = [
path('', TodoListView.as_view(), name='todo_list'),
path('<int:pk>/', TodoDetailView.as_view(), name='todo_detail'),
path('novo/', TodoCreateView.as_view(), name='todo_new'),
]
|
python
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the gradient of `tf.sparse_tensor_dense_matmul()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class SparseTensorDenseMatMulGradientTest(tf.test.TestCase):
def _sparsify(self, x):
x[x < 0.5] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(np.int64).T
x_values = x[non_zero]
x_shape = x.shape
return tf.SparseTensor(
indices=x_indices, values=x_values, shape=x_shape), len(x_values)
def _randomTensor(self, size, np_dtype, adjoint=False, sparse=False):
n, m = size
x = np.random.randn(n, m).astype(np_dtype)
if adjoint:
x = x.transpose()
if sparse:
return self._sparsify(x)
else:
return tf.constant(x, dtype=np_dtype)
def _testGradients(self, adjoint_a, adjoint_b, name, np_dtype, use_gpu=False):
n, k, m = np.random.randint(1, 10, size=3)
sp_t, nnz = self._randomTensor(
[n, k], np_dtype, adjoint=adjoint_a, sparse=True)
dense_t = self._randomTensor([k, m], np_dtype, adjoint=adjoint_b)
matmul = tf.sparse_tensor_dense_matmul(
sp_t, dense_t, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name)
with self.test_session(use_gpu=use_gpu):
dense_t_shape = [m, k] if adjoint_b else [k, m]
sp_t_val_shape = [nnz]
err = tf.test.compute_gradient_error([dense_t, sp_t.values],
[dense_t_shape, sp_t_val_shape],
matmul, [n, m])
print("%s gradient err = %s" % (name, err))
self.assertLess(err, 1e-3)
def _testGradientsType(self, np_dtype, use_gpu=False):
for adjoint_a in [True, False]:
for adjoint_b in [True, False]:
name = "sparse_tensor_dense_matmul_%s_%s_%s" % (adjoint_a, adjoint_b,
np_dtype.__name__)
self._testGradients(adjoint_a, adjoint_b, name, np_dtype, use_gpu)
def testGradients(self):
np.random.seed(5) # Fix seed to avoid flakiness
for use_gpu in [True, False]:
self._testGradientsType(np.float32, use_gpu)
self._testGradientsType(np.float64, use_gpu)
if __name__ == "__main__":
tf.test.main()
|
python
|
"""
Copyright 2016 Udey Rishi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import scrapy
class BoPipelineItem(scrapy.Item):
html_response = scrapy.Field()
entities_nlp_result = scrapy.Field()
keywords_nlp_result = scrapy.Field()
concepts_nlp_result = scrapy.Field()
tags = scrapy.Field()
sentiment_nlp_result = scrapy.Field()
category_nlp_result = scrapy.Field()
metadata = scrapy.Field()
parent_url = scrapy.Field()
def get_url(self):
return self['html_response'].url
class BoPackagedItem(scrapy.Item):
url = scrapy.Field()
language = scrapy.Field()
category = scrapy.Field()
doc_sentiment = scrapy.Field()
tags = scrapy.Field()
metadata = scrapy.Field()
parent_url = scrapy.Field()
time_updated = scrapy.Field()
|
python
|
# Create a mesh, compute the normals and set them active, and
# plot the active vectors.
#
import pyvista
mesh = pyvista.Cube()
mesh_w_normals = mesh.compute_normals()
mesh_w_normals.active_vectors_name = 'Normals'
arrows = mesh_w_normals.arrows
arrows.plot(show_scalar_bar=False)
|
python
|
from __future__ import absolute_import
# Add search patterns and config options for the things that are used in MultiQC_bcbio
def multiqc_bcbio_config():
    """ Set up MultiQC config defaults for this package """
    from multiqc import config
bcbio_search_patterns = {
'bcbio/metrics': {'fn': '*_bcbio.txt'},
'bcbio/coverage_dist': {'fn': '*-coverage.mosdepth.dist.txt'},
'bcbio/coverage_avg': {'fn': '*_bcbio_coverage_avg.txt'}, # deprecated in 1.0.6, replaced with 'bcbio/coverage_dist'
'bcbio/variants': {'fn': '*_bcbio_variants.txt'},
'bcbio/target': {'fn': 'target_info.yaml'},
'bcbio/qsignature': {'fn': '*qsignature.ma'},
'bcbio/vcfstats': {'fn': '*_bcbio_variants_stats.txt'},
'bcbio/seqbuster': {'contents': 'seqbuster'},
'bcbio/umi': {'fn': '*_umi_stats.yaml'},
'bcbio/viral_old': {'fn': '*viral*-counts.txt'},
'bcbio/viral': {'fn': '*viral*-completeness.txt'},
'bcbio/damage': {'fn': '*damage.yaml'},
}
config.update_dict(config.sp, bcbio_search_patterns)
config.fn_clean_exts.extend([
{'type': 'regex', 'pattern': '_bcbio.*'},
])
config.fn_clean_trim.extend([
'-coverage.mosdepth.dist',
])
config.update_dict(config.table_columns_visible, {
'FastQC': {
'percent_duplicates': False,
'total_sequences': False,
},
'QualiMap': {
'percentage_aligned': False,
'median_coverage': False,
},
'Samtools Stats': {
'non-primary_alignments': False,
'reads_mapped': False,
'reads_mapped_percent': False,
'raw_total_sequences': False,
'error_rate': False,
},
'SnpEff': {
'Change_rate': False,
'Ts_Tv_ratio': False,
'Number_of_variants_before_filter': False,
},
})
|
python
|
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nums.core.array import utils as array_utils
from nums.core.compute.compute_manager import ComputeManager
from nums.core.grid.grid import ArrayGrid
class Block:
# pylint: disable=redefined-builtin, global-statement
# TODO(hme): Create a base class, and move this concrete class into blockarray.py.
# Do this when we implement a SparseBlock object.
block_id_counter = -1
def __init__(
self,
grid_entry,
grid_shape,
rect,
shape,
dtype,
transposed,
cm: ComputeManager,
id=None,
):
self._cm = cm
self.grid_entry: tuple = grid_entry
self.grid_shape: tuple = grid_shape
self.rect: list = rect
self.oid: np.object = None
self.shape: tuple = shape
self.dtype = dtype
self.num_dims = len(self.rect)
self.transposed = transposed
self.id = id
if self.id is None:
Block.block_id_counter += 1
self.id = Block.block_id_counter
# Set if a device id was used to compute this block.
self.device_id = None
def __repr__(self):
return "Block(" + str(self.oid) + ")"
def size(self):
return np.product(self.shape)
def copy(self, shallow=True):
assert shallow, "Only shallow copies are currently supported."
block = Block(
self.grid_entry,
self.grid_shape,
self.rect,
self.shape,
self.dtype,
self.transposed,
self._cm,
)
block.oid = self.oid
return block
def true_grid_entry(self):
if self.transposed:
return tuple(reversed(self.grid_entry))
return self.grid_entry
def true_grid_shape(self):
if self.transposed:
return tuple(reversed(self.grid_shape))
return self.grid_shape
def transpose(self, defer=False, redistribute=False):
# If defer is True, this operation does not modify the remote object.
# If defer is True and redistribute is False,
# this operation does not move the remote object.
grid_entryT = tuple(reversed(self.grid_entry))
grid_shapeT = tuple(reversed(self.grid_shape))
rectT = list(reversed(self.rect))
blockT = Block(
grid_entry=grid_entryT,
grid_shape=grid_shapeT,
rect=rectT,
shape=tuple(reversed(self.shape)),
dtype=self.dtype,
transposed=not self.transposed,
cm=self._cm,
)
blockT.oid = self.oid
if not defer:
blockT.transposed = False
if redistribute:
syskwargs = {"grid_entry": grid_entryT, "grid_shape": grid_shapeT}
else:
syskwargs = {
"grid_entry": self.grid_entry,
"grid_shape": self.grid_shape,
}
blockT.oid = self._cm.transpose(self.oid, syskwargs=syskwargs)
return blockT
def swapaxes(self, axis1, axis2):
block = self.copy()
grid_entry = list(block.grid_entry)
grid_shape = list(block.grid_shape)
shape = list(block.shape)
rect = block.rect
grid_entry[axis1], grid_entry[axis2] = grid_entry[axis2], grid_entry[axis1]
grid_shape[axis1], grid_shape[axis2] = grid_shape[axis2], grid_shape[axis1]
shape[axis1], shape[axis2] = shape[axis2], shape[axis1]
rect[axis1], rect[axis2] = rect[axis2], rect[axis1]
block.grid_entry = tuple(grid_entry)
block.grid_shape = tuple(grid_shape)
block.shape = tuple(shape)
block.rect = rect
block.oid = self._cm.swapaxes(
block.oid,
axis1,
axis2,
syskwargs={"grid_entry": block.grid_entry, "grid_shape": block.grid_shape},
)
return block
def ufunc(self, op_name, device_id=None):
return self.uop_map(op_name, device_id=device_id)
def uop_map(self, op_name, args=None, kwargs=None, device_id=None):
# This retains transpose.
block = self.copy()
block.dtype = array_utils.get_uop_output_type(op_name, self.dtype)
args = () if args is None else args
kwargs = {} if kwargs is None else kwargs
if device_id is None:
syskwargs = {"grid_entry": block.grid_entry, "grid_shape": block.grid_shape}
else:
syskwargs = {"device_id": device_id}
block.device_id = device_id
block.oid = self._cm.map_uop(
op_name, self.oid, args, kwargs, syskwargs=syskwargs
)
return block
def _block_from_other(self, other):
# Assume other is numeric.
# This only occurs during some numpy operations (e.g. np.mean),
# where a literal is used in the operation.
assert isinstance(other, (int, float, np.int, np.float))
block = Block(
self.grid_entry,
self.grid_shape,
[(0, 1)],
(1,),
self.dtype,
False,
self._cm,
)
# We pass syskwargs here for correct node placement for `other`,
# which should be local to self.
block.oid = self._cm.put(
np.array(other, dtype=self.dtype),
syskwargs={
"grid_entry": self.grid_entry,
"grid_shape": self.grid_shape,
},
)
return block
def bop(self, op, other, args: dict, device_id=None):
if not isinstance(other, Block):
other = self._block_from_other(other)
if op == "tensordot":
axes = args["axes"]
result_grid_entry = tuple(
list(self.grid_entry[:-axes]) + list(other.grid_entry[axes:])
)
result_grid_shape = tuple(
list(self.grid_shape[:-axes]) + list(other.grid_shape[axes:])
)
result_rect = list(self.rect[:-axes] + other.rect[axes:])
result_shape = tuple(list(self.shape[:-axes]) + list(other.shape[axes:]))
else:
# Broadcasting starts from trailing dimensions.
# Resulting shape is max of trailing shapes
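# e.g. pairing a block of shape (4, 1) with a block of shape (3,): aligning
# from the trailing dimension, each result dimension comes from whichever
# operand is larger there, giving a result shape of (4, 3).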
result_grid_entry = []
result_grid_shape = []
result_rect = []
result_shape = []
for i in range(1, max(self.num_dims, other.num_dims) + 1):
other_i = other.num_dims - i
self_i = self.num_dims - i
if other_i < 0:
is_self = True
elif self_i < 0:
is_self = False
else:
is_self = other.shape[other_i] < self.shape[self_i]
if is_self:
result_grid_entry.append(self.grid_entry[self_i])
result_grid_shape.append(self.grid_shape[self_i])
result_rect.append(self.rect[self_i])
result_shape.append(self.shape[self_i])
else:
result_grid_entry.append(other.grid_entry[other_i])
result_grid_shape.append(other.grid_shape[other_i])
result_rect.append(other.rect[other_i])
result_shape.append(other.shape[other_i])
result_grid_entry = tuple(reversed(result_grid_entry))
result_grid_shape = tuple(reversed(result_grid_shape))
result_rect = list(reversed(result_rect))
result_shape = tuple(reversed(result_shape))
dtype = array_utils.get_bop_output_type(op, self.dtype, other.dtype)
block = Block(
grid_entry=result_grid_entry,
grid_shape=result_grid_shape,
rect=result_rect,
shape=result_shape,
dtype=dtype,
transposed=False,
cm=self._cm,
)
if device_id is None:
syskwargs = {"grid_entry": block.grid_entry, "grid_shape": block.grid_shape}
else:
syskwargs = {"device_id": device_id}
block.device_id = device_id
block.oid = self._cm.bop(
op,
self.oid,
other.oid,
self.transposed,
other.transposed,
axes=args.get("axes"),
syskwargs=syskwargs,
)
return block
def tensordot(self, other, axes):
return self.bop("tensordot", other, args={"axes": axes})
def __add__(self, other):
return self.bop("add", other, args={})
def __sub__(self, other):
return self.bop("sub", other, args={})
def __mul__(self, other):
return self.bop("mul", other, args={})
def __matmul__(self, other):
return self.tensordot(other, axes=1)
def __truediv__(self, other):
return self.bop("truediv", other, args={})
def __pow__(self, other):
return self.bop("pow", other, args={})
def __ge__(self, other):
return self.bop("ge", other, args={})
def __gt__(self, other):
return self.bop("gt", other, args={})
def __le__(self, other):
return self.bop("le", other, args={})
def __lt__(self, other):
return self.bop("lt", other, args={})
def __eq__(self, other):
return self.bop("eq", other, args={})
def __ne__(self, other):
return self.bop("ne", other, args={})
__iadd__ = __add__
__isub__ = __sub__
__imul__ = __mul__
__imatmul__ = __matmul__
__itruediv__ = __truediv__
__ipow__ = __pow__
def astype(self, dtype):
block = self.copy()
block.dtype = dtype
block.oid = self._cm.astype(
self.oid,
dtype.__name__,
syskwargs={"grid_entry": block.grid_entry, "grid_shape": block.grid_shape},
)
return block
def conjugate(self):
return self.ufunc("conjugate")
def sqrt(self):
return self.ufunc("sqrt")
def get(self):
return self._cm.get(self.oid)
class BlockArrayBase:
def __init__(self, grid: ArrayGrid, cm: ComputeManager, blocks: np.ndarray = None):
self.grid = grid
self.cm = cm
self.shape = self.grid.shape
self.block_shape = self.grid.block_shape
self.grid_shape = self.grid.grid_shape
self.size = np.product(self.shape)
self.ndim = len(self.shape)
self.dtype = self.grid.dtype
try:
self.nbytes = self.grid.nbytes()
except ValueError as _:
self.nbytes = None
self.blocks = blocks
if self.blocks is None:
# TODO (hme): Subclass np.ndarray for self.blocks instances,
# and override key methods to better integrate with NumPy's ufuncs.
self.blocks = np.empty(shape=self.grid.grid_shape, dtype=Block)
for grid_entry in self.grid.get_entry_iterator():
self.blocks[grid_entry] = Block(
grid_entry=grid_entry,
grid_shape=self.grid.grid_shape,
rect=self.grid.get_slice_tuples(grid_entry),
shape=self.grid.get_block_shape(grid_entry),
dtype=self.dtype,
transposed=False,
cm=self.cm,
)
def __repr__(self):
return "BlockArray(" + str(self.blocks) + ")"
def get(self) -> np.ndarray:
result: np.ndarray = np.zeros(shape=self.grid.shape, dtype=self.grid.dtype)
block_shape: np.ndarray = np.array(self.grid.block_shape, dtype=np.int)
arrays: list = self.cm.get(
[
self.blocks[grid_entry].oid
for grid_entry in self.grid.get_entry_iterator()
]
)
for block_index, grid_entry in enumerate(self.grid.get_entry_iterator()):
start = block_shape * grid_entry
entry_shape = np.array(self.grid.get_block_shape(grid_entry), dtype=np.int)
end = start + entry_shape
slices = tuple(map(lambda item: slice(*item), zip(*(start, end))))
block: Block = self.blocks[grid_entry]
arr: np.ndarray = arrays[block_index]
if block.transposed:
arr = arr.T
result[slices] = arr.reshape(block.shape)
return result
def broadcast_to(self, shape):
b = array_utils.broadcast(self.shape, shape)
result_block_shape = array_utils.broadcast_block_shape(
self.shape, shape, self.block_shape
)
result: BlockArrayBase = BlockArrayBase(
ArrayGrid(b.shape, result_block_shape, self.grid.dtype.__name__), self.cm
)
extras = []
# Below taken directly from _broadcast_to in numpy's stride_tricks.py.
it = np.nditer(
(self.blocks,),
flags=["multi_index", "refs_ok", "zerosize_ok"] + extras,
op_flags=["readonly"],
itershape=result.grid.grid_shape,
order="C",
)
with it:
# never really has writebackifcopy semantics
broadcast = it.itviews[0]
result.blocks = broadcast
return result
|
python
|
from os import path
from json_database import JsonConfigXDG, JsonStorageXDG
from ovos_utils.log import LOG
from ovos_utils.messagebus import Message
from ovos_utils.json_helper import merge_dict
from ovos_skills_manager import SkillEntry
from ovos_skills_manager.appstores import AbstractAppstore
from ovos_skills_manager.appstores.andlo import AndloSkillList
from ovos_skills_manager.appstores.mycroft_marketplace import \
MycroftMarketplace
from ovos_skills_manager.appstores.pling import Pling
from ovos_skills_manager.appstores.ovos import OVOSstore
from ovos_skills_manager.appstores.neon import NeonSkills
from ovos_skills_manager.config import get_config_object, safe_get_skills_folder
from ovos_skills_manager.exceptions import UnknownAppstore
from ovos_skills_manager.appstores.local import InstalledSkills
from ovos_skills_manager.github import author_repo_from_github_url
class OVOSSkillsManager:
def __init__(self, bus=None):
self.config = get_config_object()
self._boostrap_tracker = {}
self._threads = []
self.bus = None
if bus:
    self.bind(bus)
def bind(self, bus):
# mycroft messagebus events
self.bus = bus
def emit(self, event_name, event_data=None):
event_data = event_data or {}
if self.bus:
self.bus.emit(Message(event_name, event_data))
def get_active_appstores(self, bootstrap:bool=False):
stores = {}
for appstore_id in self.config["appstores"]:
if self.config["appstores"][appstore_id]["active"]:
if bootstrap and appstore_id not in self._boostrap_tracker:
self._boostrap_tracker[appstore_id] = True
elif bootstrap and appstore_id in self._boostrap_tracker:
bootstrap = False
stores[appstore_id] = self.get_appstore(appstore_id,
bootstrap=bootstrap)
return stores
def get_appstore(self, appstore_id: str, bootstrap:bool=True):
if self.config["appstores"][appstore_id]["active"]:
parse_github = self.config["appstores"][appstore_id]["parse_github"]
store = self.name_to_appstore(appstore_id)
if bootstrap and appstore_id not in self._boostrap_tracker:
self._boostrap_tracker[appstore_id] = True
elif bootstrap and appstore_id in self._boostrap_tracker:
bootstrap = False
return store(parse_github=parse_github, bootstrap=bootstrap)
return None
@staticmethod
def name_to_appstore(name: str) -> AbstractAppstore:
if name in ["pling", "bigscreen"]:
return Pling
elif name in ["mycroft", "mycroft_marketplace"]:
return MycroftMarketplace
elif name in ["andlo", "andlo_skill_list"]:
return AndloSkillList
elif name in ["ovos", "ovos_appstore", "ovos_marketplace"]:
return OVOSstore
elif name in ["neon", "neon_gecko", "neon_skills"]:
return NeonSkills
elif name in ["local", "local_skills", "installed",
"installed_skills"]:
return InstalledSkills
else:
raise UnknownAppstore
def clear_cache(self, appstore_id:str=None):
if appstore_id:
self.get_appstore(appstore_id).clear_cache()
else:
for appstore in self.appstores:
appstore.clear_cache()
def validate_appstore_name(self, appstore: str):
if appstore in ["pling", "bigscreen"]:
appstore = "pling"
elif appstore in ["mycroft", "mycroft_marketplace"]:
appstore = "mycroft_marketplace"
elif appstore in ["andlo", "andlo_skill_list"]:
appstore = "andlo_skill_list"
elif appstore in ["ovos", "ovos_appstore", "ovos_marketplace"]:
appstore = "ovos"
elif appstore in ["neon", "neon_gecko", "neon_skills"]:
appstore = "neon"
elif appstore in ["local", "local_skills", "installed",
"installed_skills"]:
appstore = "local"
elif appstore not in self.config["appstores"]:
raise UnknownAppstore
return appstore
def enable_appstore(self, appstore_id: str):
appstore_id = self.validate_appstore_name(appstore_id)
self.config["appstores"][appstore_id]["active"] = True
self.emit("osm.store.enabled", {"store": appstore_id})
def set_appstore_priority(self, appstore_id: str, priority: int):
appstore_id = self.validate_appstore_name(appstore_id)
self.config["appstores"][appstore_id]["priority"] = priority
self.emit("osm.store.priority.change", {"store": appstore_id,
"priority": priority})
def set_appstore_auth_token(self, appstore_id: str, token: str):
appstore_id = self.validate_appstore_name(appstore_id)
self.config["appstores"][appstore_id]["auth_token"] = token
self.emit("osm.store.token.change", {"store": appstore_id})
def disable_appstore(self, appstore_id: str):
appstore_id = self.validate_appstore_name(appstore_id)
self.config["appstores"][appstore_id]["active"] = False
self.emit("osm.store.disabled", {"store": appstore_id})
def sync_appstores(self, merge:bool=False, new_only:bool=False, threaded:bool=False):
stores = self.get_active_appstores()
self.emit("osm.sync.start")
for appstore_id in stores:
LOG.info("Syncing skills from " + appstore_id)
self.emit("osm.store.sync.start", {"store": appstore_id})
try:
store = stores[appstore_id]
store.authenticate()
store.sync_skills_list(merge, new_only)
store.clear_authentication()
except Exception as e:
self.emit("osm.store.sync.error",
{"store": appstore_id, "error": str(e)})
self.emit("osm.store.sync.finish", {"store": appstore_id})
self.emit("osm.sync.finish")
@property
def total_skills(self):
return sum([s.total_skills() for s in self.appstores])
@property
def appstores(self):
stores = []
for appstore_id in self.config["appstores"]:
store = self.get_appstore(appstore_id)
if not store:
continue
priority = self.config["appstores"][appstore_id]["priority"]
stores.append((store, priority))
return [s[0] for s in sorted(stores, key=lambda k: k[1])]
def search_skills(self, name: str, as_json:bool=False, fuzzy:bool=True, thresh:float=0.85,
ignore_case:bool=True):
self.emit("osm.search.start",
{"query": name, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "generic"})
for store in self.appstores:
store.authenticate()
for skill in store.search_skills(name, as_json, fuzzy, thresh,
ignore_case):
self.emit("osm.search.store.result",
{"query": name, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case,
"search_type": "generic", "skill": skill.json,
"store": store.appstore_id})
yield skill
store.clear_authentication()
self.emit("osm.search.finish",
{"query": name, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "generic"})
def search_skills_by_id(self, skill_id: str, as_json:bool=False, fuzzy:bool=False,
thresh:float=0.85, ignore_case:bool=True):
""" skill_id is repo.author , case insensitive,
searchs by name and filters results by author """
self.emit("osm.search.start",
{"query": skill_id, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "id"})
for store in self.appstores:
store.authenticate()
for skill in store.search_skills_by_id(skill_id, as_json,
fuzzy=fuzzy,
ignore_case=ignore_case,
thresh=thresh):
self.emit("osm.search.store.result",
{"query": skill_id, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case,
"search_type": "id", "skill": skill.json,
"store": store.appstore_id})
yield skill
store.clear_authentication()
self.emit("osm.search.finish",
{"query": skill_id, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "id"})
def search_skills_by_name(self, name:str, as_json:bool=False,
fuzzy:bool=True, thresh:float=0.85, ignore_case:bool=True):
self.emit("osm.search.start",
{"query": name, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "name"})
for store in self.appstores:
store.authenticate()
for skill in store.search_skills_by_name(name, as_json, fuzzy,
thresh, ignore_case):
self.emit("osm.search.store.result",
{"query": name, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case,
"search_type": "name", "skill": skill.json,
"store": store.appstore_id})
yield skill
store.clear_authentication()
self.emit("osm.search.finish",
{"query": name, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "name"})
def search_skills_by_url(self, url:str, as_json:bool=False):
self.emit("osm.search.start",
{"query": url, "search_type": "url"})
for store in self.appstores:
store.authenticate()
for skill in store.search_skills_by_url(url, as_json):
store.clear_authentication()
self.emit("osm.search.finish",
{"query": url, "search_type": "url",
"skill": skill.json})
yield skill
self.emit("osm.search.finish",
{"query": url, "search_type": "url"})
def search_skills_by_category(self, category:str, as_json:bool=False,
fuzzy:bool=True, thresh:float=0.85, ignore_case:bool=True):
self.emit("osm.search.start",
{"query": category, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "category"})
for store in self.appstores:
store.authenticate()
for skill in store.search_skills_by_category(category, as_json,
fuzzy, thresh,
ignore_case):
self.emit("osm.search.store.result",
{"query": category, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case,
"search_type": "category", "skill": skill.json,
"store": store.appstore_id})
yield skill
store.clear_authentication()
self.emit("osm.search.finish",
{"query": category, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "category"})
def search_skills_by_author(self, authorname:str, as_json:bool=False,
fuzzy:bool=True, thresh:float=0.85, ignore_case:bool=True):
self.emit("osm.search.start",
{"query": authorname, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "author"})
for store in self.appstores:
store.authenticate()
for skill in store.search_skills_by_author(authorname, as_json,
fuzzy, thresh,
ignore_case):
self.emit("osm.search.store.result",
{"query": authorname, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case,
"search_type": "author", "skill": skill.json,
"store": store.appstore_id})
yield skill
store.clear_authentication()
self.emit("osm.search.finish",
{"query": authorname, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "author"})
def search_skills_by_tag(self, tag:str, as_json:bool=False,
fuzzy:bool=True, thresh:float=0.85, ignore_case:bool=True):
self.emit("osm.search.start",
{"query": tag, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "tag"})
for store in self.appstores:
store.authenticate()
for skill in store.search_skills_by_tag(tag, as_json, fuzzy,
thresh, ignore_case):
self.emit("osm.search.store.result",
{"query": tag, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case,
"search_type": "tag", "skill": skill.json,
"store": store.appstore_id})
yield skill
store.clear_authentication()
self.emit("osm.search.finish",
{"query": tag, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "tag"})
def search_skills_by_description(self, value: str, as_json:bool=False,
fuzzy:bool=True, thresh:float=0.85,
ignore_case:bool=True):
self.emit("osm.search.start",
{"query": value, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "description"})
for store in self.appstores:
store.authenticate()
for skill in store.search_skills_by_description(value, as_json,
fuzzy, thresh,
ignore_case):
self.emit("osm.search.store.result",
{"query": value, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case,
"search_type": "description", "skill": skill.json,
"store": store.appstore_id})
yield skill
store.clear_authentication()
self.emit("osm.search.finish",
{"query": value, "thresh": thresh, "fuzzy": fuzzy,
"ignore_case": ignore_case, "search_type": "description"})
@staticmethod
def skill_entry_from_url(url: str):
"""
Builds a minimal SkillEntry object from the passed GitHub URL to use for skill installation
:param url: URL of skill to install
:return: SkillEntry object with url, branch, requirements, and authorname populated
"""
from ovos_skills_manager.exceptions import GithubInvalidBranch, GithubFileNotFound
from ovos_skills_manager.github import get_branch_from_github_url, normalize_github_url, get_requirements_json,\
get_skill_json
from ovos_skills_manager.skill_entry import SkillEntry
try:
branch = get_branch_from_github_url(url)
except GithubInvalidBranch:
branch = None
url = normalize_github_url(url)
requirements = get_requirements_json(url, branch)
requirements["system"] = {k: v.split() for k, v in requirements.get("system", {}).items()}
try:
json = get_skill_json(url, branch)
requirements = merge_dict(requirements, json.get("requirements", {}),
merge_lists=True, skip_empty=True, no_dupes=True)
except GithubFileNotFound:
json = {"authorname": author_repo_from_github_url(url)[0]}
return SkillEntry.from_json({"url": url,
"branch": branch,
"requirements": requirements,
"authorname": json.get("authorname")}, False)
def install_skill_from_url(self, url: str, skill_dir:str=None):
"""
Installs a Skill from the passed url
:param url: Git url of skill to install (including optional branch spec)
:param skill_dir: Skills directory to install to (skill unpacked to {folder}/{skill.uuid})
"""
self.install_skill(self.skill_entry_from_url(url), skill_dir)
def install_skill(self, skill: SkillEntry, folder=None):
"""
Installs a SkillEntry with any required auth_token
:param skill: Skill to install
:param folder: Skills directory to install to (skill unpacked to {folder}/{skill.uuid})
"""
self.emit("osm.install.start",
{"folder": folder, "skill": skill.json})
store = None
try:
self.validate_appstore_name(skill.appstore)
store = self.get_appstore(skill.appstore)
store.authenticate(bootstrap=False)
except Exception as e:
self.emit("osm.install.error",
{"folder": folder, "skill": skill.json, "error": str(e)})
try:
skill.install(folder)
except Exception as e:
self.emit("osm.install.error",
{"folder": folder, "skill": skill.json, "error": str(e)})
if store:
store.clear_authentication()
self.emit("osm.install.finish",
{"folder": folder, "skill": skill.json})
def __iter__(self):
for store in self.appstores:
for skill in store:
yield skill
|
python
|
from torch.utils.data import Dataset
import pandas as pd
from ast import literal_eval
from os import path
import numpy as np
from newsrec.config import model_name
import importlib
import torch
try:
config = getattr(importlib.import_module('newsrec.config'), f"{model_name}Config")
except AttributeError:
print(f"{model_name} not included!")
exit()
class BaseDataset(Dataset):
def __init__(self, behaviors_path, news_path, roberta_embedding_dir):
super(BaseDataset, self).__init__()
assert all(attribute in [
'category', 'subcategory', 'title', 'abstract', 'title_entities',
'abstract_entities', 'title_roberta', 'title_mask_roberta',
'abstract_roberta', 'abstract_mask_roberta'
] for attribute in config.dataset_attributes['news'])
assert all(attribute in ['user', 'clicked_news_length']
for attribute in config.dataset_attributes['record'])
self.behaviors_parsed = pd.read_table(behaviors_path)
self.news_parsed = pd.read_table(
news_path,
index_col='id',
usecols=['id'] + config.dataset_attributes['news'],
converters={
attribute: literal_eval
for attribute in set(config.dataset_attributes['news']) & set([
'title', 'abstract', 'title_entities', 'abstract_entities',
'title_roberta', 'title_mask_roberta', 'abstract_roberta',
'abstract_mask_roberta'
])
})
self.news_id2int = {x: i for i, x in enumerate(self.news_parsed.index)}
self.news2dict = self.news_parsed.to_dict('index')
for key1 in self.news2dict.keys():
for key2 in self.news2dict[key1].keys():
self.news2dict[key1][key2] = torch.tensor(
self.news2dict[key1][key2])
padding_all = {
'category': 0,
'subcategory': 0,
'title': [0] * config.num_words_title,
'abstract': [0] * config.num_words_abstract,
'title_entities': [0] * config.num_words_title,
'abstract_entities': [0] * config.num_words_abstract,
'title_roberta': [0] * config.num_words_title,
'title_mask_roberta': [0] * config.num_words_title,
'abstract_roberta': [0] * config.num_words_abstract,
'abstract_mask_roberta': [0] * config.num_words_abstract
}
for key in padding_all.keys():
padding_all[key] = torch.tensor(padding_all[key])
self.padding = {
k: v
for k, v in padding_all.items()
if k in config.dataset_attributes['news']
}
if model_name == 'Exp2' and not config.fine_tune:
if config.roberta_level == 'word':
self.roberta_embedding = {
k: torch.from_numpy(
np.load(
path.join(roberta_embedding_dir,
f'{k}_last_hidden_state.npy'))).float()
for k in set(config.dataset_attributes['news'])
& set(['title', 'abstract'])
}
name2length = {
'title': config.num_words_title,
'abstract': config.num_words_abstract
}
for k in set(config.dataset_attributes['news']) & set(
['title', 'abstract']):
self.padding[k] = torch.zeros((name2length[k], 768))
elif config.roberta_level == 'sentence':
self.roberta_embedding = {
k: torch.from_numpy(
np.load(
path.join(roberta_embedding_dir,
f'{k}_pooler_output.npy'))).float()
for k in set(config.dataset_attributes['news'])
& set(['title', 'abstract'])
}
for k in set(config.dataset_attributes['news']) & set(
['title', 'abstract']):
self.padding[k] = torch.zeros(768)
def _news2dict(self, id):
ret = self.news2dict[id]
if model_name == 'Exp2' and not config.fine_tune:
for k in set(config.dataset_attributes['news']) & set(
['title', 'abstract']):
ret[k] = self.roberta_embedding[k][self.news_id2int[id]]
return ret
def __len__(self):
return len(self.behaviors_parsed)
def __getitem__(self, idx):
item = {}
row = self.behaviors_parsed.iloc[idx]
if 'user' in config.dataset_attributes['record']:
item['user'] = row.user
item["clicked"] = list(map(int, row.clicked.split()))
item["candidate_news"] = [
self._news2dict(x) for x in row.candidate_news.split()
]
item["clicked_news"] = [
self._news2dict(x)
for x in row.clicked_news.split()[:config.num_clicked_news_a_user]
]
if 'clicked_news_length' in config.dataset_attributes['record']:
item['clicked_news_length'] = len(item["clicked_news"])
repeated_times = config.num_clicked_news_a_user - \
len(item["clicked_news"])
assert repeated_times >= 0
item["clicked_news"] = [self.padding
] * repeated_times + item["clicked_news"]
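# e.g. with num_clicked_news_a_user = 50 and a user who clicked 3 news items,
# 47 copies of the padding entry are prepended so clicked_news always has a
# fixed length.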
return item
|
python
|
# -*- coding: utf-8 -*-
import json
import functools
from TM1py.Objects import Chore, ChoreTask
from TM1py.Services.ObjectService import ObjectService
def deactivate_activate(func):
""" Higher Order function to handle activation and deactivation of chores before updating them
:param func:
:return:
"""
@functools.wraps(func)
def wrapper(self, chore):
# Get Chore
chore_old = self.get(chore.name)
# Deactivate
if chore_old.active:
self.deactivate(chore.name)
# Do stuff
try:
response = func(self, chore)
except Exception as e:
raise e
# Activate if necessary
finally:
if chore.active:
self.activate(chore.name)
return response
return wrapper
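# For example, `update` below is wrapped with @deactivate_activate, so an
# active chore is deactivated before its definition is PATCHed and then
# re-activated afterwards.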
class ChoreService(ObjectService, object):
""" Service to handle Object Updates for TM1 Chores
"""
def __init__(self, rest):
super(ChoreService,self).__init__(rest)
def get(self, chore_name):
""" Get a chore from the TM1 Server
:param chore_name:
:return: instance of TM1py.Chore
"""
request = "/api/v1/Chores('{}')?$expand=Tasks($expand=*,Process($select=Name),Chore($select=Name))" \
.format(chore_name)
response = self._rest.GET(request)
return Chore.from_dict(response.json())
def get_all(self):
""" get a List of all Chores
:return: List of TM1py.Chore
"""
request = "/api/v1/Chores?$expand=Tasks($expand=*,Process($select=Name),Chore($select=Name))"
response = self._rest.GET(request)
return [Chore.from_dict(chore_as_dict) for chore_as_dict in response.json()['value']]
def get_all_names(self):
""" get a List of all Chores
:return: List of TM1py.Chore
"""
request = "/api/v1/Chores?$select=Name"
response = self._rest.GET(request)
return [chore['Name'] for chore in response.json()['value']]
def create(self, chore):
""" create chore in TM1
:param chore: instance of TM1py.Chore
:return:
"""
request = "/api/v1/Chores"
response = self._rest.POST(request, chore.body)
if chore.active:
self.activate(chore.name)
return response
def delete(self, chore_name):
""" delete chore in TM1
:param chore_name:
:return: response
"""
request = "/api/v1/Chores('{}')".format(chore_name)
response = self._rest.DELETE(request)
return response
def exists(self, chore_name):
""" Check if Chore exists
:param chore_name:
:return:
"""
request = "/api/v1/Chores('{}')".format(chore_name)
return self._exists(request)
@deactivate_activate
def update(self, chore):
""" update chore on TM1 Server
does not update: DST Sensitivity!
:param chore:
:return: response
"""
# Update StartTime, ExecutionMode, Frequency
request = "/api/v1/Chores('{}')".format(chore.name)
self._rest.PATCH(request, chore.body)
# Update Tasks
for i, task_new in enumerate(chore.tasks):
task_old = self._get_task(chore.name, i)
if task_old is None:
self._add_task(chore.name, task_new)
elif task_new != task_old:
self._update_task(chore.name, task_new)
def activate(self, chore_name):
""" activate chore on TM1 Server
:param chore_name:
:return: response
"""
request = "/api/v1/Chores('{}')/tm1.Activate".format(chore_name)
return self._rest.POST(request, '')
def deactivate(self, chore_name):
""" deactivate chore on TM1 Server
:param chore_name:
:return: response
"""
request = "/api/v1/Chores('{}')/tm1.Deactivate".format(chore_name)
return self._rest.POST(request, '')
def set_local_start_time(self, chore_name, date_time):
""" Makes Server crash if chore is activate (10.2.2 FP6) :)
:param chore_name:
:param date_time:
:return:
"""
request = "/api/v1/Chores('{}')/tm1.SetServerLocalStartTime".format(chore_name)
# function for 3 to '03'
fill = lambda t: str(t).zfill(2)
data = {
"StartDate": "{}-{}-{}".format(date_time.year, date_time.month, date_time.day),
"StartTime": "{}:{}:{}".format(fill(date_time.hour), fill(date_time.minute), fill(date_time.second))
}
return self._rest.POST(request, json.dumps(data))
def execute_chore(self, chore_name):
""" Ask TM1 Server to execute a chore
:param chore_name: String, name of the chore to be executed
:return: the response
"""
return self._rest.POST("/api/v1/Chores('" + chore_name + "')/tm1.Execute", '')
def _get_task(self, chore_name, step):
""" Get task from chore
:param chore_name: name of the chore
:param step: integer
:return: instance of TM1py.ChoreTask
"""
request = "/api/v1/Chores('{}')/Tasks({})?$expand=*,Process($select=Name),Chore($select=Name)" \
.format(chore_name, step)
response = self._rest.GET(request)
return ChoreTask.from_dict(response.json())
def _add_task(self, chore_name, chore_task):
""" Create Chore task on TM1 Server
:param chore_name: name of Chore to update
:param chore_task: instance of TM1py.ChoreTask
:return: response
"""
chore = self.get(chore_name)
if chore.active:
self.deactivate(chore_name)
try:
request = "/api/v1/Chores('{}')/Tasks".format(chore_name)
response = self._rest.POST(request, chore_task.body)
except Exception as e:
raise e
finally:
if chore.active:
self.activate(chore_name)
return response
def _update_task(self, chore_name, chore_task):
""" update a chore task
:param chore_name: name of the Chore
:param chore_task: instance TM1py.ChoreTask
:return: response
"""
request = "/api/v1/Chores('{}')/Tasks({})".format(chore_name, chore_task.step)
return self._rest.PATCH(request, chore_task.body)
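# Minimal usage sketch (the REST session and chore name below are placeholders, not part of this
# module; `rest` stands for an authenticated TM1py REST connection):
#
#   chores = ChoreService(rest)
#   chore = chores.get('SomeChore')
#   # ...modify chore.tasks or its schedule...
#   chores.update(chore)  # deactivate_activate deactivates the chore, PATCHes it, then re-activates it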
|
python
|
from django.core.management.base import BaseCommand, CommandError
from gwasdb.hdf5 import get_hit_count, load_permutation_thresholds
from gwasdb.models import Study
from aragwas import settings
import os
class Command(BaseCommand):
help = 'Fetch number of SNPs passing filtering, adapt bonferroni thresholds and add number of hits'
def add_arguments(self, parser):
parser.add_argument('--maf',
dest='maf',
type=float,
default=0.05,
help='Specify the maf used to filter SNPs, this will be used to remove rare alleles and correct thresholds (default: 0.05)')
        parser.add_argument('--update_all',
                            dest='update_all',
                            action='store_true',
                            default=False,
                            help='Pass this flag if all studies must be re-updated (default: False)')
parser.add_argument('--id',
dest='study_id',
type=int,
default=None,
help='Specify a primary key to compute for a specific study. If empty will check entire phenotype list.')
parser.add_argument('--permutations',
dest='perm_file',
type=str,
default=None,
help='Specify the file name containing the permutation thresholds used to compute n_hits_perm')
def handle(self, *args, **options):
maf = options.get('maf', None)
update_all = options.get('update_all', None)
study_id = options.get('study_id', None)
perm_file = options.get('perm_file', None)
try:
if study_id:
ids_aragwas = [study_id]
else:
# Run through all studies with hdf5 files
ids_aragwas = Study.objects.all().values_list('id', flat=True)
if perm_file:
permutation_thresholds = load_permutation_thresholds(perm_file)
else:
permutation_thresholds = None
counter = 0
for id in ids_aragwas:
try:
study = Study.objects.get(pk=id)
                    if study.n_hits_bonf is None or update_all or study_id:  # Condition for first run through, might be changed to update all
hdf5_file = os.path.join(settings.HDF5_FILE_PATH, 'gwas_results', '%s.hdf5' % study.pk)
perm_threshold = None
if permutation_thresholds:
perm_threshold = permutation_thresholds[study.pk]
hits, thresholds = get_hit_count(hdf5_file, maf=maf, perm_threshold=perm_threshold)
study.n_hits_bonf = hits['bonferroni_hits05']
study.n_hits_thr = hits['thr_e-4']
study.n_hits_fdr = hits['bh_hits']
study.bonferroni_threshold = thresholds['bonferroni_threshold05']
study.bh_threshold = thresholds['bh_threshold']
study.n_hits_total = thresholds['total_associations']
if perm_file:
study.n_hits_perm = hits['permutation_hits']
study.permutation_threshold = thresholds['permutation']
study.save()
self.stdout.write(self.style.SUCCESS('Study %s successfully updated' % study))
counter +=1
                except Exception as err:
                    self.stdout.write(self.style.ERROR('Failed to update study %s: %s' % (id, err)))
print(str(counter) + ' studies updated in the database.')
except Exception as err:
raise CommandError(
'Error saving phenotypes. Reason: %s' % str(err))
|
python
|
"""
Simple training loop; Boilerplate that could apply to any arbitrary neural network,
so nothing in this file really has anything to do with GPT specifically.
"""
import math
import logging
import os
from tqdm import tqdm
import numpy as np
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data.dataloader import DataLoader
from utils import evaluate
import warnings
logger = logging.getLogger(__name__)
class TrainerConfig:
# optimization parameters
max_epochs = 10
batch_size = 64
learning_rate = 3e-4
betas = (0.9, 0.95)
grad_norm_clip = 1.0
weight_decay = 0.1 # only applied on matmul weights
# learning rate decay params: linear warmup followed by cosine decay to 10% of original
lr_decay = False
warmup_samples = 375e6 # these two numbers come from the GPT-3 paper, but may not be good defaults elsewhere
final_samples = 260e9 # (at what point we reach 10% of original LR)
# checkpoint settings
ckpt_path = None
num_workers = 0 # for DataLoader
save_every_epoch = True
def __init__(self, **kwargs):
for k,v in kwargs.items():
setattr(self, k, v)
class Trainer:
def __init__(self, model, train_dataset, test_dataset, config, device='gpu', collate_fn=None, tester=None):
self.model = model
self.train_dataset = train_dataset
self.test_dataset = test_dataset
self.collate_fn = collate_fn
self.config = config
self.tester = tester
# take over whatever gpus are on the system
self.device = 'cpu'
if device in ('cuda','gpu') and torch.cuda.is_available():
self.device = torch.cuda.current_device()
self.model = torch.nn.DataParallel(self.model).to(self.device)
print('We are using the gpu now! device={}'.format(self.device))
self.best_loss = None
def save_checkpoint(self, name):
# DataParallel wrappers keep raw model object in .module attribute
raw_model = self.model.module if hasattr(self.model, "module") else self.model
file = os.path.join(self.config.ckpt_path, name)
logger.info("saving %s", file)
torch.save(raw_model.state_dict(), file)
def train(self):
model, config = self.model, self.config
raw_model = model.module if hasattr(self.model, "module") else model
optimizer = raw_model.configure_optimizers(config)
def run_epoch(split):
is_train = split == 'train'
model.train(is_train)
data = self.train_dataset if is_train else self.test_dataset
loader = DataLoader(data, shuffle=is_train, pin_memory=True,
batch_size=config.batch_size,
collate_fn=self.collate_fn,
num_workers=config.num_workers,
drop_last=is_train)
losses = []
pbar = tqdm(enumerate(loader), total=len(loader)) if is_train else enumerate(loader)
for it, (x, y, p, ind, v) in pbar:
# place data on the correct device
x = x.to(self.device) # input equation
y = y.to(self.device) # output equation
v = v.to(self.device) # number of variables
p = p.to(self.device) # points with indices
ind = ind.to(self.device)
# forward the model
with torch.set_grad_enabled(is_train):
logits, loss = model(x, y, p, ind, v, tokenizer=self.train_dataset.itos)
loss = loss.mean() # collapse all losses if they are scattered on multiple gpus
losses.append(loss.item())
if is_train:
# backprop and update the parameters
model.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.grad_norm_clip)
optimizer.step()
# decay the learning rate based on our progress
if config.lr_decay:
self.samples += y.shape[0] # number of samples processed this step
if self.samples < config.warmup_samples:
# linear warmup
lr_mult = float(self.samples) / float(max(1, config.warmup_samples))
else:
# cosine learning rate decay
progress = float(self.samples - config.warmup_samples) / float(max(1, config.final_samples - config.warmup_samples))
lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
lr = config.learning_rate * lr_mult
for param_group in optimizer.param_groups:
param_group['lr'] = lr
else:
lr = config.learning_rate
# report progress
pbar.set_description(f"epoch {epoch+1} iter {it}: train loss {loss.item():.5f}. lr {lr:e}")
if not is_train:
test_loss = float(np.mean(losses))
logger.info("test loss: %f", test_loss)
return test_loss
self.best_loss = float('inf') if self.best_loss is None else self.best_loss
self.samples = 0 # counter used for learning rate decay
for epoch in range(config.max_epochs):
run_epoch('train')
self.save_checkpoint('latest.pt')
if self.config.save_every_epoch:
self.save_checkpoint(f'epoch{epoch+1:03d}.pt')
if self.test_dataset is not None:
if self.tester is not None:
loader = DataLoader(self.test_dataset, shuffle=False, pin_memory=True,
batch_size=1,
collate_fn=self.collate_fn,
num_workers=self.config.num_workers,
drop_last=False)
test_loss = self.tester(raw_model, loader, self.device)
else:
test_loss = run_epoch('test')
logger.info("test loss: %f", test_loss)
# supports early stopping based on the test loss, or just save always if no test set is provided
            good_model = self.test_dataset is None or test_loss < self.best_loss
            if self.config.ckpt_path is not None and good_model:
                if self.test_dataset is not None:
                    self.best_loss = test_loss
                self.save_checkpoint('best.pt')
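# Illustrative helper (a sketch, not used by the Trainer above): it reproduces the linear-warmup
# plus cosine-decay multiplier computed inside run_epoch, so the schedule can be inspected in
# isolation. `warmup_samples` and `final_samples` mirror the TrainerConfig fields of the same name.
def lr_multiplier(samples, warmup_samples, final_samples):
    """Return the learning-rate multiplier after `samples` training samples have been processed."""
    if samples < warmup_samples:
        # linear warmup from ~0 up to 1
        return float(samples) / float(max(1, warmup_samples))
    # cosine decay from 1 down to a floor of 0.1 of the base learning rate
    progress = float(samples - warmup_samples) / float(max(1, final_samples - warmup_samples))
    return max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))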
|
python
|
# Copyright 2019 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
try:
import ConfigParser as configparser
except ImportError:
import configparser
from tools.statics import BUILD_CONFIG_PATH
def optionxform_arch(option):
return str(option).replace('#ARCH#', platform.machine())
class BuildConfigParser(configparser.ConfigParser): # pylint: disable=too-many-ancestors
def __init__(self, ini_file=BUILD_CONFIG_PATH):
configparser.ConfigParser.__init__(self)
self.ini_file = ini_file
self.optionxform = optionxform_arch
self.read(self.ini_file)
def items(self, section): # pylint: disable=arguments-differ
defaults = self.defaults()
resultlist = []
for item in configparser.ConfigParser.items(self, section):
if item[0] not in defaults:
resultlist.append(item)
return resultlist
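# Example of the optionxform_arch hook: on an x86_64 build host an option key written as
# 'some-option-#ARCH#' in the ini file is transformed to 'some-option-x86_64' before lookup
# (the key name is hypothetical; only the '#ARCH#' substitution comes from the code above).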
|
python
|
import datetime
from django.utils import timezone
from django.db import models
# Create your models here.
from issue.models import IssueModel
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
class PollsModel(models.Model):
class Meta:
db_table = 'POLLS'
vote_id = models.CharField(db_column='VOTE_ID', max_length=200, primary_key=True)
issue = models.ForeignKey(IssueModel, on_delete=models.CASCADE)
user = models.CharField(db_column='USER', max_length=200, blank=True, null=True)
email = models.CharField(db_column='EMAIL', max_length=200, blank=True, null=True)
vote_choice = models.IntegerField(default=0)
vote_date = models.DateTimeField(db_column='VOTE_DATE')
def __str__(self):
return self.vote_id
|
python
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for mocking a FlagValues object."""
import contextlib
import mock
from perfkitbenchmarker import context
from perfkitbenchmarker import flags
FLAGS = flags.FLAGS
class MockFlags(object):
"""Class for mocking a FlagValues object.
Supports setting flag values via __setattr__, getting flag values via
__getattr__, and getting mock Flag-like objects via __getitem__, where the
Flag-like object supports the 'present' and 'value' attributes.
Attempting to get a Flag that does not exist will generate a new MagicMock
with the 'present' attribute initialized to False.
"""
def __init__(self):
super(MockFlags, self).__setattr__('_dict', {})
def __setattr__(self, key, value):
mock_flag = self[key]
mock_flag.present = True
mock_flag.value = value
def __getattr__(self, key):
return self[key].value
def __getitem__(self, key):
if key not in self._dict:
mock_flag = mock.MagicMock()
mock_flag.present = False
self._dict[key] = mock_flag
return self._dict[key]
@contextlib.contextmanager
def PatchFlags(mock_flags=None):
"""Patches read and write access to perfkitbenchmarker.flags.FLAGS.
By patching the underlying FlagValuesProxy instance, this method affects all
modules that have read FLAGS from perfkitbenchmarker.flags. For example, a
module my_module.py may have the code
from perfkitbenchmarker import flags
FLAGS = flags.FLAGS
...
def Func():
my_flag = FLAGS['cloud']
my_value = FLAGS.cloud
FLAGS.cloud = my_override_value
Within the effect of the PatchFlags contextmanager, calling my_module.Func()
will cause my_flag and my_value to be initialized from mock_flags rather than
an actual FlagValues instance. Similarly, mock_flags.cloud will be set with
my_override_value.
Args:
mock_flags: None or MockFlags. If provided, the source of mocked flag
values. If not provided, a new MockFlags object will be used.
Yields:
MockFlags. Either mock_flags or the newly created MockFlags value.
"""
mock_flags = mock_flags or MockFlags()
patch = mock.patch(context.__name__ + '.FlagValuesProxy._thread_flag_values',
new_callable=mock.PropertyMock)
with patch as mock_property:
mock_property.return_value = mock_flags
yield mock_flags
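# Illustrative sketch of the MockFlags behaviour described in its docstring (standalone; not used
# by PatchFlags above, and the flag names 'zone'/'cloud' are just examples):
def _mock_flags_example():
  mf = MockFlags()
  assert mf['zone'].present is False  # unknown flags come back as non-present MagicMocks
  mf.cloud = 'GCP'                    # __setattr__ marks the flag present and stores the value
  assert mf.cloud == 'GCP'            # __getattr__ returns the stored value
  assert mf['cloud'].present is True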
|
python
|
# -*- coding: utf-8 -*-
from selenium import webdriver
import unittest
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def test_it_worked(self):
self.browser.get('http://127.0.0.1:8000/')
self.assertIn('TaskBuster Django Tutorial', self.browser.title)
# self.assertIn('Django: the Web framework for perfectionists with deadlines', self.browser.title)
if __name__ == '__main__':
unittest.main(warnings='ignore')
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-05-24 03:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mooringlicensing.components.proposals.models
class Migration(migrations.Migration):
dependencies = [
('mooringlicensing', '0122_vessel_blocking_owner'),
]
operations = [
migrations.CreateModel(
name='MooringLogDocument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, verbose_name='name')),
('description', models.TextField(blank=True, verbose_name='description')),
('uploaded_date', models.DateTimeField(auto_now_add=True)),
('_file', models.FileField(max_length=512, upload_to=mooringlicensing.components.proposals.models.update_mooring_comms_log_filename)),
],
),
migrations.CreateModel(
name='MooringLogEntry',
fields=[
('communicationslogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='mooringlicensing.CommunicationsLogEntry')),
('mooring', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comms_logs', to='mooringlicensing.Mooring')),
],
bases=('mooringlicensing.communicationslogentry',),
),
migrations.CreateModel(
name='VesselLogDocument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, verbose_name='name')),
('description', models.TextField(blank=True, verbose_name='description')),
('uploaded_date', models.DateTimeField(auto_now_add=True)),
('_file', models.FileField(max_length=512, upload_to=mooringlicensing.components.proposals.models.update_vessel_comms_log_filename)),
],
),
migrations.CreateModel(
name='VesselLogEntry',
fields=[
('communicationslogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='mooringlicensing.CommunicationsLogEntry')),
('vessel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comms_logs', to='mooringlicensing.Vessel')),
],
bases=('mooringlicensing.communicationslogentry',),
),
migrations.AddField(
model_name='vessellogdocument',
name='log_entry',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='documents', to='mooringlicensing.VesselLogEntry'),
),
migrations.AddField(
model_name='mooringlogdocument',
name='log_entry',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='documents', to='mooringlicensing.MooringLogEntry'),
),
]
|
python
|
from py_tests_common import *
def TypeofOperatorDeclaration_Test0():
c_program_text= """
type T= typeof(0);
"""
tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test1():
c_program_text= """
type T= typeof( 55 * 88 );
"""
tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test2():
c_program_text= """
type T= [ typeof( 0.25 ), 64 ];
"""
tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test3():
c_program_text= """
type T= typeof( "str" );
"""
tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test5():
c_program_text= """
fn Foo() : i32;
type T= typeof( Foo() );
"""
tests_lib.build_program( c_program_text )
def Typeof_Test0():
c_program_text= """
fn Baz() : i32 { return 666; }
fn Foo()
{
var typeof( Baz() ) x= Baz(); // Type will be "i32"
var i32 x_copy= x;
}
"""
tests_lib.build_program( c_program_text )
def Typeof_Test1():
c_program_text= """
fn Pass( f64& x ) : f64& { return x; }
fn Foo()
{
var f64 x= 0.52;
var typeof( Pass(x) ) x_copy= x; // Type will be "f64", function reference modifier ignored
}
"""
tests_lib.build_program( c_program_text )
def Typeof_Test2():
c_program_text= """
type PiType= typeof(3.14f); // Typeof for global typedef
var PiType e= 2.718281828f;
"""
tests_lib.build_program( c_program_text )
def Typeof_Test3():
c_program_text= """
struct S {}
var S constexpr s{};
fn GetS() : typeof(s)& // Typeof for function return type
{
return s;
}
"""
tests_lib.build_program( c_program_text )
def Typeof_Test4():
c_program_text= """
struct S {}
var S constexpr s{};
fn CopyS( typeof(s) mut arg ) : S // Typeof for function argument type
{
return move(arg);
}
"""
tests_lib.build_program( c_program_text )
def Typeof_Test5():
c_program_text= """
struct S
{
auto constexpr SomeConstant= "8"c8;
typeof(SomeConstant) field; // Typeof for class field
}
"""
tests_lib.build_program( c_program_text )
def Typeof_Test6():
c_program_text= """
fn Foo()
{
auto &constexpr str= "Some String";
var typeof(str) str_storage= zero_init; // Typeof for string type
static_assert( typeinfo</ typeof(str) />.element_count == size_type(11) ); // Typeof for typeinfo
}
"""
tests_lib.build_program( c_program_text )
def TypeofHasNoEffects_Test0():
c_program_text= """
fn Inc( i32 &mut x ) : i32 { ++x; return x; }
fn Foo()
{
var i32 mut x= 666;
var typeof( Inc(x) ) x_copy= x; // Only type evalueated for expression 'Inc(x)', no actual code generated.
halt if( x != 666 );
halt if( x_copy != 666 );
}
"""
tests_lib.build_program( c_program_text )
tests_lib.run_function( "_Z3Foov" )
def Typeof_ChecksExpression_Test0():
c_program_text= """
type T= typeof( CallUnknownFunction() );
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "NameNotFound" )
assert( errors_list[0].src_loc.line == 2 )
|
python
|
# -*- coding: utf-8 -*-
"""Get image links of the book's cover."""
import logging
from .dev import cache
from .dev.webquery import query as wquery
LOGGER = logging.getLogger(__name__)
UA = 'isbnlib (gzip)'
SERVICE_URL = ('https://www.googleapis.com/books/v1/volumes?q=isbn:{isbn}'
'&fields=items/volumeInfo(imageLinks)&maxResults=1')
@cache
def cover(isbn):
"""Get the urls for covers from Google Books."""
data = wquery(SERVICE_URL.format(isbn=isbn), user_agent=UA)
urls = {}
try:
urls = data['items'][0]['volumeInfo']['imageLinks']
except (KeyError, IndexError): # pragma: no cover
LOGGER.debug('No cover img data for %s', isbn)
return urls
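# Minimal usage sketch (needs network access; the ISBN is an arbitrary example and the keys shown
# are the typical Google Books imageLinks fields, so the exact contents may differ):
#
#   >>> cover('9780156012195')
#   {'smallThumbnail': 'http://...', 'thumbnail': 'http://...'}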
|
python
|
from pytz import timezone
from django.test import TestCase
from django.urls import reverse, resolve
from django.contrib.auth.models import User
from ..models import SSHProfile, Task, TaskResult
class TaskViewTestCase(TestCase):
'''
A base test case of Task view
'''
def setUp(self):
# Setup a test account
self.user = User.objects.create_user(username='johndoe', email='[email protected]', password='test1234')
self.client.login(username='johndoe', password='test1234')
# Setup a test SSH profile
ssh_setup_url = reverse('setup_ssh')
sshProfile = SSHProfile.objects.get(pk=self.user.id)
sshProfile.ssh_server_address = '127.0.0.1'
sshProfile.ssh_username = 'test_user'
sshProfile.save()
url = reverse('list_task')
self.response = self.client.get(url)
class TaskViewTests(TaskViewTestCase):
'''
Verifying Task view
'''
def setUp(self):
super().setUp()
def test_task_view_status_code(self):
self.assertEquals(self.response.status_code, 200)
class TaskViewNoDataTests(TaskViewTestCase):
'''
Testing Task view with no data message
'''
def setUp(self):
super().setUp()
def test_task_view_no_data_message(self):
self.assertContains(self.response, 'Currently, there is no task information.')
class TaskViewWithDataTests(TaskViewTestCase):
'''
Testing Task view with task data
'''
def setUp(self):
super().setUp()
for i in range(5):
Task.objects.create(task_id='task_id_{0}'.format(i+1), task_name='task_name_{0}'.format(i+1), initiated_by=self.user)
def test_running_task_view_with_data_message(self):
url = reverse('list_task')
response = self.client.get(url)
self.assertContains(response, '<span class="badge badge-info">RUNNING</span>', 5)
for i in range(5):
self.assertContains(response, 'task_id_{0}'.format(i+1), 1)
self.assertContains(response, 'task_name_{0}'.format(i+1), 1)
def test_task_view_with_result_message(self):
TaskResult.objects.all().delete()
TaskResult.objects.create(task_id='task_id_1', status='SUCCESS', result={'exc_message':['task_id_1_success']})
TaskResult.objects.create(task_id='task_id_2', status='FAILURE', result={'exc_message':['task_id_2_failure']})
url = reverse('list_task')
response = self.client.get(url)
self.assertContains(response, '<span class="badge badge-success">SUCCESS</span>', 1)
self.assertContains(response, '<span class="badge badge-danger">FAILURE</span>', 1)
self.assertContains(response, '<span class="badge badge-info">RUNNING</span>', 3)
self.assertContains(response, 'task_id_1_success', 1)
self.assertContains(response, 'task_id_2_failure', 1)
def test_task_view_clear_task(self):
'''
Testing clear completed tasks
'''
TaskResult.objects.all().delete()
TaskResult.objects.create(task_id='task_id_1', status='SUCCESS', result={'exc_message':['task_id_1_success']})
TaskResult.objects.create(task_id='task_id_2', status='FAILURE', result={'exc_message':['task_id_2_failure']})
Task.objects.filter(task_id='task_id_1').update(is_notified=True)
Task.objects.filter(task_id='task_id_2').update(is_notified=True)
# Clearing tasks
clear_task_url = reverse('clear_task')
self.client.get(clear_task_url)
list_task_url = reverse('list_task')
response = self.client.get(list_task_url)
self.assertContains(response, 'All completed tasks have been cleared.', 1)
self.assertNotContains(response, '<span class="badge badge-success">SUCCESS</span>')
self.assertNotContains(response, '<span class="badge badge-danger">FAILURE</span>')
self.assertContains(response, '<span class="badge badge-info">RUNNING</span>', 3)
self.assertContains(response, 'task_id_3', 1)
self.assertContains(response, 'task_name_3', 1)
self.assertContains(response, 'task_id_4', 1)
self.assertContains(response, 'task_name_4', 1)
self.assertContains(response, 'task_id_5', 1)
self.assertContains(response, 'task_name_5', 1)
|
python
|
from math import * # This import statement allows for all other functions like log(x), sin(x), etc.
import numpy as np
import math
import matplotlib.pyplot as plt
class Visualizer:
def __init__(self, f_x, f_y):
# List of all colors
self.color_list = ['#e22b2b', '#e88e10', '#eae600', '#88ea00',
'#00eae2', '#0094ea', "#2700ea", '#bf00ea', '#ea0078']
# Create the functions
self.fx = eval("lambda x,y: " + f_x)
self.fy = eval("lambda x,y: " + f_y)
def div(self, x, y, d=0.0001):
return round(((self.fx(x + d, y) - self.fx(x, y)) / d) + ((self.fy(x, y + d) - self.fy(x, y)) / d), 3)
def curl(self, x, y, d=0.0001):
return round(((self.fy(x + d, y) - self.fy(x, y)) / d) - ((self.fx(x, y + d) - self.fx(x, y)) / d), 3)
def plot(self, bound=(-10, 10), skip=1):
c = "#0F0F0F"
space = np.append(np.arange(bound[0], bound[1], skip), [bound[1]])
head_size = (math.fabs(bound[0]) + math.fabs(bound[1])) / 40
for y in space:
for x in space:
plt.scatter([x], [y], c=c, s=[5 / head_size])
plt.arrow(x, y, self.fx(x, y), self.fy(x, y),
head_width=head_size, head_length=head_size, color=c)
return plt
def plot_color(self, bound=(-10, 10), skip=1, prop=0):
head_size = (math.fabs(bound[0]) + math.fabs(bound[1])) / 40 # calculate head size in proportion to bounds
space = np.append(np.arange(bound[0], bound[1], skip), [bound[1]]) # All points to place vectors on
# Loops
for y in space:
for x in space:
v = int(math.sqrt(x ** 2 + y ** 2) / 10 ** prop) # select color based on magnitude
index = len(self.color_list) - 1 if v > len(self.color_list) - 1 else v # prevent IndexError
c = self.color_list[index] # Choose color from list
try:
# Use functions passed to calculate X and Y components
x_val = float(self.fx(float(x), float(y)))
y_val = float(self.fy(float(x), float(y)))
# Make sure no Math Domain Error occurs, Eg. log(x)
except ValueError:
plt.scatter([x], [y], c="WHITE",
s=[5 / head_size]) # Make sure that it's not stretched by missing points
continue
# Place the beginning dot of the arrow
plt.scatter([x], [y], c=c, s=[5 / head_size])
try:
# Calculate angle; magnitude is always 1
angle = math.atan(y_val / x_val) if x_val > 0 else (math.atan(y_val / x_val) + math.pi)
plt.arrow(x, y, math.cos(angle), math.sin(angle),
head_width=head_size, head_length=head_size, color=c) # place the arrow
                # If the x component is 0 (division by zero in atan), use the sign of the Y component for a vertical unit vector
except ZeroDivisionError:
try:
plt.arrow(x, y, 0, y_val / math.fabs(y_val), head_width=head_size, head_length=head_size,
color=c)
# If magnitude is 0, then it's a point
except ZeroDivisionError:
plt.scatter([x], [y], color=c, s=[10])
return plt
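# Illustrative usage (a sketch: the field F(x, y) = (-y, x) is an arbitrary example, and showing
# the plot assumes an interactive matplotlib backend):
if __name__ == '__main__':
    viz = Visualizer("-y", "x")
    print(viz.div(1.0, 2.0))   # ~0.0: a pure rotation field has zero divergence
    print(viz.curl(1.0, 2.0))  # ~2.0: and a constant curl of 2
    viz.plot(bound=(-5, 5), skip=1).show()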
|
python
|
#!/usr/bin/env python
"""
CREATED AT: 2021/12/6
Des:
https://leetcode.com/problems/rotate-list/
https://leetcode.com/explore/learn/card/linked-list/213/conclusion/1295/
GITHUB: https://github.com/Jiezhi/myleetcode
Difficulty: Medium
Tag: ListNode
See:
"""
from typing import Optional
from src.list_node import ListNode, buildListNode
class Solution:
def rotateRight(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
"""
Runtime: 36 ms, faster than 77.75% of Python3
Memory Usage: 14.2 MB, less than 86.24% of Python3
The number of nodes in the list is in the range [0, 500].
-100 <= Node.val <= 100
0 <= k <= 2 * 10^9
:param head:
:param k:
:return:
"""
if head is None:
return None
l = 1
node = head
while node.next is not None:
l += 1
node = node.next
tail = node
k = k % l
if k == 0:
return head
k = l - k - 1
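        # e.g. for head=[1,2,3,4,5] and an original k of 2: l=5, and k now holds the split index
        # l-2-1 = 2 (the node 3); new_head becomes the node 4 and the result is [4,5,1,2,3]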
i = 0
node = head
while i < k:
node = node.next
i += 1
new_head = node.next
node.next = None
tail.next = head
return new_head
def test():
assert Solution().rotateRight(head=buildListNode([1, 2, 3, 4, 5]), k=2) == buildListNode([4, 5, 1, 2, 3])
assert Solution().rotateRight(head=buildListNode([0, 1, 2]), k=4) == buildListNode([2, 0, 1])
assert Solution().rotateRight(head=buildListNode([0, 1, 2]), k=0) == buildListNode([0, 1, 2])
assert Solution().rotateRight(head=buildListNode([0, 1, 2]), k=3) == buildListNode([0, 1, 2])
assert Solution().rotateRight(head=buildListNode([]), k=3) == buildListNode([])
if __name__ == '__main__':
test()
|
python
|
from .backends import JWTAuthentication as auth
from rest_framework import generics, status
from rest_framework.generics import RetrieveUpdateAPIView, ListAPIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import User
from djoser.compat import get_user_email, get_user_email_field_name
from .renderers import UserJSONRenderer
from .serializers import (
LoginSerializer, RegistrationSerializer, UserSerializer,
PasswordResetSerializer, SocialSerializer
)
from .reset_password import RecoverPassword
from ..utils.mailer import Email
from requests.exceptions import HTTPError
from social_django.utils import load_strategy, load_backend
from social_core.backends.oauth import BaseOAuth2, BaseOAuth1
from social_core.exceptions import MissingBackend, AuthForbidden
from drf_yasg.utils import swagger_auto_schema
class RegistrationAPIView(APIView):
# Allow any user (authenticated or not) to hit this endpoint.
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = RegistrationSerializer
    @swagger_auto_schema(
        responses={status.HTTP_200_OK: RegistrationSerializer},
        request_body=RegistrationSerializer
)
def post(self, request):
user = request.data.get('user', {})
# The create serializer, validate serializer, save serializer pattern
# below is common and you will see it a lot throughout this course and
# your own work later on. Get familiar with it.
username = user.get('username', None)
if username is not None:
user['username'] = user['username'].lower()
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
serializer.save()
# Set all variables to be used with send_email function
subject = "Welcome to Authors Haven"
        # Check whether the connection uses https or http
if request.is_secure():
protocol = 'https://'
else:
protocol = 'http://'
# Get host name and append url to login
link = request.get_host() + "/api/users/login"
full_link = protocol+link
contact_message = "To {},".format(serializer.data.get('username')) +\
"\n Thank you for joining Authors Haven. " +\
"We are glad to have you on board. " +\
"Please use the link {}".format(full_link) +\
" to sign in to your new account"
to_email = [serializer.data.get('email')]
email = Email(subject=subject, message=contact_message,
to_email=to_email)
email.send()
return Response(serializer.data, status=status.HTTP_201_CREATED)
class LoginAPIView(APIView):
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = LoginSerializer
    @swagger_auto_schema(
        responses={status.HTTP_200_OK: LoginSerializer},
        request_body=LoginSerializer
)
def post(self, request):
user = request.data.get('user', {})
# Notice here that we do not call `serializer.save()` like we did for
# the registration endpoint. This is because we don't actually have
# anything to save. Instead, the `validate` method on our serializer
# handles everything we need.
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class UserRetrieveUpdateAPIView(RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated,)
renderer_classes = (UserJSONRenderer,)
serializer_class = UserSerializer
def retrieve(self, request, *args, **kwargs):
# There is nothing to validate or save here. Instead, we just want the
# serializer to handle turning our `User` object into something that
# can be JSONified and sent to the client.
serializer = self.serializer_class(request.user)
return Response(serializer.data, status=status.HTTP_200_OK)
def update(self, request, *args, **kwargs):
serializer_data = request.data.get('user', {})
# Here is that serialize, validate, save pattern we talked about
# before.
serializer = self.serializer_class(
request.user, data=serializer_data, partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
class ResetPassword(generics.GenericAPIView):
serializer_class = PasswordResetSerializer
_users = None
def post(self, request):
email = request.data.get('user', {})
serializer = self.serializer_class(
data=email
)
serializer.is_valid(raise_exception=True)
email = serializer.data['email']
user = self.get_user(email)
if not user:
response = {
"status": "400",
"error": "Email address does not exist"
}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
self.send_reset_password_email(user[0])
msg = "Check email to reset password"
data = self.get_user_data(user[0])
token = data['token']
uid = data['uid']
response = {
"status": 200,
"token": token,
"uid": uid,
"message": msg
}
return Response(response, status=status.HTTP_200_OK)
def get_user(self, email):
if self._users is None:
email_field_name = get_user_email_field_name(User)
users = User.objects.filter(
**{email_field_name + '__iexact': email})
self._users = [
u for u in users if u.is_active and u.has_usable_password()
]
return self._users
def send_reset_password_email(self, user):
context = {'user': user}
        recipient = get_user_email(user)
        data = self.get_user_data(recipient)
        return RecoverPassword(self.request,
                               context, recipient, data).send_email()
def get_user_data(self, email):
user = User.objects.get(email=email).username
user_object = User.objects.get(email=email)
uid = User.objects.get(email=email).id
token = user_object.token
data = {
"user": user,
"uid": uid,
"token": token
}
return data
class ResetPasswordConfirmView(generics.UpdateAPIView):
"""
patch:
Confirming a user's reset password.
"""
serializer_class = UserSerializer
def partial_update(self, request, pk=None):
uid = self.request.query_params.get('uid')
token = self.request.query_params.get('token')
if not auth().validate_token(token):
return Response({
'error': 'Invalid token',
'status': 403
}, status=status.HTTP_403_FORBIDDEN)
try:
            user = User.objects.get(id=uid)
except User.DoesNotExist:
return Response({
'error': 'User does not exist',
'status': 400
}, status=status.HTTP_400_BAD_REQUEST)
if request.data['new_password'] != request.data['re_new_password']:
return Response({
'error': 'Ensure both passwords match.',
'status': 400
}, status=status.HTTP_400_BAD_REQUEST)
        user.set_password(request.data['new_password'])
        user.save()
return Response(status=status.HTTP_200_OK,
data={'message': 'Password reset successfully.',
'status': 200})
class UserListApiView(ListAPIView):
permission_classes = (IsAuthenticated,)
queryset = User.objects.all()
serializer_class = UserSerializer
def list(self, request):
queryset = self.get_queryset()
serializer = UserSerializer(
queryset, many=True, context={'request': request})
return Response(serializer.data)
class SocialAuthView(generics.GenericAPIView):
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = SocialSerializer
def post(self, request, *args, **kwargs):
""" interrupt social_auth authentication pipeline"""
# pass the request to serializer to make it a python object
# serializer also catches errors of blank request objects
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
provider = serializer.data.get('provider', None)
strategy = load_strategy(request) # creates the app instance
try:
# load backend with strategy and provider
# from settings(AUTHENTICATION_BACKENDS)
backend = load_backend(
strategy=strategy, name=provider, redirect_uri=None)
except MissingBackend as error:
return Response({
"errors": str(error)
}, status=status.HTTP_400_BAD_REQUEST)
try:
# check type of oauth provider e.g
# facebook is BaseOAuth2 twitter is BaseOAuth1
if isinstance(backend, BaseOAuth1):
                # oauth1 passes access token and secret
access_token = {
"oauth_token": serializer.data.get('access_token'),
"oauth_token_secret": serializer.data.get(
'access_token_secret'),
}
elif isinstance(backend, BaseOAuth2):
# oauth2 only has access token
access_token = serializer.data.get('access_token')
authenticated_user = backend.do_auth(access_token)
except HTTPError as error:
# catch any error as a result of the authentication
return Response({
"error": "Http Error",
"details": str(error)
}, status=status.HTTP_400_BAD_REQUEST)
except AuthForbidden as error:
return Response({
"error": "invalid token",
"details": str(error)
}, status=status.HTTP_400_BAD_REQUEST)
if authenticated_user and authenticated_user.is_active:
# Check if the user you intend to authenticate is active
response = {"email": authenticated_user.email,
"username": authenticated_user.username,
"token": authenticated_user.token}
return Response(status=status.HTTP_200_OK, data=response)
|
python
|
from cli_args_system.args import Args
from cli_args_system.flags_content import FlagsContent
|
python
|
# Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qcore.asserts import assert_eq
from asynq import AsyncScopedValue, asynq, result, async_override
from asynq.batching import DebugBatchItem
v = AsyncScopedValue('a')
@asynq()
def async_scoped_value_helper(inner_val):
@asynq()
def nested():
assert_eq(v.get(), inner_val)
yield DebugBatchItem()
with v.override('c'):
yield DebugBatchItem() # just so other function gets scheduled
assert_eq(v.get(), 'c')
yield DebugBatchItem()
assert_eq(v.get(), 'a')
yield DebugBatchItem()
with v.override(inner_val):
yield DebugBatchItem()
assert_eq(v.get(), inner_val)
result((yield nested.asynq())); return
@asynq()
def async_scoped_value_caller():
yield async_scoped_value_helper.asynq('e'), async_scoped_value_helper.asynq('f')
def test_async_scoped_value():
async_scoped_value_caller()
val = AsyncScopedValue('capybara')
assert_eq('capybara', val.get())
val.set('nutria')
assert_eq('nutria', val.get())
assert_eq('AsyncScopedValue(nutria)', str(val))
assert_eq("AsyncScopedValue('nutria')", repr(val))
def test_exception():
@asynq()
def test_body():
assert_eq(v(), 'a')
yield None
try:
with v.override('b'):
yield None
assert_eq(v(), 'b')
yield None
raise NotImplementedError()
except NotImplementedError:
yield None
pass
yield None
assert_eq(v(), 'a')
test_body()
def test_override():
class TestObject(object):
def __init__(self):
self.v = None
o = TestObject()
o.v = 'a'
@asynq()
def test_body():
assert_eq(o.v, 'a')
yield None
with async_override(o, 'v', 'b'):
assert_eq(o.v, 'b')
yield None
try:
with async_override(o, 'v', 'c'):
assert_eq(o.v, 'c')
yield None
raise NotImplementedError()
except NotImplementedError:
pass
assert_eq(o.v, 'b')
yield None
assert_eq(o.v, 'a')
test_body()
|
python
|
import unittest
from unittest import TestCase
from unittest.mock import patch, Mock
import mysql.connector
from matflow.database.DatabaseTable import DatabaseTable
from matflow.exceptionpackage import MatFlowException
# TODO too complicated for first use of unittests. Come back to this after writing tests for other classes in module Database
class TestDatabaseTable(TestCase):
database_table: DatabaseTable
mysql: Mock
# names of database tables for teardown
# with respect to dependencies
table_names = [
"VersionFile",
"ConfFile",
"ResultFile",
"ActiveVersion",
"Version",
"FolderFile",
"Workflow",
"WorkflowTemplate",
"Server",
]
def setUp(self):
# mock mysql
self.mysql = Mock()
# set up Database
self.database_table = DatabaseTable.get_instance()
def tearDown(self):
# delete all database entries
db = mysql.connector.connect(option_files="../../matflow/database/mydb.conf")
cursor = db.cursor()
for rem in self.table_names:
print("Clear " + rem)
tmp = "DELETE FROM {}"
cursor.execute(tmp.format(rem))
db.commit()
cursor.close()
db.close()
class TestConnection(TestDatabaseTable):
@unittest.skip("Docker needs to be up")
def test_set_get_one(self):
# because some tables have dependencies and tests are generally not in order
# all basic table connection test are in this test function
# Arrange
workflow_set_query = "INSERT INTO Workflow (name, dag) VALUES (%s, %s)"
workflow_get_query = "SELECT * FROM Workflow;"
# Act
self.database_table.set(workflow_set_query, ("testname1", "testdag1"))
# No Idea why this won't count as called, but the test value is indeed written, so it DOES work
# only the assertion is flawed
# self.mysql.connector.connect.assert_called_once()
self.assertEqual(True, True)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .quicktime_hold import QuickTimeHold
from .quicktime_mash import QuickTimeMash
|
python
|
# -*- coding: utf-8 -*-
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from .fields import features_modal
from .fields import fields
from .info_modal import info_modal
from app.custom_widgets import custom_button
store = [
# JSON string of Parameters object
dcc.Store(id="params-data", storage_type="memory"),
# Timestamps for triggering fitting callbacks
dcc.Store(id="trigger-sim", storage_type="memory"),
dcc.Store(id="trigger-fit", storage_type="memory"),
dcc.Store(id="trigger-params-update", storage_type="memory"),
# String for deciding which workflow to run
dcc.Store(id="which-workflow", storage_type="memory"),
# # Bool for choosing to update tables to new values
# dcc.Store(id="update-vals", storage_type="memory"),
# # Bool for triggering table update only after fit
# dcc.Store(id="anticipate-table-update", storage_type="memory", data=False),
]
storage_div = html.Div(id="fitting-store", children=store)
def buttons():
"""Static user interface buttons"""
kwargs = {"outline": True, "color": "dark", "size": "md"}
sim = custom_button(
text="Sim",
icon_classname="fac fa-spectrum fa-lg",
id="sim-button",
tooltip="Simulate spectrum with current parameters",
**kwargs
)
fit = custom_button(
text="Fit",
icon_classname="fac fa-chi-squared fa-lg",
id="fit-button",
tooltip="Run a least-squared fitting analysis",
**kwargs
)
return dbc.ButtonGroup([sim, fit])
def feature_select():
"""Radio buttons for selecting spin system and method tables"""
sys_select_div = html.Div(id="sys-feature-select", className="feature-select")
mth_select_div = html.Div(id="mth-feature-select", className="feature-select")
sys_page_left = html.Span(
"<", id="page-sys-feature-left", className="btn-link hidden"
)
sys_page_right = html.Span(
">", id="page-sys-feature-right", className="btn-link hidden"
)
mth_page_left = html.Span(
"<", id="page-mth-feature-left", className="btn-link hidden"
)
mth_page_right = html.Span(
">", id="page-mth-feature-right", className="btn-link hidden"
)
sys_btns = html.Div(
[sys_page_left, sys_select_div, sys_page_right], className="feature-buttons"
)
mth_btns = html.Div(
[mth_page_left, mth_select_div, mth_page_right], className="feature-buttons"
)
sys_head = html.Div([html.H6("Spin Systems"), sys_btns], className="feature-select")
mth_head = html.Div([html.H6("Methods"), mth_btns], className="feature-select")
return html.Div([sys_head, mth_head], id="feature-select-div")
def features_header():
"""Header for features tab"""
help_button = html.Div(
html.I(className="fas fa-question-circle pl-1 fa-lg"),
id="features-info-modal-button",
style={"cursor": "pointer"},
)
# icon = html.I(className="fas fa-bullseye fa-lg")
text = html.H4("Features", className="hide-label-sm")
title = html.Div([text, help_button])
return html.Div([title, buttons()], className="card-header")
def ui():
page = html.Div(
[
features_header(),
feature_select(),
fields,
info_modal,
features_modal,
storage_div,
]
)
return html.Div(className="left-card", children=page, id="features-body")
features_body = ui()
|
python
|
# -*- coding: utf-8 -*-
from model.contact import Contact
import random
def test_delete_random(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="Nikto"))
oldcontacts = db.get_contact_list()
contact = random.choice(oldcontacts)
app.contact.delete_by_id(contact.id)
newcontacts = db.get_contact_list()
oldcontacts.remove(contact)
assert oldcontacts == newcontacts
if check_ui:
assert sorted(db.get_stripped_contact_list(), key=Contact.id_or_max) == \
sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
def test_delete_all_contacts(app, db, check_ui):
number_of_contacts = len(db.get_contact_list())
if number_of_contacts == 0:
app.contact.create(Contact(firstname="Nikto"))
app.contact.delete_all()
assert len(db.get_contact_list()) == 0
if check_ui:
assert sorted(db.get_stripped_contact_list(), key=Contact.id_or_max) == \
sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
|
python
|
from playwright.sync_api import sync_playwright
import socket
#Stage 2
from ipwhois.net import Net
from ipwhois.asn import IPASN
from pprint import pprint
import ssl
class Solution:
def __init__(self, url):
#1.a
self.user_given_url = url
self.is_https = None
self.domain_name = None
#1.b
self.ip_address = None
#1.c
self.is_redirected = None
self.source_url = None
self.destination_url = None
#2.a
self.asn_result = {}
#2.b
self.cert_subject = {}
self.cert_issuer = {}
#2.c
self.source_html = None
self.source_text = []
self.strip_url()
def debug(self):
print("given URL: ",self.user_given_url)
print("is https: ",self.is_https)
print("domain name: ",self.domain_name)
print("ip address: ",self.ip_address)
def strip_url(self):
if "https" in self.user_given_url:
self.is_https = True
else:
self.is_https = False
ping_this = (self.user_given_url.split("://")[1]).split('/')[0]
ping_this = ping_this.strip('/')
if 'www.' not in ping_this:
ping_this = "www."+ping_this
self.domain_name = ping_this
def take_screenshot(self, filename):
with sync_playwright() as p:
browser = p.chromium.launch()
page = browser.new_page()
page.goto(self.user_given_url)
page.screenshot(path=filename)
browser.close()
def extract_ip(self):
self.ip_address = socket.gethostbyname(self.domain_name)
def find_redirected(self):
with sync_playwright() as p:
browser = p.chromium.launch()
page = browser.new_page()
response = page.goto(self.user_given_url)
if response.request.redirected_from and not response.request.redirected_from.redirected_to.url == self.user_given_url:
self.source_url = self.user_given_url
self.destination_url = response.url
self.is_redirected = True
else:
self.is_redirected = False
browser.close()
def extract_ASN(self):
net = Net(self.ip_address)
obj = IPASN(net)
res = obj.lookup()
self.asn_result = res
def extract_certifications(self):
if self.is_https:
hostname = self.domain_name.split("www.")[1]
ctx = ssl.create_default_context()
with ctx.wrap_socket(socket.socket(), server_hostname=hostname) as s:
s.connect((hostname, 443))
cert = s.getpeercert()
subject = dict(x[0] for x in cert['subject'])
issuer = dict(x[0] for x in cert['issuer'])
self.cert_issuer = issuer
self.cert_subject = subject
def extract_source(self):
with sync_playwright() as p:
browser = p.chromium.launch()
page = browser.new_page()
page.goto(self.user_given_url)
page_source = page.inner_html('html')
source_text_page = page.query_selector_all('p')
source_text = []
            for s in source_text_page:
text = s.inner_text()
if len(text) > 0:
source_text.append(text.strip('\n '))
self.source_html = page_source
self.source_text = source_text
browser.close()
'''
filename = "screenshot.png"
basic_url = "https://google.com"
url = "https://playwright.dev/"
phase1 = Solution(basic_url)
#1.a
phase1.take_screenshot(filename)
#1.b
phase1.extract_ip()
#1.c
phase1.find_redirected()
#2.a
phase1.extract_ASN()
#2.b
phase1.extract_certifications()
#3.a
phase1.extract_source()
#phase1.debug()
'''
|
python
|
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Integration tests for proposals APIs"""
import time
import pytest
import requests
import rethinkdb as r
from rbac.common.logs import get_default_logger
from rbac.providers.common.db_queries import connect_to_db
from tests.utilities.creation_utils import (
create_next_admin,
create_test_role,
create_test_user,
user_login,
)
from tests.utilities.db_queries import wait_for_resource_in_db
from tests.utils import (
add_role_member,
add_role_owner,
approve_proposal,
delete_role_by_name,
delete_user_by_username,
is_group_in_db,
update_manager,
)
# pylint: disable=redefined-outer-name
# this rule is typically disabled as pytest is prone to trigger it with fixtures.
LOGGER = get_default_logger(__name__)
TEST_USERS = [
{
"username": "Link",
"name": "Hero Link",
"password": "P@ssw0rd",
"email": "[email protected]",
},
{
"username": "Sheik",
"name": "Mysterious Sheik",
"password": "P@ssw0rd",
"email": "[email protected]",
},
{
"username": "Zelda",
"name": "Princess Zelda",
"password": "P@ssw0rd",
"email": "[email protected]",
},
{
"username": "Daltus",
"name": "King Daltus",
"password": "P@ssw0rd",
"email": "[email protected]",
},
{
"username": "Midna",
"name": "Princess Midna",
"password": "P@ssw0rd",
"email": "[email protected]",
},
{
"username": "Beedle",
"name": "Trader Beedle",
"password": "P@ssw0rd",
"email": "[email protected]",
},
{
"username": "Epona",
"name": "Trusy Epona",
"password": "P@ssw0rd123",
"email": "[email protected]",
},
]
TEST_ROLES = [{"name": "Hyrule_Heroes"}]
UPDATE_MANAGER_CASES = [
(TEST_USERS[1], "next_admin", 200),
(TEST_USERS[2], TEST_USERS[4], 401),
(TEST_USERS[3], TEST_USERS[2], 401),
(TEST_USERS[1], TEST_USERS[5], 401),
]
ADD_ROLE_MEMBER_CASES = [
(TEST_USERS[5], TEST_USERS[6], 200),
(TEST_USERS[2], TEST_USERS[0], 401),
(TEST_USERS[3], TEST_USERS[5], 200),
]
ADD_ROLE_OWNER_CASES = [
(TEST_USERS[1], TEST_USERS[6], 200),
(TEST_USERS[5], TEST_USERS[0], 401),
(TEST_USERS[4], TEST_USERS[5], 200),
]
@pytest.fixture(autouse=True, scope="module")
def test_role_owner():
"""A pytest fixture that yields the user that owns the test_role used
in this module"""
return TEST_USERS[-1]
@pytest.fixture(autouse=True, scope="module")
def test_requestor():
"""A pytest fixture that yields the user that requests membership in the role
used in this module"""
return TEST_USERS[0]
@pytest.fixture(autouse=True, scope="module")
def test_role():
"""A pytest fixture that yields the role that is used in this module"""
return TEST_ROLES[0]
def fetch_manager_chain(next_id):
"""Get a user's manager chain up to 5 manager's high.
Args:
next_id:
str: the next_id of a user object.
"""
manager_chain = []
db_conn = connect_to_db()
with db_conn as conn:
for _ in range(5):
user_object = (
r.db("rbac")
.table("users")
.filter({"next_id": next_id})
.coerce_to("array")
.run(conn)
)
if user_object:
manager_id = user_object[0]["manager_id"]
if manager_id != "":
manager_object = (
r.db("rbac")
.table("users")
.filter(
(r.row["remote_id"] == manager_id)
| (r.row["next_id"] == manager_id)
)
.coerce_to("array")
.run(conn)
)
if manager_object:
if manager_object[0]:
manager_chain.append(manager_object[0]["next_id"])
next_id = manager_object[0]["next_id"]
else:
break
else:
break
else:
break
return manager_chain
def is_role_ready(role_name, attempts=4, delay=5):
"""Checks to see if the given role is present in rethinkdb. retries for the
given number of attempts before returning False.
Args:
role_name:
str: the name of a given role in NEXT
attempts:
            int: the number of times to check for the role before giving up
- defaults to 4
delay:
int: the number of seconds to wait between attempts
- defaults to 5
Returns:
role_status:
bool:
True: if the role was found in the db
                False: if the role was not found in the db
"""
role_status = False
i = 0
while i < attempts:
if is_group_in_db(role_name):
role_status = True
return role_status
time.sleep(delay)
i += 1
return role_status
def wait_for_rethink(table_count=12, attempts=4, delay=10):
"""Waits for rethink to respond and returns DB status
Args:
table_count:
            int: the number of tables in rethink that are expected to be ready.
- defaults: 12
attempts:
int: the number of attempts to query rethink before returning False.
- defaults: 4
delay:
int: the time in seconds to wait between query attempts.
-default: 10
"""
with connect_to_db() as db_conn:
is_rethink_ready = False
i = 0
while i < attempts:
count = r.db("rbac").wait(wait_for="all_replicas_ready").run(db_conn)
if count == table_count:
is_rethink_ready = True
break
i += 1
time.sleep(delay)
return is_rethink_ready
def setup_module():
"""actions to be performed to configure the database before tests are run.
"""
wait_for_rethink()
with requests.Session() as session:
# create a management chain of users
create_next_admin(session)
user_id = None
for i, user in enumerate(TEST_USERS):
# Sixth User should be outside of the management chain
# Fifth User is the highest manager and should have no managers
if i > 1:
user["manager"] = user_id
response = create_test_user(session, user)
assert response.status_code == 200, response.json()
user_id = response.json()["data"]["user"]["id"]
# save the returned next_id in the TEST_USER object
user["next_id"] = user_id
# create test role(s)
for i, role in enumerate(TEST_ROLES):
            # set the last created test user (the test_role_owner fixture user) as the role owner
role["owners"] = [user_id]
role["administrators"] = [user_id]
response = create_test_role(session, role)
assert response.status_code == 200, response.json()
role_id = response.json()["data"]["id"]
role["role_id"] = role_id
add_member_payload = {"id": user_id}
add_role_member(session, role_id, add_member_payload)
def teardown_module():
"""actions to be performed to clear configurations after tests are run.
"""
# delete the user(s)
for user in TEST_USERS:
delete_user_by_username(user["username"])
# delete the role(s)
for role in TEST_ROLES:
delete_role_by_name(role["name"])
@pytest.mark.parametrize("test_role", TEST_ROLES)
async def test_proposal_approvers_list(test_role_owner, test_requestor, test_role):
"""Test the additions of a role owner's managers to the proposal approvers
list through the proposal API.
    Args:
        test_role_owner:
            dict: the user object of the role owner, including their next_id and credentials.
        test_requestor:
            dict: the user object of the user requesting membership in the role.
        test_role:
            dict: the role object, including the role_id of the role that the user is requesting to join.
"""
with requests.Session() as session:
# make sure the role is in rethink
role_status = is_role_ready(test_role["name"])
# authenticate
assert role_status is True, "Test resources were not put in rethinkDB."
response = user_login(
session, test_role_owner["username"], test_role_owner["password"]
)
assert (
response.status_code == 200
), "Failed to authenticate as role owner. {}".format(response.json())
        # create proposal to add the test requestor to the test_role as a role member
user_id = test_requestor["next_id"]
role_id = test_role["role_id"]
payload = {"id": user_id}
response = session.post(
"http://rbac-server:8000/api/roles/{}/members".format(role_id), json=payload
)
assert (
response.status_code == 200
), "An error occured while creating a role member proposal. {}".format(
response.json()
)
proposal_id = response.json()["proposal_id"]
# call fetch_managers on the role owner
manager_chain = fetch_manager_chain(test_role_owner["next_id"])
# get approvers list for the add member proposal
response = session.get(
"http://rbac-server:8000/api/proposals/{}".format(proposal_id)
)
assert (
response.status_code == 200
), "An error occured while getting the proposal approvers list. {}".format(
response.json()
)
approver_list = response.json()["data"]["approvers"]
# assert that the role owner and all returned managers are in the
# proposal approver list
are_managers_approvers = set(manager_chain).issubset(set(approver_list))
assert (
are_managers_approvers is True
), "Missing role_owner's managers in proposal approvers list:\n{}\n{}".format(
manager_chain, approver_list
)
@pytest.mark.parametrize("user, approver, expected_status_code", UPDATE_MANAGER_CASES)
def test_approve_update_manager(user, approver, expected_status_code):
""" Tests four UpdateUserManager proposal approval scenarios:
1. NextAdmin approves the proposal
2. A random user tries to approve the proposal
3. New manager (related_id of proposal) tries to approve the proposal
4. Manager of NextAdmin tries to approve the proposal
Args:
user: (dict) User object that will be added to the role. Dict should
contain the following field:
{
next_id: str
}
approver: (dict) User object of the approver. Dict should contain the
following fields:
{
username: str
password: str
}
expected_status_code: (int) Expected HTTP response code (either 200 or 401)
"""
user_id = user["next_id"]
update_user_payload = {"id": TEST_USERS[2]["next_id"]}
with requests.Session() as session:
# Create UpdateUserManager proposal
create_next_admin(session)
response = update_manager(session, user_id, update_user_payload)
LOGGER.info(response)
proposal_id = response.json()["proposal_id"]
proposal_exists = wait_for_resource_in_db(
"proposals", "proposal_id", proposal_id
)
if proposal_exists:
# Attempt to approve the UpdateUserManager proposal
if approver != "next_admin":
log_in_payload = {
"id": approver["username"],
"password": approver["password"],
}
user_login(session, log_in_payload["id"], log_in_payload["password"])
approval_response = approve_proposal(session, proposal_id)
assert (
approval_response.status_code == expected_status_code
), "An error occurred while approving UpdateUserManager proposal: {}".format(
approval_response.json()
)
@pytest.mark.parametrize("user, approver, expected_status_code", ADD_ROLE_MEMBER_CASES)
def test_approve_add_member(user, approver, expected_status_code):
""" Tests three AddRoleMember proposal approval scenarios:
1. Role owner approves the proposal
2. A random user tries to approve the proposal
3. Role owner's manager approves the proposal
Args:
user: (dict) User object that will be added to the role. Dict should
contain the following field:
{
next_id: str
}
approver: (dict) User object of the approver. Dict should contain the
following fields:
{
username: str
password: str
}
expected_status_code: (int) Expected HTTP response code (either 200 or 401)
"""
log_in_payload = {"id": approver["username"], "password": approver["password"]}
add_role_member_payload = {"id": user["next_id"]}
role_id = TEST_ROLES[0]["role_id"]
with requests.Session() as session:
# Create AddRoleMember proposal
user_login(session, log_in_payload["id"], log_in_payload["password"])
response = add_role_member(session, role_id, add_role_member_payload)
proposal_id = response.json()["proposal_id"]
proposal_exists = wait_for_resource_in_db(
"proposals", "proposal_id", proposal_id
)
if proposal_exists:
# Attempt to approve the AddRoleMember proposal
approval_response = approve_proposal(session, proposal_id)
assert (
approval_response.status_code == expected_status_code
), "An error occurred while approving AddRoleMember proposal: {}".format(
approval_response.json()
)
@pytest.mark.parametrize("user, approver, expected_status_code", ADD_ROLE_OWNER_CASES)
def test_add_role_owner(user, approver, expected_status_code):
""" Tests three AddRoleOwner proposal approval scenarios:
1. Role owner approves the proposal
2. A random user tries to approve the proposal
3. Role owner's manager approves the proposal
Args:
user: (dict) User object that will be added to the role. Dict should
contain the following field:
{
next_id: str
}
approver: (dict) User object of the approver. Dict should contain the
following fields:
{
username: str
password: str
}
expected_status_code: (int) Expected HTTP response code (either 200 or 401)
"""
log_in_payload = {"id": approver["username"], "password": approver["password"]}
add_role_member_payload = {"id": user["next_id"]}
role_id = TEST_ROLES[0]["role_id"]
with requests.Session() as session:
# Create AddRoleOwner proposal
user_login(session, log_in_payload["id"], log_in_payload["password"])
response = add_role_owner(session, role_id, add_role_member_payload)
proposal_id = response.json()["proposal_id"]
proposal_exists = wait_for_resource_in_db(
"proposals", "proposal_id", proposal_id
)
if proposal_exists:
# Attempt to approve the AddRoleOwner proposal
approval_response = approve_proposal(session, proposal_id)
assert (
approval_response.status_code == expected_status_code
), "An error occurred while approving AddRoleOwner proposal: {}".format(
approval_response.json()
)
|
python
|
class QuestionStructure(object):
    '''The QuestionStructure class will be imported
    from the Questions package and used to model the question structure.
    '''
def __init__(self, q_text, answer, options):
self.q_text = q_text
self.answer = answer
self.options = options
def check_answer(self, answer_provided):
"""Checks whether the answer provided by the user matches
the one set to is_true"""
if not answer_provided or len(answer_provided) == 0:
return 'You haven\'t answered the question'
return self.answer.lower() == answer_provided.lower()
def combine_string(self):
        '''Format how the question and its options look when output.'''
        q_text_string = self.q_text + '\n'
        for letter in self.options:
            q_text_string += str(letter).strip().upper() + ' ==> ' + \
                self.options[letter] + '\n'
return q_text_string
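# A minimal usage sketch (not part of the original module). It assumes that
# `options` is a dict mapping option letters to option text; the question,
# answer, and options below are made up purely for illustration.
if __name__ == '__main__':
    sample_question = QuestionStructure(
        'What is the capital of Kenya?',
        'a',
        {'a': 'Nairobi', 'b': 'Lagos', 'c': 'Kampala'},
    )
    print(sample_question.combine_string())
    print(sample_question.check_answer('A'))  # True - the comparison is case-insensitive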
|
python
|
#!/usr/bin/env python
'''
octoDNS Versions
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
from octodns.cmds.args import ArgumentParser
from octodns.manager import Manager
def main():
parser = ArgumentParser(description=__doc__.split('\n')[1])
parser.add_argument('--config-file', required=True,
help='The Manager configuration file to use')
args = parser.parse_args()
Manager(args.config_file)
if __name__ == '__main__':
main()
|
python
|
import boto3
import os
import json
from datetime import datetime
import logging
'''
AWS Lambda Function to get all data. Requests come from Amazon API Gateway.
'''
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def get_data(id):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.getenv("TABLE_NAME"))
'''
[TASK] Get the item based on the ID from DynamoDB
'''
'''
[END of TASK]
'''
item = response['Item']
return item
def handler(event, context):
logger.info(event)
logger.info('get-data is called')
'''
[TASK] Load the path parameters from the `event` variable.
'''
'''
[END of TASK]
'''
if id:
data = get_data(id)
response = {"statusCode": 200, "body": json.dumps({"data": data})}
return response
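# One possible way to complete the [TASK] blocks above (a hedged sketch only;
# the event shape and key name are assumptions based on the surrounding code,
# not the exercise's official solution):
#
#   In get_data():   response = table.get_item(Key={"id": id})
#   In handler():    id = event["pathParameters"]["id"]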
|
python
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""test_scale_up.py"""
import logging
import subprocess
from ..common import status
from . import test_template
class TestScaleUp(test_template.TestTemplate):
expected_container_count = 1
expected_instance_count = 3
def get_expected_container_count(self):
return self.expected_container_count
def get_expected_min_instance_count(self):
return self.expected_instance_count
def execute_test_case(self):
scale_up(self.params['cliPath'], self.params['cluster'], self.params['topologyName'])
self.expected_container_count += 1
self.expected_instance_count += 1
def pre_check_results(self, physical_plan_json):
instances = physical_plan_json['instances']
instance_count = len(instances)
if instance_count != self.expected_instance_count:
raise status.TestFailure("Found %s instances but expected %s: %s" %
(instance_count, self.expected_instance_count, instances))
def scale_up(heron_cli_path, test_cluster, topology_name):
splitcmd = [
heron_cli_path, 'update', '--verbose', test_cluster, topology_name,
'--component-parallelism=identity-bolt:2'
]
logging.info("Increasing number of component instances: %s", splitcmd)
if subprocess.call(splitcmd) != 0:
raise status.TestFailure("Unable to update topology %s" % topology_name)
logging.info("Increased number of component instances")
|
python
|
from django.shortcuts import render
from rbac.models import Menu
from system.models import SystemSetup
def customerView(request):
if request.method == 'GET':
ret = Menu.getMenuByRequestUrl(url=request.path_info)
ret.update(SystemSetup.getSystemSetupLastData())
return render(request, 'adm/bsm/customer.html', ret)
def customerListView():
return None
def customerDetailView():
return None
def customerDeleteView():
return None
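# A hedged sketch (not part of the original module) of how customerView might
# be wired into a URLconf; the URL path and names below are assumptions:
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('adm/bsm/customer/', views.customerView, name='customer'),
#   ]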
|
python
|
# import unittest
# import os
# from testfixtures import tempdir, compare, TempDirectory
# from tests.context import categorize
# Pair = categorize.ArchivePair
# import json
# class test_archive_pair(unittest.TestCase):
# """
# Ensure that when something needs to be stored it can be gotten and set
# """
# def setUp(self):
# self.d = TempDirectory()
# self.filePath = self.d.path + '/nosql.json'
# self.pair = Pair(self.filePath)
# def tearDown(self):
# self.d.cleanup()
# def test_make_new_pair_file_if_needed_does_create_file_when_doesnt_exist(self):
# self.pair.make_new_pair_file_if_needed()
# self.assertTrue(os.path.isfile(self.filePath))
# def test_read_file_does_read_json_file(self):
# data = [{"key":'12-11',"value":'choora'},{"key":'12-12',"value":'crystal_ring'}]
# with open(self.filePath,'w') as outfile:
# json.dump(data, outfile)
# self.assertEqual(data,self.pair.read_pair_file())
# def test_set_pairs_does_create_from_empty(self):
# data = [{"key":'12-11',"value":'choora'}]
# self.pair.set_pair('12-11','choora')
# self.assertEqual(data,self.pair.read_pair_file())
# def test_set_pairs_appends_to_an_existing_file(self):
# data = [{"key":'12-11',"value":'choora'},{"key":'12-12',"value":'crystal_ring'}]
# with open(self.filePath,'w') as outfile:
# json.dump(data, outfile)
# self.pair.set_pair('12-13','omega')
# final_data = [{"key":'12-11',"value":'choora'},{"key":'12-12',"value":'crystal_ring'},{"key":'12-13',"value":'omega'}]
# data_read = self.pair.read_pair_file()
# self.assertEqual(final_data, data_read)
# def test_set_pair_does_store_key_value_pairs(self):
# key = 'test_key'
# self.pair.set_pair(key,'test_value')
# pair = self.pair.get_pair(key)
# self.assertEqual('test_value', pair['value'])
# self.assertEqual('test_key', pair['key'])
# def test_when_adding_a_pair_previous_pairs_are_maintained(self):
# key = 'test_key2'
# self.pair.set_pair(key,'test_value2')
# self.pair.set_pair('test_key','test_value')
# pair = self.pair.get_pair(key)
# self.assertEqual('test_value2', pair['value'])
# self.assertEqual('test_key2', pair['key'])
# def test_doesnt_add_a_pair_if_it_already_exists(self):
# key = 'test_key'
# self.pair.set_pair(key,'test_value')
# self.pair.set_pair(key,'test_value')
# pair = self.pair.read_pair_file()
# self.assertEqual(len(pair), 1)
# if __name__ == '__main__':
# unittest.main()
|
python
|
# coding: utf-8
from datetime import datetime, timedelta
from .._base import to_base64
from .base import TestCase
from .base import create_server, sqlalchemy_provider, cache_provider
from .base import db, Client, User, Grant
class TestDefaultProvider(TestCase):
def create_server(self):
create_server(self.app)
def prepare_data(self):
self.create_server()
oauth_client = Client(
name='ios', client_id='code-client', client_secret='code-secret',
_redirect_uris='http://localhost/authorized',
)
db.session.add(User(username='foo'))
db.session.add(oauth_client)
db.session.commit()
self.oauth_client = oauth_client
self.authorize_url = (
'/oauth/authorize?response_type=code&client_id=%s'
) % oauth_client.client_id
def test_get_authorize(self):
rv = self.client.get('/oauth/authorize')
assert 'client_id' in rv.location
rv = self.client.get('/oauth/authorize?client_id=no')
assert 'client_id' in rv.location
url = '/oauth/authorize?client_id=%s' % self.oauth_client.client_id
rv = self.client.get(url)
assert 'error' in rv.location
rv = self.client.get(self.authorize_url)
assert b'confirm' in rv.data
def test_post_authorize(self):
url = self.authorize_url + '&scope=foo'
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'invalid_scope' in rv.location
url = self.authorize_url + '&scope=email'
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'code' in rv.location
url = self.authorize_url + '&scope='
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'error=Scopes+must+be+set' in rv.location
def test_invalid_token(self):
rv = self.client.get('/oauth/token')
assert b'unsupported_grant_type' in rv.data
rv = self.client.get('/oauth/token?grant_type=authorization_code')
assert b'error' in rv.data
assert b'code' in rv.data
url = (
'/oauth/token?grant_type=authorization_code'
'&code=nothing&client_id=%s'
) % self.oauth_client.client_id
rv = self.client.get(url)
assert b'invalid_client' in rv.data
url += '&client_secret=' + self.oauth_client.client_secret
rv = self.client.get(url)
assert b'invalid_client' not in rv.data
assert rv.status_code == 401
def test_invalid_redirect_uri(self):
authorize_url = (
'/oauth/authorize?response_type=code&client_id=code-client'
'&redirect_uri=http://localhost:8000/authorized'
'&scope=invalid'
)
rv = self.client.get(authorize_url)
assert 'error=' in rv.location
assert 'Mismatching+redirect+URI' in rv.location
def test_get_token(self):
expires = datetime.utcnow() + timedelta(seconds=100)
grant = Grant(
user_id=1,
client_id=self.oauth_client.client_id,
scope='email',
redirect_uri='http://localhost/authorized',
code='test-get-token',
expires=expires,
)
db.session.add(grant)
db.session.commit()
url = '/oauth/token?grant_type=authorization_code&code=test-get-token'
rv = self.client.get(
url + '&client_id=%s' % (self.oauth_client.client_id)
)
assert b'invalid_client' in rv.data
rv = self.client.get(
url + '&client_id=%s&client_secret=%s' % (
self.oauth_client.client_id,
self.oauth_client.client_secret
)
)
assert b'access_token' in rv.data
grant = Grant(
user_id=1,
client_id=self.oauth_client.client_id,
scope='email',
redirect_uri='http://localhost/authorized',
code='test-get-token',
expires=expires,
)
db.session.add(grant)
db.session.commit()
rv = self.client.get(url, headers={
'authorization': 'Basic ' + to_base64(
'%s:%s' % (
self.oauth_client.client_id,
self.oauth_client.client_secret
)
)
})
assert b'access_token' in rv.data
class TestSQLAlchemyProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, sqlalchemy_provider(self.app))
class TestCacheProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, cache_provider(self.app))
def test_get_token(self):
url = self.authorize_url + '&scope=email'
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'code' in rv.location
code = rv.location.split('code=')[1]
url = (
'/oauth/token?grant_type=authorization_code'
'&code=%s&client_id=%s'
) % (code, self.oauth_client.client_id)
rv = self.client.get(url)
assert b'invalid_client' in rv.data
url += '&client_secret=' + self.oauth_client.client_secret
rv = self.client.get(url)
assert b'access_token' in rv.data
|
python
|
from sqlalchemy import (
Boolean,
Column,
ForeignKey,
Integer,
Text,
Unicode,
UniqueConstraint,
)
from sqlalchemy.orm import relationship
from nbexchange.models import Base
from nbexchange.models.actions import Action
class Assignment(Base):
"""The Assigments known for each course
There is probably more than 1 assignment for each course
Tthere will be multiple users interaction with a single assignment - each interaction can have a different "action"
assigment_code is what comes from formgrader
crs = Course(org_id=1, course_code=$couurse_code)
ass = crs.assignments(assignment_code='test%201')
acc = Action(action='fetch')
acc.user_id = self.get_current_user().id
acc.assignment_id = ass.id
ass.users.append(acc)
"""
__tablename__ = "assignment"
__table_args__ = (UniqueConstraint("course_id", "assignment_code", "active"),)
id = Column(Integer, primary_key=True, autoincrement=True)
assignment_code = Column(Text(), nullable=False, index=True)
active = Column(Boolean, default=True, nullable=False)
## course <-> assignment mappings
# each assignment has just one parent course
course_id = Column(Integer, ForeignKey("course.id", ondelete="CASCADE"), index=True)
# can set 'course.assignments'
course = relationship("Course", back_populates="assignments")
# Maps this assignment to multiple actions [thence to users]
actions = relationship("Action", back_populates="assignment")
# Tracks the notebooks in each assignment
notebooks = relationship("Notebook", backref="assignment", order_by="Notebook.name")
@classmethod
def find_by_pk(cls, db, pk, log=None):
"""Find an Assignment by Primary Key.
Returns None if not found.
"""
if log:
log.debug(f"Assignment.find_by_pk - pk:{pk}")
if pk is None:
raise ValueError(f"Primary Key needs to be defined")
if isinstance(pk, int):
return db.query(cls).filter(cls.id == pk).first()
else:
raise TypeError(f"Primary Keys are required to be Ints")
@classmethod
def find_by_code(cls, db, code, course_id=None, active=True, log=None, action=None):
"""Find an assignment by code.
assignment = orm.Assignment.find_by_code(
db=session, code=assignment_code, course_id=course.id
)
optional params:
'active' True/False - defaults to true
'action' Allows one to restrict the search to a specific action. Not used
if set to None. Defaults to None
Returns None if not found.
"""
if log:
log.debug(
f"Assignment.find_by_code - code:{code} (course_id:{course_id}, active:{active}, action:{action})"
)
if code is None:
raise ValueError(f"code needs to be defined")
if course_id is None:
raise ValueError(f"course_id needs to be defined")
if not isinstance(code, str):
raise TypeError(f"code must be an Str")
if not isinstance(course_id, int):
raise TypeError(f"Course_id must be an Int")
filters = [
cls.assignment_code == code,
cls.course_id == course_id,
cls.active == active,
]
if action:
filters.append(cls.actions.any(Action.action == action))
return db.query(cls).filter(*filters).order_by(cls.id.desc()).first()
@classmethod
def find_for_course(
cls, db, course_id, active=True, log=None, action=None, path=None
):
"""Find the list of assignments for a course.
assignments = orm.Assignment.find_for_course(
db=session, course_id=course.id, log=self.log
)
optional params:
'active' True/False - defaults to true
'action' Allows one to restrict the search to a specific action. Not used
if set to None. Defaults to None
'path' Allows one to restrict the search to a specific location [path] in an action.
Not used if set to None. Defaults to None
Returns None if not found.
"""
if log:
log.debug(
f"Assignment.find_for_course - course_id:{course_id}, active:{active}, action:{action}"
)
if course_id and not isinstance(course_id, int):
raise TypeError(f"Course_id, if specified, must be an Int")
filters = [cls.course_id == course_id, cls.active == active]
if action:
filters.append(cls.actions.any(Action.action == action))
if path:
filters.append(cls.actions.any(Action.location == path))
return db.query(cls).filter(*filters).order_by(cls.id.desc())
def __repr__(self):
return f"Assignment {self.assignment_code} for course {self.course_id}"
|
python
|
# Copyright (c) 2022 Keith Carrara <[email protected]> & Edward Kopp <[email protected]>
# Licensed under the MIT License. See the LICENSE file in the repository root for information.
from abc import abstractmethod
from kivy import Config
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.screenmanager import Screen, ScreenManager, NoTransition
from kivy.uix.layout import Layout
from kivy.uix.floatlayout import FloatLayout
from ._theme import Theme, ThemeColors
_DEFAULT_APP_TITLE = "Plazable UI"
_DESKTOP_MINIMUM_WIDTH = 600
_DESKTOP_MINIMUM_HEIGHT = 400
_DESKTOP_DEFAULT_WIDTH = 1000
_DESKTOP_DEFAULT_HEIGHT = 600
if _DESKTOP_MINIMUM_WIDTH > _DESKTOP_DEFAULT_WIDTH or _DESKTOP_MINIMUM_HEIGHT > _DESKTOP_DEFAULT_HEIGHT:
raise ValueError("Minimum window dimensions cannot be smaller than the default dimensions")
class PlazableApp(App):
def __init__(self, **kwargs):
self._app_title: str
# Get app title
try:
self._app_title = kwargs["title"]
except KeyError:
self._app_title = _DEFAULT_APP_TITLE
        self._mobile: bool
        # Check if mobile; pop the key so it is not forwarded to kivy's App
        mobile = kwargs.pop("mobile", False)
        self._mobile = mobile if isinstance(mobile, bool) else False
# Config for non-mobile if necessary
if not self._mobile:
# Disable multitouch
Config.set("input", "mouse", "mouse,disable_multitouch")
# Set window settings
Window.size = (_DESKTOP_DEFAULT_WIDTH, _DESKTOP_DEFAULT_HEIGHT)
Window.minimum_width = _DESKTOP_MINIMUM_WIDTH
Window.minimum_height = _DESKTOP_MINIMUM_HEIGHT
Window.clearcolor = ThemeColors.BACKGROUND.from_theme_data(Theme.data)
super().__init__(**kwargs)
self.manager = ScreenManager(transition=NoTransition())
self.update_title_status()
def build(self) -> ScreenManager:
return self.manager
def update_title_status(self, new_status: str = "") -> str:
if self._mobile:
raise ValueError("Title status cannot be updated for a mobile app")
new_title: str
if new_status == "":
new_title = self._app_title
else:
new_title = str(new_status) + " - " + self._app_title
self.title = new_title
return new_title
def add_screen(self, screen: Screen) -> None:
self.manager.add_widget(screen)
class PlazableScreen(Screen):
    layout: Layout
    def __init__(self, app: PlazableApp, **kwargs):
        super().__init__(**kwargs)
        self.app = app
        # Give each screen its own layout; a shared class-level FloatLayout
        # would be re-parented by every screen instance that is created.
        self.layout = FloatLayout()
        self.add_widget(self.layout)
self.screen()
@abstractmethod
def screen(self) -> None:
pass
# Load default theme
Theme.load()
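# A minimal usage sketch (not part of the original module): subclass
# PlazableScreen, build the UI inside screen(), and register the screen with
# the app. The Label widget and the names below are illustrative assumptions.
if __name__ == "__main__":
    from kivy.uix.label import Label

    class HomeScreen(PlazableScreen):
        def screen(self) -> None:
            self.layout.add_widget(Label(text="Hello from Plazable"))

    demo_app = PlazableApp(title="Plazable Demo")
    demo_app.add_screen(HomeScreen(demo_app, name="home"))
    demo_app.run()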
|
python
|
import os
import random
import math
import json
import numpy as np
import pandas as pd
from scipy import stats
from scipy import sparse
from scipy.stats import poisson
from scipy.optimize import nnls
from fdr import qvalues
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from util import print_prefix
class BarseqLayoutItem:
def __init__(self, itnum, item_type, experiment_condition):
self.__itnum = itnum
self.__item_type = item_type
self.__experiment_condition = experiment_condition
@property
def itnum(self):
return self.__itnum
@property
def item_type(self):
return self.__item_type
@property
def experiment_condition(self):
return self.__experiment_condition
class BarseqLayout:
def __init__(self, layout_file_name):
self.__layout_file_name = layout_file_name
self.__df = None
self.__load()
# Check if the layout has time zero items
if len(self.time_zero_items) == 0:
raise ValueError(
'No time zero experiments were found in the layout')
def save(self, fname):
with open(fname, 'w') as f:
f.write('\t'.join((
'itnum',
'type',
'name'
)))
f.write('\n')
for item in self.all_items:
f.write(
'\t'.join(
str(x) for x in [
item.itnum,
item.item_type,
item.experiment_condition
]
)
)
f.write('\n')
def __load(self):
self.__df = pd.read_csv(self.__layout_file_name, sep='\t')
@property
def layout_file_name(self):
return self.__layout_file_name
@property
def time_zero_items(self):
df = self.__df
return self.__to_items(df[df.type == 'Time0'])
@property
def lb_items(self):
df = self.__df
return self.__to_items(df[df.type == 'LB'])
@property
def stress_items(self):
df = self.__df
return self.__to_items(df[df.type == 'stress'])
@property
def non_time_zero_items(self):
df = self.__df
return self.__to_items(df[df.type != 'Time0'])
@property
def all_items(self):
return self.__to_items(self.__df)
def __to_items(self, df):
items = []
for _, row in df.iterrows():
items.append(BarseqLayoutItem(row.itnum, row.type, row['name']))
return items
@property
def experiment_types(self):
return self.__df.type.unique()
class BpagItem:
__slots__ = ['barcode_up', 'barcode_dn',
'bpair_read_count', 'up_read_count', 'dn_read_count',
'contig_id', 'pos_from', 'pos_to']
def __init__(self, barcode_up, barcode_dn, bpair_read_count,
up_read_count, dn_read_count,
contig_id, pos_from, pos_to):
self.barcode_up = barcode_up
self.barcode_dn = barcode_dn
self.bpair_read_count = bpair_read_count
self.up_read_count = up_read_count
self.dn_read_count = dn_read_count
self.contig_id = contig_id
self.pos_from = pos_from
self.pos_to = pos_to
class BpagSet:
def __init__(self, blag_file_name):
self.__blag_file_name = blag_file_name
self.__items = []
self.__up_barcode_2_item = {}
self.__load()
@property
def blag_file_name(self):
return self.__blag_file_name
@property
def size(self):
return len(self.__items)
def get_item(self, index):
return self.__items[index]
def find_up_item(self, barcode_up):
return self.__up_barcode_2_item.get(barcode_up)
def __load(self):
df = pd.read_csv(self.__blag_file_name, sep='\t')
for _, row in df.iterrows():
if row.recommended == '+':
item = BpagItem(
row.barcode_up,
row.barcode_dn,
row.bpair_read_count,
row.up_read_count,
row.dn_read_count,
row.up_contig_id,
row.pos_from,
row.pos_to
)
self.__items.append(item)
self.__up_barcode_2_item[item.barcode_up] = item
class TimeZeroItem:
def __init__(self, barcode, time0_experiments_count):
self.__barcode = barcode
self.__read_counts = [0] * time0_experiments_count
@property
def barcode(self):
return self.__barcode
@property
def total_read_count(self):
return sum(self.__read_counts)
@property
def max_read_count(self):
return max(self.__read_counts)
def set_read_count(self, experiment_index, count):
self.__read_counts[experiment_index] = count
class TimeZeroSet:
def __init__(self, bpag_set, barseq_layout, barseq_dir):
self.__time0_itnums = []
for item in barseq_layout.time_zero_items:
self.__time0_itnums.append(item.itnum)
self.__barcode_2_item = {}
self.__items = []
self.__load(bpag_set, barseq_dir)
def filter_items(self, good_item_method):
for i in range(self.size)[::-1]:
if not good_item_method(self.__items[i]):
barcode = self.__items[i].barcode
del self.__barcode_2_item[barcode]
del self.__items[i]
@property
def size(self):
return len(self.__items)
def __load(self, bpag_set, barseq_dir):
for experiment_index, itnum in enumerate(self.__time0_itnums):
bstat_fname = self.__get_bstat_file(itnum, barseq_dir)
if not bstat_fname:
raise ValueError(
'Can not find bstat file for itnum %s in %s directory' % (itnum, barseq_dir))
df = pd.read_csv(bstat_fname, sep='\t')
for _, row in df.iterrows():
if row.recommnended != '+':
continue
if not bpag_set.find_up_item(row.barcode):
continue
self.__register_read_count(
row.barcode, experiment_index, int(row.reads_count))
def __get_bstat_file(self, itnum, barseq_dir):
file_path = None
for file_name in os.listdir(barseq_dir):
if not file_name.endswith('.bstat.tsv'):
continue
if '_' + itnum + '_' in file_name:
file_path = os.path.join(barseq_dir, file_name)
break
return file_path
@property
def experiment_count(self):
return len(self.__time0_itnums)
def __register_read_count(self, barcode, exp_index, read_count):
t0_item = self.__barcode_2_item.get(barcode)
if not t0_item:
t0_item = TimeZeroItem(barcode, self.experiment_count)
self.__barcode_2_item[barcode] = t0_item
if t0_item:
t0_item.set_read_count(exp_index, read_count)
class Fitness:
# GFF_FILE = None
# BPAG_FILE = None
SCORE_TYPE_MEAN = 0
SCORE_TYPE_NNLS = 1
SCORE_TYPE_C_NNLS = 2
SCORE_TYPE_RIDGE = 3
SCORE_TYPE_LASSO = 4
SCORE_TYPE_ELASTIC_NET = 5
SCORE_TYPE_NAMES = ['mean', 'nnls', 'cnnls', 'ridge', 'lasso', 'enet']
SPARSE_REGRESSION_MATRIX = {
SCORE_TYPE_MEAN: False,
SCORE_TYPE_NNLS: False,
SCORE_TYPE_C_NNLS: False,
SCORE_TYPE_RIDGE: True,
SCORE_TYPE_LASSO: True,
SCORE_TYPE_ELASTIC_NET: True
}
MIN_TIME0_READ_COUNT = 10
CONDITIONS = {}
BARCODE_2_INDEX = {}
BARCODE_COUNTS = []
BARCODE_INDICES = []
BARCODE_REPLICATES = []
GENES = []
GENOME_SEGMENTS = []
RIDGE_PARAM_ALPHA = 1
LASSO_PARAM_ALPHA = 1
ELASTIC_NET_PARAM_ALPHA = 1
ELASTIC_NET_PARAM_L1_RATIO = 0.5
GSCORE_VARIENCE_ALPHA = 0.02
#######################
# Init
#######################
@staticmethod
def init(barseq_layout, barseq_dir, bpag_fname, genes_gff_fname=None, gene_pairs=False):
Fitness.initConditions(barseq_layout, barseq_dir)
t0Indeces = Fitness.getTimeZeroIndeces()
# Load data
print_prefix("loadBPAG:...")
Fitness.loadBPAG(bpag_fname)
print('Done!')
Fitness.loadCounts()
Fitness.buildREF_TIME0(t0Indeces)
Fitness.cleanBARCODE_COUNTS(t0Indeces)
if genes_gff_fname:
Fitness.loadGenes(genes_gff_fname)
if gene_pairs:
Fitness.add_gene_pairs()
Fitness.associateGenesWithBarcodes()
# cleanGENES()
Fitness.buildGENOME_SEGMENTS()
@staticmethod
def initConditions(barseq_layout, barseq_dir):
for index, item in enumerate(barseq_layout.all_items):
Fitness.CONDITIONS[item.itnum] = {
"index": index,
"type": item.item_type,
"desc": item.experiment_condition}
for file_name in os.listdir(barseq_dir):
if not file_name.endswith('.bstat.tsv'):
continue
vals = file_name.split(".")
for val in vals:
if val in Fitness.CONDITIONS:
itnum = val
Fitness.CONDITIONS[itnum]['file'] = os.path.join(
barseq_dir, file_name)
break
#######################
    # Getters
#######################
@staticmethod
def getTimeZeroIndeces():
indeces = []
for itnum in Fitness.CONDITIONS:
it = Fitness.CONDITIONS[itnum]
if it['type'] == 'Time0':
indeces.append(it['index'])
return indeces
@staticmethod
def get_sample(itIndex):
sample = []
for bIndex in Fitness.BARCODE_INDICES:
row = Fitness.BARCODE_COUNTS[bIndex]
count = row['counts'][itIndex]
sample.append(count)
return sample
@staticmethod
def get_tzero_sample():
sample = []
for bIndex in Fitness.BARCODE_INDICES:
row = Fitness.BARCODE_COUNTS[bIndex]
count = row['time0']
sample.append(count)
return sample
@staticmethod
def getTotalCount(itIndex):
total = 0
for bIndex in Fitness.BARCODE_INDICES:
row = Fitness.BARCODE_COUNTS[bIndex]
total += row['counts'][itIndex]
return total
@staticmethod
def getItNum(itIndex):
for itnum in Fitness.CONDITIONS:
it = Fitness.CONDITIONS[itnum]
if it['index'] == itIndex:
return itnum
return None
#######################
# Exporters
#######################
@staticmethod
def save_gscore_base(fname):
with open(fname, 'w') as f:
f.write(
'\t'.join((
'gene_index',
'covering_fragment_count',
'name',
'locus_tag',
'gene_type',
'contig_id',
'pos_from',
'pos_to',
'strand',
'product',
'note',
'description',
'barcodes'
))
)
f.write('\n')
for gene_index, gene in enumerate(Fitness.GENES):
vals = [
gene_index,
len(gene['barcodeIndeces']),
gene['name'],
gene['locusTag'],
gene['geneType'],
gene['contigId'],
gene['posFrom'],
gene['posTo'],
gene['strand'],
gene['product'],
gene['note'],
gene['description'],
','.join(Fitness.BARCODE_COUNTS[i]['barcode']
for i in gene['barcodeIndeces'])
]
f.write('\t'.join(str(x) for x in vals))
f.write('\n')
@staticmethod
def save_fscore_base(fname):
with open(fname, 'w') as f:
f.write('\t'.join((
'barcode',
'contig_id',
'pos_from',
'pos_to',
't0_count',
't0_reads_avg',
't0_reads_total',
't0_reads',
't0_itnums'
)))
f.write('\n')
time_zero_indeces = Fitness.getTimeZeroIndeces()
for item in Fitness.BARCODE_COUNTS:
time_zero_vals = [item['counts'][i] for i in time_zero_indeces]
time_zero_itnums = [Fitness.getItNum(
i) for i in time_zero_indeces]
vals = [
item['barcode'],
item['contigId'],
item['posFrom'],
item['posTo'],
len(time_zero_vals),
sum(time_zero_vals) / float(len(time_zero_vals)),
sum(time_zero_vals),
','.join(str(x) for x in time_zero_vals),
','.join(str(x) for x in time_zero_itnums)
]
f.write('\t'.join(str(x) for x in vals))
f.write('\n')
@staticmethod
def save_fscores(score_fname, fs, ss, ts):
with open(score_fname, 'w') as f:
f.write('%s\n' % '\t'.join(
['barcode', 'score', 'stress_read_count', 't0_total_read_count']))
for index, score in enumerate(fs):
barcode = Fitness.BARCODE_COUNTS[index]['barcode']
f.write('%s\n' % '\t'.join(str(x)
for x in [barcode, score, ss[index], ts[index]]))
@staticmethod
def save_gscores(score_fname, score_types, gss):
with open(score_fname, 'w') as f:
column_names = ['index', 'gene_name', 'locus_tag']
for score_type in score_types:
column_names.append(
'score_' + Fitness.SCORE_TYPE_NAMES[score_type])
f.write('\t'.join(column_names) + '\n')
for index, gene in enumerate(Fitness.GENES):
vals = [index, gene['name'], gene['locusTag']]
for gs in gss:
vals.append(gs[index])
f.write('\t'.join(str(x) for x in vals) + '\n')
@staticmethod
def save_gstat(fname, gstats):
with open(fname, 'w') as f:
column_names = ['index',
'gene_name',
'locus_tag',
'score_type',
'score',
'gscore_bootstrap_avg',
'gscore_bootstrap_var',
'gscore_poisson_avg',
'gscore_poisson_var',
'gscore_var',
'fcount',
'gscore_var_moderated',
'tscore',
'pvalue',
'qvalue',
'set_qvalue']
f.write('\t'.join(column_names) + '\n')
for index, gstat in enumerate(gstats):
vals = []
vals.append(index)
for key in column_names[1:]:
vals.append(gstat[key])
f.write('\t'.join(str(x) for x in vals) + '\n')
#######################
# Loaders
#######################
@staticmethod
def loadBPAG(bpag_fname):
del Fitness.BARCODE_COUNTS[:]
df = pd.read_csv(bpag_fname, sep='\t')
for _, row in df.iterrows():
if row.recommended != '+':
continue
Fitness.BARCODE_COUNTS.append({
'pairBarcodeUp': row.barcode_up,
'pairBarcodeDn': row.barcode_dn,
"barcode": row.barcode_up,
"contigId": row.up_contig_id,
"posFrom": row.pos_from,
"posTo": row.pos_end,
"time0": 0,
"counts": [0] * len(Fitness.CONDITIONS)
})
Fitness.updateBARCODE_INDICES()
@staticmethod
def updateBARCODE_INDICES():
Fitness.BARCODE_2_INDEX.clear()
del Fitness.BARCODE_INDICES[:]
del Fitness.BARCODE_REPLICATES[:]
for index, br in enumerate(Fitness.BARCODE_COUNTS):
Fitness.BARCODE_2_INDEX[br['barcode']] = index
Fitness.BARCODE_INDICES.append(index)
Fitness.BARCODE_REPLICATES.append(1)
@staticmethod
def loadCounts():
for itnum in Fitness.CONDITIONS:
it = Fitness.CONDITIONS[itnum]
print('\t loading: ', it['file'])
df = pd.read_csv(it['file'], sep='\t')
for _, row in df.iterrows():
if row.sim_recommended != '+':
continue
barcode = row.barcode
count = row.reads_count
if barcode in Fitness.BARCODE_2_INDEX:
barcodeIndex = Fitness.BARCODE_2_INDEX[barcode]
itIndex = it['index']
Fitness.BARCODE_COUNTS[barcodeIndex]['counts'][itIndex] = count
@staticmethod
def cleanBARCODE_COUNTS(time0Indeces):
len0 = len(Fitness.BARCODE_COUNTS)
delCount = 0
for i in range(len0)[::-1]:
counts = Fitness.BARCODE_COUNTS[i]['counts']
hasData = False
for t0Index in time0Indeces:
if counts[t0Index] >= Fitness.MIN_TIME0_READ_COUNT:
hasData = True
break
if not hasData:
del Fitness.BARCODE_COUNTS[i]
delCount += 1
Fitness.updateBARCODE_INDICES()
@staticmethod
def add_gene_pairs():
gene_pairs = []
for i in range(len(Fitness.GENES) - 1):
g1 = Fitness.GENES[i]
g2 = Fitness.GENES[i + 1]
if g1['contigId'] == g2['contigId']:
gene_pair = {
'contigId': g1['contigId'],
'geneType': '%s-%s' % (g1['geneType'], g2['geneType']),
'posFrom': min(g1['posFrom'], g2['posFrom']),
'posTo': max(g1['posTo'], g2['posTo']),
'strand': '%s-%s' % (g1['strand'], g2['strand']),
'name': '%s-%s' % (g1['name'], g2['name']),
'product': '%s;%s' % (g1['product'], g2['product']),
'locusTag': '%s-%s' % (g1['locusTag'], g2['locusTag']),
'note': '%s;%s' % (g1['note'], g2['note']),
'description': '%s;%s' % (g1['description'], g2['description']),
'index': 0,
'barcodeIndeces': []
}
gene_pairs.append(gene_pair)
for gene_pair in gene_pairs:
Fitness.GENES.append(gene_pair)
Fitness.GENES.sort(key=lambda x: x['posFrom'], reverse=False)
@staticmethod
def loadGenes(genes_gff_fname):
del Fitness.GENES[:]
        # First read all features that have a "Parent" property and hash them
id2features = {}
with open(genes_gff_fname, 'r') as f:
for line in f:
if line.startswith('#'):
continue
vals = line.split('\t')
f_contig = vals[0]
f_pos_from = int(vals[3])
f_pos_to = int(vals[4])
f_strand = vals[6]
f_description = vals[8].strip()
f_parent = None
f_name = ""
f_product = ""
f_note = ""
f_pseudo = False
for dval in f_description.split(";"):
if dval.startswith("Parent="):
f_parent = dval[len("Parent="):].strip()
elif dval.startswith("ID="): # MOD
f_name = dval[len("ID="):].strip() # MOD
# elif dval.startswith("gene="):
# f_name = dval[len("gene="):].strip()
elif dval.startswith("product="):
f_product = dval[len("product="):].strip()
elif dval.startswith("Note="):
note = dval[len("Note="):].strip()
elif 'pseudo=true' in dval:
f_pseudo = True
# Set feature as its own parent if it has no parent
if f_name and not f_parent: # MOD
f_parent = f_name # MOD
if f_parent:
features = id2features.get(f_parent)
if not features:
features = []
id2features[f_parent] = features
features.append({
'gene_type': vals[2],
'gene_name': f_name,
'contig': f_contig,
'pos_from': f_pos_from,
'pos_to': f_pos_to,
'strand': f_strand,
'pseudo': f_pseudo,
'product': f_product,
'note': f_note,
'description': f_description
})
# Now read all "gene" features and collect of children
with open(genes_gff_fname, 'r') as f:
for line in f:
if line.startswith('#'):
continue
vals = line.split('\t')
if vals[2] == 'gene':
gene_contig = vals[0]
gene_pos_from = int(vals[3])
gene_pos_to = int(vals[4])
gene_strand = vals[6]
gene_description = vals[8].strip()
gene_locus_tag = None
gene_id = None
for term in vals[8].split(';'):
try:
(key, value) = term.split('=')
if key == 'locus_tag':
gene_locus_tag = value.strip()
elif key == 'ID':
gene_id = value.strip()
except ValueError:
continue
if not gene_id:
continue
features = id2features.get(gene_id)
if not features:
continue
                    # collect the features related to this gene whose locations fall within the gene
gene_features = []
for f in features:
if f['contig'] != gene_contig:
continue
if f['strand'] != gene_strand:
continue
if f['pos_from'] < gene_pos_from:
continue
if f['pos_to'] > gene_pos_to:
continue
gene_features.append(f)
if len(gene_features) == 0:
continue
# if there are more than one feature, check that the type of feature is the same
gene_types = {}
for f in gene_features:
gene_types[f['gene_type']] = 1
if len(gene_types) > 1:
raise ValueError(
"More than one gene type for a given gene: " + gene_id)
f = gene_features[0]
Fitness.GENES.append({
'contigId': f['contig'],
'geneType': f['gene_type'],
'posFrom': f['pos_from'],
'posTo': f['pos_to'],
'strand': f['strand'],
'name': f['gene_name'],
'product': f['product'],
'locusTag': gene_locus_tag,
'note': f['note'],
'description': f['description'],
'index': 0,
'barcodeIndeces': []
})
Fitness.GENES.sort(key=lambda x: x['posFrom'], reverse=False)
@staticmethod
def _loadGenes(genes_gff_fname):
del Fitness.GENES[:]
gene_found = False
gene_contig = None
gene_pos_from = None
gene_pos_to = None
gene_strand = None
gene_locus_tag = None
with open(genes_gff_fname, 'r') as f:
for line in f:
if line.startswith('#'):
continue
vals = line.split('\t')
gene_type = vals[2]
if gene_type == 'gene':
gene_contig = vals[0]
gene_pos_from = int(vals[3])
gene_pos_to = int(vals[4])
gene_strand = vals[6]
gene_found = True
gene_locus_tag = None
for term in vals[8].split(';'):
(key, value) = term.split('=')
if key == 'locus_tag':
gene_locus_tag = value.strip()
if gene_type == 'CDS' or 'rna' in gene_type.lower():
if gene_found:
f_contig = vals[0]
f_pos_from = int(vals[3])
f_pos_to = int(vals[4])
f_strand = vals[6]
f_description = vals[8].strip()
if f_contig == gene_contig and f_pos_from == gene_pos_from and f_pos_to == gene_pos_to and f_strand == gene_strand:
f_locus_tag = gene_locus_tag
f_name = ""
f_product = ""
f_note = ""
for dval in f_description.split(";"):
if dval.startswith("gene="):
f_name = dval[len("gene="):].strip()
elif dval.startswith("product="):
f_product = dval[len("product="):].strip()
elif dval.startswith("Note="):
f_note = dval[len("Note="):].strip()
Fitness.GENES.append({
'contigId': f_contig,
'geneType': gene_type,
'posFrom': f_pos_from,
'posTo': f_pos_to,
'strand': f_strand,
'name': f_name,
'product': f_product,
'locusTag': f_locus_tag,
'note': f_note,
'description': f_description,
'index': 0,
'barcodeIndeces': []
})
gene_found = False
@staticmethod
def associateGenesWithBarcodes():
for bIndex, bcode in enumerate(Fitness.BARCODE_COUNTS):
for gene in Fitness.GENES:
if bcode['contigId'] == gene['contigId']:
if bcode['posFrom'] <= gene['posFrom'] and bcode['posTo'] >= gene['posTo']:
gene['barcodeIndeces'].append(bIndex)
# Delete genes that do not have associated barcodes
@staticmethod
def cleanGENES():
len0 = len(Fitness.GENES)
delCount = 0
for i in range(len0)[::-1]:
gene = Fitness.GENES[i]
if len(gene['barcodeIndeces']) == 0:
del Fitness.GENES[i]
delCount += 1
#######################
# Core Builders
#######################
@staticmethod
def buildGENOME_SEGMENTS():
del Fitness.GENOME_SEGMENTS[:]
prevGene = None
geneIndeces = []
for gIndex, gene in enumerate(Fitness.GENES):
if prevGene is not None:
hasSameBarcode = False
for bIndex1 in prevGene['barcodeIndeces']:
for bIndex2 in gene['barcodeIndeces']:
if bIndex1 == bIndex2:
hasSameBarcode = True
break
if not hasSameBarcode:
# new segment
if len(geneIndeces) > 0:
Fitness.GENOME_SEGMENTS.append({
'geneIndeces': geneIndeces
})
geneIndeces = []
geneIndeces.append(gIndex)
prevGene = gene
# final check
if len(geneIndeces) > 0:
Fitness.GENOME_SEGMENTS.append({
'geneIndeces': geneIndeces
})
@staticmethod
def buildREF_TIME0(time0Indeces):
for bIndex in Fitness.BARCODE_INDICES:
row = Fitness.BARCODE_COUNTS[bIndex]
counts = row['counts']
total = 0
for t0Index in time0Indeces:
total += counts[t0Index]
Fitness.BARCODE_COUNTS[bIndex]['time0'] = total
            # No need to adjust by total...
@staticmethod
def build_fscores(sample, sampleT0):
scores = []
stotal = sum(sample)
stotalT0 = sum(sampleT0)
for index, t in enumerate(sampleT0):
s = sample[index]
score = (s + 1.0) / (t + 1.0) * stotalT0 / stotal
scores.append(score)
# normalize by median
scoreMedian = np.median(scores)
for index, val in enumerate(scores):
scores[index] = math.log(val * 1.0 / scoreMedian, 2)
return scores
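    # Worked example with made-up numbers (not from the original source): for
    # sample = [10, 30] and sampleT0 = [20, 20], stotal = stotalT0 = 40, so the
    # raw ratios are (10+1)/(20+1) ~= 0.524 and (30+1)/(20+1) ~= 1.476; the
    # median is ~1.0, and the final log2 scores are roughly -0.93 and +0.56.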
@staticmethod
def genes_2_sparse_regression_matrix(fscores):
reg_g_indices = []
reg_f_indices = []
reg_fg_values = []
reg_fg_matrix = None
reg_f_scores = []
# define total list of barcode indices
b_indices_hash = {}
b_index_offsets = [-1] * len(fscores)
for gene in Fitness.GENES:
for b_index in gene['barcodeIndeces']:
b_indices_hash[str(b_index)] = b_index
for b_index in b_indices_hash.values():
b_index_offsets[b_index] = len(reg_f_scores)
for i in range(Fitness.BARCODE_REPLICATES[b_index]):
reg_f_scores.append(fscores[b_index])
# build matrix of presence/absence
for g_index, gene in enumerate(Fitness.GENES):
for b_index in gene['barcodeIndeces']:
b_index_offset = b_index_offsets[b_index]
for i in range(Fitness.BARCODE_REPLICATES[b_index]):
reg_g_indices.append(g_index)
reg_f_indices.append(b_index_offset + i)
reg_fg_values.append(1)
reg_fg_matrix = sparse.coo_matrix(
(reg_fg_values, (reg_f_indices, reg_g_indices)))
reg_g_indices = np.array(reg_g_indices)
reg_f_indices = np.array(reg_f_indices)
reg_f_scores = np.array(reg_f_scores)
return (reg_f_scores, reg_fg_matrix)
@staticmethod
def genes_2_deep_regression_matrix(gene_indices, fscores):
# fragments (barcodes) indices. It may have multiple copies of
        # the same index (because of bootstrap)
reg_f_indices = []
# indexes of genes to be used in the regression
reg_g_indices = []
# array of fragment scores
reg_f_scores = []
# 2d array (fragments vs genes) of ones and zeros : 1 means that a given fragment
# covers a given gene completely
reg_fg_matrix = []
# define total list of barcode indices
b_indices_hash = {}
for g_index in gene_indices:
for b_index in Fitness.GENES[g_index]['barcodeIndeces']:
b_indices_hash[str(b_index)] = b_index
for b_index in b_indices_hash.values():
for i in range(Fitness.BARCODE_REPLICATES[b_index]):
reg_f_indices.append(b_index)
        # build a subset of barcode (fragment) scores corresponding to reg_f_indices
for f_index in reg_f_indices:
reg_f_scores.append(fscores[f_index])
# build matrix of presence/absence
for g_index in gene_indices:
row = [0] * len(reg_f_indices)
row_fragment_count = 0
for i, b_index1 in enumerate(reg_f_indices):
for b_index2 in Fitness.GENES[g_index]['barcodeIndeces']:
if b_index1 == b_index2:
row[i] = 1
row_fragment_count += 1
if row_fragment_count > 0:
reg_fg_matrix.append(row)
reg_g_indices.append(g_index)
# convert to numpy array
reg_f_indices = np.array(reg_f_indices)
reg_g_indices = np.array(reg_g_indices)
reg_f_scores = np.array(reg_f_scores)
reg_fg_matrix = np.array(reg_fg_matrix)
reg_fg_matrix = reg_fg_matrix.T
return (reg_f_indices, reg_g_indices, reg_f_scores, reg_fg_matrix)
@staticmethod
def build_gscores(fscores, score_type):
# print('\t doing: ' + Fitness.SCORE_TYPE_NAMES[score_type])
gscores = [0] * len(Fitness.GENES)
if Fitness.SPARSE_REGRESSION_MATRIX[score_type]:
(reg_f_scores, reg_fg_matrix) = Fitness.genes_2_sparse_regression_matrix(fscores)
sample_n = len(reg_f_scores)
estimator = None
if score_type == Fitness.SCORE_TYPE_RIDGE:
alpha = Fitness.RIDGE_PARAM_ALPHA
estimator = Ridge(
alpha=alpha, fit_intercept=False, solver='lsqr')
elif score_type == Fitness.SCORE_TYPE_LASSO:
alpha = Fitness.LASSO_PARAM_ALPHA / sample_n / 2
estimator = Lasso(alpha=alpha, fit_intercept=False)
elif score_type == Fitness.SCORE_TYPE_ELASTIC_NET:
alpha = Fitness.ELASTIC_NET_PARAM_ALPHA / sample_n / 2
l1_ratio = Fitness.ELASTIC_NET_PARAM_L1_RATIO
estimator = ElasticNet(
alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False)
estimator.fit(reg_fg_matrix, reg_f_scores)
for i, gscore in enumerate(estimator.coef_):
gscores[i] = gscore
else:
for genome_segment in Fitness.GENOME_SEGMENTS:
gene_indices = list(genome_segment['geneIndeces'])
(reg_f_indices, reg_g_indices, reg_f_scores, reg_fg_matrix) = \
Fitness.genes_2_deep_regression_matrix(
gene_indices, fscores)
if reg_g_indices.shape[0] == 0 and reg_f_indices.shape[0] == 0:
continue
scores = []
if score_type == Fitness.SCORE_TYPE_MEAN:
f_counts = np.array([max(x, 1)
for x in np.sum(reg_fg_matrix, axis=0)])
scores = np.dot(reg_f_scores, reg_fg_matrix) / f_counts
elif score_type == Fitness.SCORE_TYPE_NNLS:
x = nnls(reg_fg_matrix, reg_f_scores)
scores = x[0]
elif score_type == Fitness.SCORE_TYPE_C_NNLS:
x = nnls(reg_fg_matrix, reg_f_scores)
scores_direct = x[0]
x = nnls(reg_fg_matrix, reg_f_scores * (-1))
scores_reverse = x[0]
for i, score_direct in enumerate(scores_direct):
score_reverse = -scores_reverse[i]
score = 0
if score_direct != 0 and score_reverse == 0:
score = score_direct
elif score_direct == 0 and score_reverse != 0:
score = score_reverse
scores.append(score)
for i, gscore in enumerate(scores):
g_index = reg_g_indices[i]
gscores[g_index] = gscore
return gscores
@staticmethod
def buildPoissonNoisedSample(sample):
pSample = []
for count in sample:
pcount = poisson.rvs(count, size=1)[0] if count > 0 else 0
pSample.append(pcount)
return pSample
@staticmethod
def bootstrapSampleReadCounts(sample):
bootSample = [0] * len(sample)
total = sum(sample)
indices = list(range(len(sample)))
probs = [0] * len(sample)
for i, val in enumerate(sample):
probs[i] = (val) * 1.0 / total
bIndeces = np.random.choice(indices, total, replace=True, p=probs)
for bIndex in bIndeces:
bootSample[bIndex] += 1
return bootSample
@staticmethod
def bootstrapBARCODE_INDICES():
barcodesNumber = len(Fitness.BARCODE_COUNTS)
indices = list(range(barcodesNumber))
probs = [0] * barcodesNumber
for i, val in enumerate(indices):
probs[i] = 1.0 / barcodesNumber
BARCODE_INDICES = np.random.choice(
indices, barcodesNumber, replace=True, p=probs)
for i in range(len(Fitness.BARCODE_REPLICATES)):
Fitness.BARCODE_REPLICATES[i] = 0
for bIndex in BARCODE_INDICES:
Fitness.BARCODE_REPLICATES[bIndex] += 1
@staticmethod
def build_noised_gscores(n_cycles, sample, score_type, do_bootstrap_indices, do_bootstrap_read_counts, do_poisson_noise, fl_noise_t0):
gene_scores = [None] * len(Fitness.GENES)
# init geneScores array
for i in range(len(gene_scores)):
gene_scores[i] = [0] * n_cycles
for cycle_index in range(n_cycles):
if cycle_index % 10 == 0:
print_prefix('\t%s: ' % cycle_index)
print_prefix('.')
if (cycle_index + 1) % 10 == 0:
print('')
sample_stress = sample
sample_t0 = Fitness.get_tzero_sample()
# bootstrap barcode indeces if needed
Fitness.updateBARCODE_INDICES()
if do_bootstrap_indices:
Fitness.bootstrapBARCODE_INDICES()
sample_t0 = Fitness.get_tzero_sample()
# bootstrap read counts if needed
if do_bootstrap_read_counts:
sample_stress = Fitness.bootstrapSampleReadCounts(
sample_stress)
if fl_noise_t0:
sample_t0 = Fitness.bootstrapSampleReadCounts(sample_t0)
# add Poisson noise if needed
if do_poisson_noise:
sample_stress = Fitness.buildPoissonNoisedSample(sample_stress)
if fl_noise_t0:
sample_t0 = Fitness.buildPoissonNoisedSample(sample_t0)
fscores = Fitness.build_fscores(sample_stress, sample_t0)
gscores = Fitness.build_gscores(fscores, score_type)
for g_index, gscore in enumerate(gscores):
gene_scores[g_index][cycle_index] = gscore
# sort scores
for gs in gene_scores:
gs.sort()
return gene_scores
# nCycles = 100
# Fitness.cleanGeneScores()
# sample = Fitness.get_sample(sampleIndex)
# fs = Fitness.build_fscores(sample, Fitness.get_tzero_sample())
# gs = Fitness.build_gscores(fs, scoreType)
# if doNoise:
# gscoresBI = Fitness.build_noised_gscores(nCycles, sample, doBootstrapIndeces=True,
# doBootstrapReadCounts=False, doPoissonNoise=False, flNoiseT0=False)
# gscoresPN = Fitness.build_noised_gscores(nCycles, sample, doBootstrapIndeces=False,
# doBootstrapReadCounts=False, doPoissonNoise=True, flNoiseT0=False)
# else:
# gscoresBI = [[0] * nCycles] * len(gs)
# gscoresPN = [[0] * nCycles] * len(gs)
# updateGENE_scores(gs, gscoresBI, gscoresPN, sample)
# buildStat()
@staticmethod
def build_gstat(sample_index, score_type):
N_CYCLES = 100
MIN_VAR_EFF_FCOUNT = 5
gstats = []
ss = Fitness.get_sample(sample_index)
ts = Fitness.get_tzero_sample()
fs = Fitness.build_fscores(ss, ts)
gs = Fitness.build_gscores(fs, score_type)
print('\tBootstrap fragments:')
gs_bootstrap = Fitness.build_noised_gscores(N_CYCLES, ss, score_type,
do_bootstrap_indices=True,
do_bootstrap_read_counts=False,
do_poisson_noise=False,
fl_noise_t0=False)
print('\tPoisson noise:')
gs_poisson = Fitness.build_noised_gscores(N_CYCLES, ss, score_type,
do_bootstrap_indices=False,
do_bootstrap_read_counts=False,
do_poisson_noise=True,
fl_noise_t0=False)
# Do first pass and collect basic stat
for i, gene in enumerate(Fitness.GENES):
gstat = {}
gstat['gene_name'] = gene['name']
gstat['locus_tag'] = gene['locusTag']
gstat['score_type'] = Fitness.SCORE_TYPE_NAMES[score_type]
gstat['score'] = gs[i]
gstat['gscore_bootstrap_avg'] = np.mean(gs_bootstrap[i])
gstat['gscore_bootstrap_var'] = np.var(gs_bootstrap[i])
gstat['gscore_poisson_avg'] = np.mean(gs_poisson[i])
gstat['gscore_poisson_var'] = np.var(gs_poisson[i])
gstat['gscore_var'] = max(
gstat['gscore_bootstrap_var'], gstat['gscore_poisson_var'])
gstat['fcount'] = len(gene['barcodeIndeces'])
gstat['gscore_var_moderated'] = np.nan
gstat['tscore'] = np.nan
gstat['pvalue'] = np.nan
gstat['qvalue'] = np.nan
gstat['set_qvalue'] = np.nan
# gstat['fread_counts'] = []
# for b_index in gene['barcodeIndeces']:
# gstat['fread_counts'].append(ss[b_index])
gstats.append(gstat)
# Effective variance - calculated only across "well-defined" genes: genes covered by > 5 fragments
# with score > 0
gscore_var_effs = []
for i, gstat in enumerate(gstats):
if gstat['fcount'] > MIN_VAR_EFF_FCOUNT and np.abs(gs[i]) > 0:
gscore_var_effs.append(gstat['gscore_var'])
gscore_var_eff = np.mean(gscore_var_effs)
# Do second pass and calculate tscore and pvalue
pvalues = []
pvalue_gis = []
for i, gstat in enumerate(gstats):
if gstat['fcount'] > 0:
gstat['gscore_var_moderated'] = (
(gstat['fcount'] - 1) * gstat['gscore_var'] + gscore_var_eff) / gstat['fcount']
gstat['tscore'] = gs[i] / \
np.sqrt(gstat['gscore_var_moderated'] +
Fitness.GSCORE_VARIENCE_ALPHA)
gstat['pvalue'] = stats.t.sf(
np.abs(gstat['tscore']), gstat['fcount'] - 1) * 2
if gstat['tscore'] != 0 and gstat['fcount'] > 1:
pvalues.append(gstat['pvalue'])
pvalue_gis.append(i)
# else:
# gstat['gscore_var_moderated'] = 0
# gstat['tscore'] = 0
# gstat['pvalue'] = 1
# Calculate qvalues
pvalues = np.array(pvalues)
qvals, pi0 = qvalues(pvalues, return_pi0=True)
for i, qvalue in enumerate(qvals):
g_index = pvalue_gis[i]
gstats[g_index]['qvalue'] = qvalue
return (gstats, gscore_var_eff, pi0)
@staticmethod
def calculate_set_qvalues(gstats_set):
pvalues = []
gstat_set_indices = []
gstat_gene_indices = []
for gstat_index, gstats in enumerate(gstats_set):
for gene_index, gstat in enumerate(gstats):
if gstat['tscore'] != 0 and gstat['fcount'] > 1:
pvalues.append(gstat['pvalue'])
gstat_set_indices.append(gstat_index)
gstat_gene_indices.append(gene_index)
pvalues = np.array(pvalues)
qvals, pi0 = qvalues(pvalues, return_pi0=True)
for i, qvalue in enumerate(qvals):
gstat_index = gstat_set_indices[i]
gene_index = gstat_gene_indices[i]
gstats_set[gstat_index][gene_index]['set_qvalue'] = qvalue
return (gstats_set, pi0)
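# A hedged sketch (not part of the original module) of how the classes above
# are typically wired together; the file and directory names are assumptions:
#
#   layout = BarseqLayout('barseq_layout.tsv')
#   Fitness.init(layout, 'barseq_dir', 'bpag.tsv', genes_gff_fname='genes.gff')
#   gstats, gscore_var_eff, pi0 = Fitness.build_gstat(
#       sample_index=0, score_type=Fitness.SCORE_TYPE_MEAN)
#   Fitness.save_gstat('gstat.tsv', gstats)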
|
python
|
# coding: utf8
import clinica.pipelines.engine as cpe
class T1VolumeParcellation(cpe.Pipeline):
"""T1VolumeParcellation - Computation of mean GM concentration for a set of regions.
Returns:
A clinica pipeline object containing the T1VolumeParcellation pipeline.
"""
def check_custom_dependencies(self):
"""Check dependencies that can not be listed in the `info.json` file."""
def check_pipeline_parameters(self):
"""Check pipeline parameters."""
from clinica.utils.atlas import T1_VOLUME_ATLASES
from clinica.utils.group import check_group_label
self.parameters.setdefault("group_label", None)
check_group_label(self.parameters["group_label"])
self.parameters.setdefault("atlases", T1_VOLUME_ATLASES)
def get_input_fields(self):
"""Specify the list of possible inputs of this pipeline.
Returns:
            A list of (string) input field names.
"""
return ["file_list", "atlas_list"]
def get_output_fields(self):
"""Specify the list of possible outputs of this pipeline.
Returns:
            A list of (string) output field names.
"""
def build_input_node(self):
"""Build and connect an input node to the pipeline."""
import os
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException
from clinica.utils.input_files import t1_volume_template_tpm_in_mni
from clinica.utils.inputs import clinica_file_reader
from clinica.utils.stream import cprint
from clinica.utils.ux import (
print_groups_in_caps_directory,
print_images_to_process,
)
# Check that group already exists
if not os.path.exists(
os.path.join(
self.caps_directory, "groups", f"group-{self.parameters['group_label']}"
)
):
print_groups_in_caps_directory(self.caps_directory)
raise ClinicaException(
f"Group {self.parameters['group_label']} does not exist. "
"Did you run t1-volume or t1-volume-create-dartel pipeline?"
)
try:
gm_mni = clinica_file_reader(
self.subjects,
self.sessions,
self.caps_directory,
t1_volume_template_tpm_in_mni(self.parameters["group_label"], 1, True),
)
except ClinicaException as e:
final_error_str = "Clinica faced error(s) while trying to read files in your CAPS directory.\n"
final_error_str += str(e)
raise ClinicaCAPSError(final_error_str)
read_parameters_node = npe.Node(
name="LoadingCLIArguments",
interface=nutil.IdentityInterface(
fields=self.get_input_fields(), mandatory_inputs=True
),
)
read_parameters_node.inputs.file_list = gm_mni
read_parameters_node.inputs.atlas_list = self.parameters["atlases"]
if len(self.subjects):
print_images_to_process(self.subjects, self.sessions)
cprint("The pipeline will last a few seconds per image.")
self.connect(
[
(read_parameters_node, self.input_node, [("file_list", "file_list")]),
(read_parameters_node, self.input_node, [("atlas_list", "atlas_list")]),
]
)
def build_output_node(self):
"""Build and connect an output node to the pipeline."""
def build_core_nodes(self):
"""Build and connect the core nodes of the pipeline."""
import nipype.interfaces.io as nio
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from ..t1_volume_parcellation import (
t1_volume_parcellation_utils as parcellation_utils,
)
atlas_stats_node = npe.MapNode(
nutil.Function(
input_names=["in_image", "atlas_list"],
output_names=["atlas_statistics"],
function=parcellation_utils.atlas_statistics,
),
name="atlas_stats_node",
iterfield=["in_image"],
)
outputnode = npe.Node(
nutil.IdentityInterface(fields=["atlas_statistics"]),
name="outputnode",
mandatory_inputs=True,
)
datasink = npe.Node(nio.DataSink(), name="datasink")
datasink.inputs.base_directory = self.caps_directory
datasink.inputs.parameterization = True
datasink.inputs.regexp_substitutions = [
(
r"(.*)(atlas_statistics)/.*/(sub-(.*)_ses-(.*)_T1.*)$",
r"\1/subjects/sub-\4/ses-\5/t1/spm/dartel/group-"
+ self.parameters["group_label"]
+ r"/\2/\3",
)
]
# Connection
# ==========
# fmt: off
self.connect(
[
(self.input_node, atlas_stats_node, [("file_list", "in_image")]),
(self.input_node, atlas_stats_node, [("atlas_list", "atlas_list")]),
(atlas_stats_node, outputnode, [("atlas_statistics", "atlas_statistics")]),
(outputnode, datasink, [("atlas_statistics", "atlas_statistics")]),
]
)
# fmt: on
|
python
|
import os
import unittest
from tempfile import TemporaryDirectory
import numpy as np
from l5kit.visualization import write_video
class TestVideoVisualizationHelpers(unittest.TestCase):
def test_write_video(self) -> None:
# Just a smoke test
images = (np.random.rand(5, 512, 512, 3) * 255).astype(np.uint8)
with TemporaryDirectory() as d:
video_filepath = os.path.join(d, "test_video.mp4")
write_video(video_filepath, images, (512, 512))
self.assertTrue(os.path.isfile(video_filepath))
def test_write_video_with_resize(self) -> None:
# Just a smoke test
images = (np.random.rand(5, 256, 256, 3) * 255).astype(np.uint8)
with TemporaryDirectory() as d:
video_filepath = os.path.join(d, "test_video.mp4")
write_video(video_filepath, images, (512, 512))
self.assertTrue(os.path.isfile(video_filepath))
def test_write_video_bw(self) -> None:
# Just a smoke test
images = (np.random.rand(5, 512, 512) * 255).astype(np.uint8)
with TemporaryDirectory() as d:
video_filepath = os.path.join(d, "test_video.mp4")
write_video(video_filepath, images, (512, 512))
self.assertTrue(os.path.isfile(video_filepath))
|
python
|
"""This module contains implementations of 'array list' data structure"""
from typing import TypeVar, Generic, List, Callable
Item = TypeVar('Item')
class ArrayList(Generic[Item]):
def __init__(self, size: int, realloc_coeff: int, potential_formula: Callable[[int, int], int]):
if size < 0:
raise ValueError('size must be non-negative')
self.__initial_size = size
self.__realloc_coeff = realloc_coeff
self.__index = -1
self.__data: List[Item] = [None for _ in range(size)]
self.__potential_formula = potential_formula
self.__append_actual_cost = 1
def __getitem__(self, key: int) -> Item:
return self.__data[key]
def __occupied_cells(self) -> List[Item]:
return [item for item in self.__data if item is not None]
def __potential(self) -> int:
return self.__potential_formula(len(self.__occupied_cells()), len(self.__data))
def potential(self) -> int:
return self.__potential()
def append(self, item: Item) -> int:
self.__index += 1
potential_before = self.__potential()
delta_n = 0
if self.__index == self.__initial_size:
self.__initial_size *= self.__realloc_coeff
delta_n = self.__initial_size - len(self.__data)
self.__data = self.__data[:] + [None for _ in range(delta_n)]
self.__data[self.__index] = item
return self.__append_actual_cost + delta_n + (self.__potential() - potential_before)
def __len__(self) -> int:
return len(self.__data)
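# --- Illustrative usage sketch (not part of the original module; names below are examples) ---
# With the textbook potential Phi(n, size) = 2*n - size and a doubling reallocation factor,
# the amortized cost reported by append() stays constant even across reallocations.
_demo_list = ArrayList(2, 2, lambda occupied, capacity: 2 * occupied - capacity)
_demo_costs = [_demo_list.append(i) for i in range(8)]
print(_demo_costs)  # -> [3, 3, 3, 3, 3, 3, 3, 3]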
|
python
|
"""
pip install selenium
"""
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36"
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_DRIVER_PATH = os.path.join(ROOT_DIR, "chromedriver.exe")
START_URL = "https://images.google.com"
class Bot(object):
def __init__(self):
self.options = webdriver.ChromeOptions()
# self.options.add_argument(f'--proxy-server={Storage.get_random_proxy()}')
self.options.add_argument("--window-size=1920, 1080")
self.options.add_argument(f'user-agent={USER_AGENT}')
self.options.add_experimental_option("excludeSwitches", ["enable-automation"])
self.options.add_experimental_option('useAutomationExtension', False)
self.options.add_argument("--disable-blink-features")
self.options.add_argument("--disable-blink-features=AutomationControlled")
self.driver = webdriver.Chrome(executable_path=CHROME_DRIVER_PATH, options=self.options)
self.w = WebDriverWait(self.driver, 20)
self.short_w = WebDriverWait(self.driver, 5)
self.long_w = WebDriverWait(self.driver, 35)
self.driver.get(START_URL)
self.get_data()
def get_data(self):
articles = self.w.until(EC.visibility_of_all_elements_located((By.XPATH, "//article")))
next_button = self.w.until(EC.element_to_be_clickable((By.XPATH, "//a[@aria-label='Next Page']")))
next_button.click()
|
python
|
import time
import numpy as np
class Learner(object):
'''
Define interface of methods that must be implemented by all inheriting classes.
'''
def __init__(self, discount_factor=1, learning_rate=1):
self._values = {}
self._prev_values = {}
self._discount_factor = discount_factor
self._learning_rate = learning_rate
self._learning_discount = learning_rate
self._curr_episode = 0
self._last_state = None
self._all_states = set()
self._all_actions = set()
def _set_value(self, state, action, val):
''' Helper method to override the value of specific <state, action> '''
self._all_states.add(state)
self._all_actions.add(action)
self._values[(state, action)] = val
def _update_value(self, state, action, val):
'''
Helper method to add/subtract value estimated for specific <state, action> using the
current learning rate
'''
self._all_states.add(state)
self._all_actions.add(action)
self._values[(state, action)] = val * self._learning_rate + \
(1.0 - self._learning_rate) * self._values.get((state, action), 0)
def _copy_values(self):
return {k:v for k, v in self._values.items()}
def get_states(self):
return set(self._all_states)
def get_actions(self):
return set(self._all_actions)
def val(self, state, action):
'''
Retrieves the estimated value of taking an `action` from this `state`, or zero if this
<state, action> has no learned value.
Parameters
----------
state : int
Integer used to identify a unique state.
action : int
Integer used to identify a unique action.
Returns
-------
val : float
Estimated value of the `state` reached after taking `action`.
'''
return self._values.get((state, action), 0)
def init_episode(self, init_state=None):
'''
Called after a terminal state is reached and a new episode is started. This method is
automatically called when the first observation is recorded using `fit()` or an observation
is recorded with a state value of `None`.
Examples
--------
>>> learner = Learner()
>>>
>>> # Fitting an entire episode at once will automatically call `init_episode()`:
>>> learner.fit([(0, 0, 0), (1, 0, 0.1), (2, 0, 0.5), (3, 0, -1)])
>>>
>>> # The prior line is equivalent to:
>>> learner.init_episode()
>>> learner.fit([(0, 0, 0), (2, 0, 0.3), (3, 0, -1)])
>>>
>>> # A new episode can alternatively be initialized by passing a state with value `None`:
>>> learner.fit((None, 0, 0))
>>>
>>> # This also works for passing multiple episodes as a single iterable:
>>> learner.fit([
... (0, 0, 0), (1, 0, 0.1), (2, 0, 0.5), (3, 0, -1),
... (None, 0, 0),
... (0, 0, 0), (2, 0, 0.3), (3, 0, -1),
... ])
'''
self._curr_episode += 1
self._learning_rate = self._learning_discount ** self._curr_episode
self._last_state = init_state
def fit(self, X): # pylint: disable=invalid-name
'''
Fit the learner with the provided data in the form of <state, action, reward>. The reward
and action for the first state in an episode are always ignored.
Parameters
----------
X : Tuple of <state, action, reward> or array-like of <state, action, reward>, where
`state` is an integer representing the current state (that the agent just landed on),
`action` is an integer representing the action taken by the agent to migrate from the
previews state to `state`, and reward is a number (int or float type) representing the
reward given for reaching the `state` after taking `action`.
Examples
--------
>>> learner1 = Learner()
>>>
>>> # Fitting an entire episode at once:
>>> learner1.fit([(0, 0, 0), (1, 0, 0.1), (2, 0, 0.5), (3, 0, -1)])
>>>
>>> # Equivalent to fitting each transition within the episode separately:
>>> learner2 = Learner()
>>> learner2.fit((0, 0, 0))
>>> learner2.fit((1, 0, 0.1))
>>> learner2.fit((2, 0, 0.5))
>>> learner2.fit((3, 0, -1))
>>>
>>> all([learner1.val(i, 0) == learner2.val(i, 0) for i in range(4)])
... True
'''
if not hasattr(X, '__iter__'):
raise ValueError('Parameter must be tuple of <state, action, reward> or iterable of tuples '
'of <state, action, reward>')
elif all((hasattr(tup, '__iter__') and len(tup) == 3 for tup in X)):
self.init_episode()
else:
if self._last_state is None:
self.init_episode()
X = [X]
for (state, action, reward) in X:
self._all_states.add(state)
self._all_actions.add(action)
if self._last_state is not None:
self._learn_incr(self._last_state, action, reward, state)
self._last_state = state
def _learn_incr(self, prev_state, action, reward, curr_state): # pylint: disable=unused-argument
''' Incrementally update the value estimates after observing a transition between states '''
self._update_value(prev_state, action, reward * self._learning_rate)
def _value_iteration(self, T, R, atol=1E-3, max_iter=1000, max_time=0): # pylint: disable=too-many-arguments, invalid-name
'''
Given transition matrix T and reward matrix R, compute value of <state, action> vectors
using value iteration algorithm.
'''
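# Bellman backup performed below:
#   V_{k+1}(a, s) = sum_{s'} T[a, s, s'] * (R[a, s, s'] + discount * V_k(a, s'))
# iterated until the mean squared change drops below `atol`, max_iter is reached,
# or the optional wall-clock budget `max_time` expires.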
n_states = max(self.get_states()) + 1
n_actions = max(self.get_actions()) + 1
curr_values = np.zeros((n_actions, n_states))
# Iterate and increase state values by the discounted adjacent state values
stopwatch = time.time() + max_time
prev_values = curr_values.copy()
for i in range(max_iter): # pylint: disable=unused-variable
for action in range(n_actions):
for state1 in range(n_states):
curr_values[action, state1] = (T[action, state1] * R[action, state1]).sum()
for state2 in range(n_states):
curr_values[action, state1] += T[action, state1, state2] * \
self._discount_factor * prev_values[action, state2]
if ((prev_values - curr_values) ** 2).mean() < atol or \
(max_time > 0 and stopwatch < time.time()):
break
prev_values = curr_values.copy()
return curr_values
def converge(self, atol=1E-5, max_iter=1000, max_time=0):
''' Train over already fitted data over and over until convergence '''
raise NotImplementedError(
'Classes inheriting from Learner must override Learner.converge()')
|
python
|
MOD = 1_000_000_007
L = [0, 2, 8]
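# Recurrence L[i] = 3*L[i-1] + 2, whose closed form is L[i] = 3**i - 1 (2, 8, 26, 80, ...).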
n = int(input())
for i in range(3, n+1):
L.append((L[-1]*3 + 2)%MOD)
print(L[n]%MOD)
|
python
|
### mb-soil-moisture.py v1.2
### Show soil moisture on micro:bit display using resistive and capacitive sensors
### Tested with BBC micro:bit v1.5 and MicroPython v1.9.2-34-gd64154c73
### on a Cytron Edu:bit
### MIT License
### Copyright (c) 2021 Kevin J. Walters
### Permission is hereby granted, free of charge, to any person obtaining a copy
### of this software and associated documentation files (the "Software"), to deal
### in the Software without restriction, including without limitation the rights
### to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
### copies of the Software, and to permit persons to whom the Software is
### furnished to do so, subject to the following conditions:
### The above copyright notice and this permission notice shall be included in all
### copies or substantial portions of the Software.
### THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
### IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
### FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
### AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
### LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
### OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
### SOFTWARE.
### This is a micro:bit version of
### https://github.com/kevinjwalters/circuitpython-examples/blob/master/pico/soil-moisture.py
### featured in
### https://www.instructables.com/Soil-Moisture-Sensing-With-the-Maker-Pi-Pico/
import utime
from microbit import display, pin0, pin1, pin13, pin16, sleep
import neopixel
### Detach Music Bit and Sound Bit from P0 and P1 if using Edu:bit and
### Traffic Light Bit to allow brief powering of the resistive sensor
SOIL_RES_SIG_PIN = pin0
SOIL_CAP_SIG_PIN = pin1
SOIL_RES_PWR_PIN = pin16
RES_SETTLE_TIME_MS = 50 ### 50ms
NEOPIXEL_PIN = pin13
### Values for (dry, wet) based on values from CircuitPython version
RES_RANGE = (1023, 156)
CAP_RANGE = (593, 343)
BLACK = (0, 0, 0) ### black (off)
GOOD_COLOUR = (0, 30, 0) ### green
DRY_COLOUR = (45, 18, 0) ### orange
TOODRY_COLOUR = (60, 0, 0) ### red (used for flashing too)
TOOWET_COLOUR = (40, 0, 40) ### magenta
### The BBC micro:bit 5x5 LED display can show ten
### brightness levels in MicroPython 0-9
### (The underlying DAL supports a wider range 0-255)
MAX_BRIGHTNESS = 9
### The Edu:bit's RGB Bit
NUM_PIXELS = 4
pixels = neopixel.NeoPixel(NEOPIXEL_PIN, NUM_PIXELS)
def fill_pixels(new_colour):
"""Set all the RGB pixels to new_colour."""
for idx in range(NUM_PIXELS):
pixels[idx] = new_colour
pixels.show()
def bar_chart(value,
columns=(0, 1, 2, 3, 4),
*, max_value=100):
"""Draw a vertical bar chart based on percentage value
using variable brightness levels on display."""
### Nine brightness levels on 5x5 LED matrix equates
### to 45 pixel steps - start at bottom row (y=4) and
### light pixels until the px_steps value is reached
px_steps = round(value * 45 / max_value)
for y in range(4, -1, -1):
for x in columns:
### The min/max here limit values from 0 to 9
display.set_pixel(x, y,
min(MAX_BRIGHTNESS, max(0, px_steps)))
px_steps -= MAX_BRIGHTNESS
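# Worked example: value=60 with max_value=100 gives px_steps=27, so the bottom three
# rows of the selected columns light at full brightness (9+9+9) and the top two stay off.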
def moisture_to_color(percents):
"""Take multiple values and returns RGB colour and flashing boolean.
The smallest percentage is used for dryness colour.
"""
p_colour = GOOD_COLOUR
p_flash = False
for percent in percents:
if percent <= 35:
p_colour = TOODRY_COLOUR
elif percent <= 45 and p_colour != TOODRY_COLOUR:
p_colour = DRY_COLOUR
elif percent >= 80:
p_colour = TOOWET_COLOUR
if percent <= 20:
p_flash = True
return (p_colour, p_flash)
def adc_to_moisture(raw_adc, arid_value, sodden_value):
"""Convert a micro:bit 0-1024 ADC value into a moisture percentage
using crude linear model."""
a_lower = min(arid_value, sodden_value)
a_range = abs(sodden_value - arid_value)
inverted = arid_value > sodden_value
fraction = (raw_adc - a_lower) / a_range
if inverted:
fraction = 1.0 - fraction
return min(100.0, max(0.0, fraction * 100.0))
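# Illustrative readings with the RES_RANGE calibration above: a raw ADC value at the
# arid endpoint maps to 0% and one at the sodden endpoint maps to 100%, e.g.
# adc_to_moisture(1023, *RES_RANGE) -> 0.0 and adc_to_moisture(156, *RES_RANGE) -> 100.0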
def get_res_moisture(power_pin=True):
if power_pin:
SOIL_RES_PWR_PIN.write_digital(1)
sleep(RES_SETTLE_TIME_MS)
res_adc = adc_to_moisture(SOIL_RES_SIG_PIN.read_analog(), *RES_RANGE)
if power_pin:
SOIL_RES_PWR_PIN.write_digital(0)
return res_adc
def get_cap_moisture():
return adc_to_moisture(SOIL_CAP_SIG_PIN.read_analog(), *CAP_RANGE)
flash_toggle = False
scroll_delay=250 ### default is 150ms
while True:
### Read both values and display them slowly
res_perc = get_res_moisture()
cap_perc = get_cap_moisture()
display.scroll("R " + str(round(res_perc)),
delay=scroll_delay)
sleep(2000)
display.scroll("C " + str(round(cap_perc)),
delay=scroll_delay)
sleep(2000)
### Now show both values on bar charts for 20 seconds
### but only read fresh capacitive values
start_time_ms = utime.ticks_ms()
while True:
cap_perc = get_cap_moisture()
### Note extra brackets, one tuple parameter is passed
colour, flash = moisture_to_color((res_perc, cap_perc))
bar_chart(res_perc, (0, 1)) ### Left two columns
bar_chart(cap_perc, (3, 4)) ### Right two columns
if flash:
if flash_toggle:
fill_pixels(colour)
else:
fill_pixels(BLACK)
flash_toggle = not flash_toggle
else:
fill_pixels(colour)
now_time_ms = utime.ticks_ms()
if utime.ticks_diff(now_time_ms, start_time_ms) > 20000:
break
sleep(500)
|
python
|
# -*- coding: utf-8 -*-
import json
def save_json(file_path, data):
"""
Save data to a json file.
>>> save_json("./JsonAccessorDoctest.json", {"Fuji": {"Id": 30, "server": ["JP", "CN"]}}) # doctest: +SKIP
"""
with open(file_path, 'w', encoding='utf8') as outfile:
json.dump(data, outfile, indent=4, separators=(',', ': '), sort_keys=True, ensure_ascii=False)
def load_json(file_path):
"""
Load data from json file.
>>> result = load_json("./JsonAccessorDoctest.json")
>>> result
{'Fuji': {'Id': 30, 'server': ['JP', 'CN']}}
>>> len(result['Fuji']['server'])
2
"""
with open(file_path, 'r', encoding='utf8') as json_data:
return json.loads(json_data.read())
if __name__ == "__main__":
import doctest
doctest.testmod(report=True)
print("Complete doctest.")
|
python
|
"""
This lambda compares new batches to previous batches to detect
which records are missing from the new one. These indicate that
a membership has lapsed.
"""
import datetime
import json
import os
import boto3
from actionnetwork_activist_sync.actionnetwork import ActionNetwork
from actionnetwork_activist_sync.logging import get_logger
from actionnetwork_activist_sync.state_model import State
logger = get_logger('lambda_lapsed')
dry_run = os.environ.get('DRY_RUN') != '0'
dynamodb_client = boto3.client('dynamodb')
secrets_client = boto3.client('secretsmanager')
api_key = os.environ['ACTIONNETWORK_API_KEY']
if api_key.startswith('arn'):
secret = secrets_client.get_secret_value(SecretId=api_key)
secret_dict = json.loads(secret['SecretString'])
api_key = secret_dict['ACTIONNETWORK_API_KEY']
logger.debug('Using API key from Secrets Manager')
else:
logger.debug('Using API key from Env')
if os.environ.get('ENVIRONMENT') == 'local':
import localstack_client.session
dynamodb_client = localstack_client.session.Session().client('dynamodb')
cur_batch = datetime.date.today().strftime('%Y%U')
last_week = datetime.date.today() - datetime.timedelta(weeks=1)
prev_batch = last_week.strftime('%Y%U')
def lambda_handler(event, context):
"""
This lambda is intended to get triggered on a schedule via CloudWatch.
"""
removed = 0
cur_count = State.count(hash_key=cur_batch)
prev_count = State.count(hash_key=prev_batch)
cur_items = State.query(hash_key=cur_batch, filter_condition=State.status == State.PROCESSED)
logger.info(
'Loaded current items.',
extra={'cur_batch': cur_batch, 'num_items': cur_count})
prev_items = State.query(hash_key=prev_batch, filter_condition=State.status == State.PROCESSED)
logger.info(
'Loaded previous items.',
extra={'prev_batch': prev_batch, 'num_items': prev_count})
cur_emails = [c.email for c in cur_items]
prev_emails = [p.email for p in prev_items]
if cur_count == 0 or len(cur_emails) == 0:
errMsg = 'No current batch, something is probably wrong. Aborting.'
logger.error(errMsg)
raise RuntimeError(errMsg)
if prev_count == 0 or len(prev_emails) == 0:
errMsg = 'No previous batch. If this is not the first week, then something is probably wrong. Aborting.'
logger.error(errMsg)
raise RuntimeError(errMsg)
logger.info(
'Checking previous email list against current',
extra={
'cur_email_count': len(cur_emails),
'prev_email_count': len(prev_emails)
}
)
action_network = get_actionnetwork(api_key)
for prev_email in prev_emails:
if prev_email not in cur_emails:
logger.info(
'Turning is_member off for lapsed member',
extra={'email': prev_email}
)
if not dry_run:
action_network.remove_member_by_email(prev_email)
removed += 1
logger.info(
'Finished removing lapsed members.',
extra={
'removed': removed,
'cur_count': cur_count,
'prev_count': prev_count
})
return (removed, cur_count, prev_count)
def get_actionnetwork(api_k):
"""Creates an ActionNetwork object.
This function is a helper for mocking in tests"""
return ActionNetwork(api_k)
|
python
|
"""
vp_overlap.py
Do calculations for overlap type functionals
"""
import numpy as np
from scipy.special import erf
def vp_overlap(self):
const = 2
#Calculate overlap
self.E.S = self.grid.integrate((np.sum(self.na_frac, axis=1) * np.sum(self.nb_frac, axis=1))**(0.5))
self.E.F = erf( const * self.E.S)
if not self.ens:
iksa = [self.KSa]
iksb = [self.KSb]
else:
iksa = [self.KSa, self.KSA]
iksb = [self.KSb, self.KSB]
for KSa in iksa:
#Functional derivative of the overlap
KSa.V.dSdn = KSa.scale * 0.5 * (self.nb_frac / self.na_frac)**0.5
if self.optPartition is True:
KSa.V.dSdn = np.repeat(KSa.V.dSdn, 2, axis=1)
#Remove any Nans
KSa.V.dSdn[ np.isinf(KSa.V.dSdn) ] = 0.0
KSa.V.dSdn[ np.isnan(KSa.V.dSdn) ] = 0.0
KSa.V.dFdn = 2 * np.pi**(-0.5) * np.exp( -(const * self.E.S)**2 ) * const * KSa.V.dSdn
for KSb in iksb:
#Functional derivative of the overlap
KSb.V.dSdn = KSb.scale * 0.5 * (self.na_frac / self.nb_frac)**0.5
if self.optPartition is True:
KSb.V.dSdn = np.repeat(KSb.V.dSdn, 2, axis=1)
#Remove any Nans
KSb.V.dSdn[ np.isinf(KSb.V.dSdn) ] = 0.0
KSb.V.dSdn[ np.isnan(KSb.V.dSdn) ] = 0.0
KSb.V.dFdn = 2 * np.pi**(-0.5) * np.exp( -(const * self.E.S)**2 ) * const * KSb.V.dSdn
|
python
|
from datetime import datetime
def voto(ano):
global idade
idade = datetime.now().year - ano
if idade < 16:
return 'DENIED'
elif idade < 18:
return 'OPTIONAL'
else:
return 'MANDATORY'
idade = 0
tip = voto(int(input('What year were you born?: ')))
print(f'At {idade} years old, your vote is {tip}')
|
python
|
# -*- coding: utf-8 -*-
import base64
import urlparse
import urllib
import hashlib
import re
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import trakt
from resources.lib.modules import pyaes
RES_4K = ['4k', 'hd4k', 'hd4k ', '4khd', '4khd ', 'uhd', 'ultrahd', 'ultra hd', 'ultra high', '2160', '2160p', '2160i', 'hd2160', '2160hd',
'2160 ', '2160p ', '2160i ', 'hd2160 ', '2160hd ', '1716p', '1716i', 'hd1716', '1716hd', '1716p ', '1716i ', 'hd1716 ',
'1716hd ', '2664p', '2664i', 'hd2664', '2664hd', '2664p ', '2664i ', 'hd2664 ', '2664hd ', '3112p', '3112i', 'hd3112',
'3112hd', '3112p ', '3112i ', 'hd3112 ', '3112hd ', '2880p', '2880i', 'hd2880', '2880hd', '2880p ', '2880i ', 'hd2880 ',
'2880hd ']
RES_2K = ['2k', 'hd2k', 'hd2k ', '2khd', '2khd ', '2048p', '2048i', 'hd2048', '2048hd', '2048p ', '2048i ', 'hd2048 ', '2048hd ',
'1332p', '1332i', 'hd1332', '1332hd', '1332p ', '1332i ', 'hd1332 ', '1332hd ', '1556p', '1556i', 'hd1556', '1556hd',
'1556p ', '1556i ', 'hd1556 ', '1556hd ', ]
RES_1080 = ['1080', '1080p', '1080i', 'hd1080', '1080hd', '1080 ', '1080p ', '1080i ', 'hd1080 ', '1080hd ', '1200p', '1200i', 'hd1200',
'1200hd', '1200p ', '1200i ', 'hd1200 ', '1200hd ']
RES_HD = ['720', '720p', '720i', 'hd720', '720hd', 'hd', '720 ', '720p ', '720i ', 'hd720 ', '720hd ']
RES_SD = ['576', '576p', '576i', 'sd576', '576sd', '576 ', '576p ', '576i ', 'sd576 ', '576sd ', '480', '480p', '480i', 'sd480', '480sd',
'480 ', '480p ', '480i ', 'sd480 ', '480sd ', '360', '360p', '360i', 'sd360', '360sd', '360 ', '360p ', '360i ', 'sd360 ', '360sd ',
'240', '240p', '240i', 'sd240', '240sd', '240 ', '240p ', '240i ', 'sd240 ', '240sd ']
SCR = ['dvdscr', 'screener', 'scr', 'r5', 'r6', 'dvdscr ', 'r5 ', 'r6 ']
CAM = ['camrip', 'cam rip', 'tsrip', 'ts rip', 'hdcam', 'hd cam', 'hdts', 'hd ts', 'dvdcam', 'dvd cam', 'dvdts', 'dvd ts', 'cam', 'telesync',
'tele sync', 'ts', 'camrip ', 'tsrip ', 'hcam', 'hdts ', 'dvdcam ', 'dvdts ', 'telesync ']
CODEC_H265 = ['hevc', 'h265', 'h.265', 'x265', 'x.265 ', '265 ']
CODEC_H264 = ['avc', 'h264', 'h.264', 'x264', 'x.264', '264 ']
CODEC_XVID = ['xvid', 'xvid ']
CODEC_DIVX = ['divx', 'divx ', 'div2', 'div2 ', 'div3', 'div3 ']
CODEC_MPEG = ['mp4', 'mpeg', 'm4v', 'mpg', 'mpg1', 'mpg2', 'mpg3', 'mpg4', 'mp4 ', 'mpeg ', 'msmpeg', 'msmpeg4', 'mpegurl',
'm4v ', 'mpg ', 'mpg1 ', 'mpg2 ', 'mpg3 ', 'mpg4 ', 'msmpeg ', 'msmpeg4 ']
CODEC_AVI = ['avi']
CODEC_MKV = ['mkv', 'mkv ', '.mkv', 'matroska', 'matroska ']
AUDIO_8CH = ['ch8', '8ch', 'ch7', '7ch', '7 1', 'ch7 1', '7 1ch', 'ch8 ', '8ch ', 'ch7 ', '7ch ', '.ddp']
AUDIO_6CH = ['ch6', '6ch', 'ch6', '6ch', '6 1', 'ch6 1', '6 1ch', '5 1', 'ch5 1', '5 1ch', '5.1.', 'ch6 ', '6ch ', 'ch6 ', '6ch ']
AUDIO_2CH = ['ch2', '2ch', 'stereo', 'dualaudio', 'dual-audio', 'dual', '2 0', 'ch2 0', '2 0ch', 'ch2 ', '2ch ', 'stereo ', 'dual audio', 'dual ']
AUDIO_1CH = ['ch1', '1ch', 'mono', 'monoaudio', 'ch1 0', '1 0ch', 'ch1 ', '1ch ', 'mono ']
VIDEO_3D = ['3d', 'sbs', 'hsbs', 'sidebyside', 'side by side', 'stereoscopic', 'tab', 'htab', 'topandbottom', 'top and bottom']
def is_anime(content, type, type_id):
try:
r = trakt.getGenre(content, type, type_id)
return 'anime' in r or 'animation' in r
except:
return False
def get_release_quality(release_name, release_link=None):
if release_name is None:
return
try:
release_name = release_name.encode('utf-8')
except:
pass
try:
quality = None
release_name = release_name.upper()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', release_name)
fmt = re.split('\.|\(|\)|\[|\]|\s|-', fmt)
fmt = [i.lower() for i in fmt]
if any(value in fmt for value in RES_4K):
quality = "4K"
elif any(value in fmt for value in RES_1080):
quality = "1080p"
elif any(value in fmt for value in RES_HD):
quality = "720p"
elif any(value in fmt for value in RES_SD):
quality = "480p"
elif any(value in fmt for value in SCR):
quality = 'SCR'
elif any(value in fmt for value in CAM):
quality = 'CAM'
if not quality:
if release_link:
release_link = release_link.lower()
try:
release_link = release_link.encode('utf-8')
except:
pass
if any(value in release_link for value in RES_4K):
quality = "4K"
elif any(value in release_link for value in RES_1080):
quality = "1080p"
elif any(value in release_link for value in RES_HD):
quality = "720p"
elif any(value in release_link for value in RES_SD):
quality = "480p"
elif any(value in release_link for value in SCR):
quality = 'SCR'
elif any(value in release_link for value in CAM):
quality = 'CAM'
else:
quality = 'SD'
info = []
if any(value in release_name for value in VIDEO_3D):
info.append('3D')
if any(value in fmt for value in CODEC_H265):
info.append('HEVC')
return quality, info
except:
return 'SD', []
def getFileType(url):
try:
url = url.lower()
except:
url = str(url)
type = ''
if any(value in url for value in ['bluray', 'blu-ray']):
type += ' BLURAY /'
if any(value in url for value in ['.web-dl', '.webdl']):
type += ' WEB-DL /'
if any(value in url for value in ['hdrip', 'hd-rip']):
type += ' HDRip /'
if any(value in url for value in ['bd-r', 'bd.r', 'bdr', 'bd-rip', 'bd.rip', 'bdrip']):
type += ' BD-R /'
if any(value in url for value in ['.dd5.1', 'dolby-digital', 'dolby.digital']):
type += ' DOLBYDIGITAL /'
if any(value in url for value in ['.ddex', 'dolby-ex', 'dd-ex']):
type += ' DOLBYDIGITAL-EX /'
if any(value in url for value in ['dolby-digital-plus', 'dolby.digital.plus']):
type += ' DOLBYDIGITAL-Plus /'
if any(value in url for value in ['truehd', '.ddhd']):
type += ' DOLBY-TRUEHD /'
if 'atmos' in url:
type += ' DOLBY-ATMOS /'
if '.dts.' in url:
type += ' DTS /'
if any(value in url for value in ['dts-hd', 'dtshd']):
type += ' DTS-HD /'
if any(value in url for value in ['dts-es', 'dtses']):
type += ' DTS-ES /'
if any(value in url for value in ['dts-neo', 'dtsneo']):
type += ' DTS-NEO /'
if '.thx.' in url:
type += ' THX /'
if any(value in url for value in ['.thx-ex', 'thxex']):
type += ' THX-EX /'
if any(value in url for value in AUDIO_8CH):
type += ' 8CH /'
if any(value in url for value in AUDIO_6CH):
type += ' 6CH /'
if 'xvid' in url:
type += ' XVID /'
if 'divx' in url:
type += ' DIVX /'
if any(value in url for value in CODEC_MPEG):
type += ' MPEG /'
if '.avi' in url:
type += ' AVI /'
if 'ac3' in url:
type += ' AC3 /'
if any(value in url for value in CODEC_H264):
type += ' x264 /'
if any(value in url for value in CODEC_H265):
type += ' x265 /'
if any(value in url for value in CODEC_MKV):
type += ' MKV /'
if 'subs' in url:
if type != '':
type += ' - WITH SUBS'
else:
type = 'SUBS'
type = type.rstrip('/')
return type
def check_sd_url(release_link):
release_link = release_link.lower()
try:
release_link = release_link.encode('utf-8')
except:
pass
try:
if '2160' in release_link:
quality = '4K'
elif '4k' in release_link:
quality = '4K'
elif 'uhd' in release_link:
quality = '4K'
elif '1080' in release_link:
quality = '1080p'
elif '720' in release_link:
quality = '720p'
elif 'hd.' in release_link:
quality = '720p'
elif '.hd' in release_link:
quality = '720p'
elif 'HD' in release_link:
quality = '720p'
elif 'hdtv' in release_link:
quality = '720p'
elif 'bluray' in release_link:
quality = '720p'
elif 'BluRay' in release_link:
quality = '720p'
elif '.BluRay.' in release_link:
quality = '720p'
elif 'webrip' in release_link:
quality = '720p'
elif '.WEBRip.' in release_link:
quality = '720p'
elif any(i in release_link for i in ['dvdscr', 'r5', 'r6']):
quality = 'SCR'
elif any(i in release_link for i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']):
quality = 'CAM'
else:
quality = 'SD'
return quality
except:
return 'SD'
def check_direct_url(url):
try:
if '2160' in url:
quality = '4K'
elif '4k' in url:
quality = '4K'
elif '1080p' in url:
quality = '1080p'
elif '1080' in url:
quality = '1080p'
elif '720p' in url:
quality = '720p'
elif '720' in url:
quality = '720p'
elif 'hd' in url:
quality = '720p'
elif '.hd' in url:
quality = '720p'
elif 'HD' in url:
quality = '720p'
elif 'hdtv' in url:
quality = '720p'
elif 'bluray' in url:
quality = '720p'
elif 'BluRay' in url:
quality = '720p'
elif '480p' in url:
quality = '480p'
elif '480' in url:
quality = '480p'
elif any(i in url for i in ['dvdscr', 'r5', 'r6']):
quality = 'SCR'
elif any(i in url for i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']):
quality = 'CAM'
else:
quality = 'SD'
return quality
except:
return 'SD'
def check_url(url):
try:
if '2160p' in url:
quality = '4K'
elif '2160' in url:
quality = '4K'
elif '4k' in url:
quality = '4K'
elif 'uhd' in url:
quality = '4K'
elif '1080p' in url:
quality = '1080p'
elif '1080' in url:
quality = '1080p'
elif '720p' in url:
quality = '720p'
elif '720' in url:
quality = '720p'
elif '.hd.' in url:
quality = '720p'
elif 'hd' in url:
quality = '720p'
elif 'HD' in url:
quality = '720p'
elif 'hdtv' in url:
quality = '720p'
elif 'BluRay' in url:
quality = '720p'
elif '.BluRay.' in url:
quality = '720p'
elif '.WEBRip.' in url:
quality = '720p'
elif '480p' in url:
quality = 'SD'
elif '480' in url:
quality = 'SD'
elif any(i in url for i in ['dvdscr', 'r5', 'r6']):
quality = 'SCR'
elif any(i in url for i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']):
quality = 'CAM'
else:
quality = 'SD'
return quality
except:
return 'SD'
def label_to_quality(label):
try:
try:
label = int(re.search('(\d+)', label).group(1))
except:
label = 0
if label >= 2160:
return '4K'
elif label >= 1440:
return '1440p'
elif label >= 1080:
return '1080p'
elif 720 <= label < 1080:
return '720p'
elif label < 720:
return 'SD'
except:
return 'SD'
def strip_domain(url):
try:
if url.lower().startswith('http') or url.startswith('/'):
url = re.findall('(?://.+?|)(/.+)', url)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def is_host_valid(url, domains):
try:
host = __top_domain(url)
hosts = [domain.lower() for domain in domains if host and host in domain.lower()]
if hosts and '.' not in host:
host = hosts[0]
if hosts and any([h for h in ['google', 'picasa', 'blogspot'] if h in host]):
host = 'gvideo'
if hosts and any([h for h in ['akamaized','ocloud'] if h in host]):
host = 'CDN'
return any(hosts), host
except:
return False, ''
def __top_domain(url):
elements = urlparse.urlparse(url)
domain = elements.netloc or elements.path
domain = domain.split('@')[-1].split(':')[0]
regex = "(?:www\.)?([\w\-]*\.[\w\-]{2,3}(?:\.[\w\-]{2,3})?)$"
res = re.search(regex, domain)
if res:
domain = res.group(1)
domain = domain.lower()
return domain
def aliases_to_array(aliases, filter=None):
try:
if not filter:
filter = []
if isinstance(filter, str):
filter = [filter]
return [x.get('title') for x in aliases if not filter or x.get('country') in filter]
except:
return []
def append_headers(headers):
return '|%s' % '&'.join(['%s=%s' % (key, urllib.quote_plus(headers[key])) for key in headers])
def get_size(url):
try:
size = client.request(url, output='file_size')
if size == '0':
size = False
size = convert_size(size)
return size
except:
return False
def convert_size(size_bytes):
import math
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
if size_name[i] == 'B' or size_name[i] == 'KB':
return None
return "%s %s" % (s, size_name[i])
def check_directstreams(url, hoster='', quality='SD'):
urls = []
host = hoster
if 'google' in url or any(x in url for x in ['youtube.', 'docid=']):
urls = directstream.google(url)
if not urls:
tag = directstream.googletag(url)
if tag: urls = [{'quality': tag[0]['quality'], 'url': url}]
if urls:
host = 'gvideo'
elif 'ok.ru' in url:
urls = directstream.odnoklassniki(url)
if urls: host = 'vk'
elif 'vk.com' in url:
urls = directstream.vk(url)
if urls: host = 'vk'
elif any(x in url for x in ['akamaized', 'blogspot', 'ocloud.stream']):
urls = [{'url': url}]
if urls: host = 'CDN'
direct = True if urls else False
if not urls:
urls = [{'quality': quality, 'url': url}]
return urls, host, direct
def evp_decode(cipher_text, passphrase, salt=None):
cipher_text = base64.b64decode(cipher_text)
if not salt:
salt = cipher_text[8:16]
cipher_text = cipher_text[16:]
data = evpKDF(passphrase, salt)
decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(data['key'], data['iv']))
plain_text = decrypter.feed(cipher_text)
plain_text += decrypter.feed()
return plain_text
def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"):
target_key_size = key_size + iv_size
derived_bytes = ""
number_of_derived_words = 0
block = None
hasher = hashlib.new(hash_algorithm)
while number_of_derived_words < target_key_size:
if block is not None:
hasher.update(block)
hasher.update(passwd)
hasher.update(salt)
block = hasher.digest()
hasher = hashlib.new(hash_algorithm)
for _i in range(1, iterations):
hasher.update(block)
block = hasher.digest()
hasher = hashlib.new(hash_algorithm)
derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)]
number_of_derived_words += len(block) / 4
return {"key": derived_bytes[0: key_size * 4], "iv": derived_bytes[key_size * 4:]}
|
python
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from copy import deepcopy
import errno
import fnmatch
import logging
import os
import random
import shutil
import string
import subprocess
import yaml
import sys
from ansible_vault import Vault
def _is_py3():
return sys.version_info >= (3, 0)
if _is_py3():
from ansible_vault_rekey.vaultstring import VaultString
else:
from vaultstring import VaultString
"""Main module."""
yaml.add_representer(VaultString, VaultString.to_yaml, Dumper=yaml.Dumper)
yaml.add_constructor(VaultString.yaml_tag, VaultString.yaml_constructor)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# log_console = logging.StreamHandler()
# log_console.setLevel(logging.DEBUG)
# log.addHandler(log_console)
def get_dict_value(data, address):
'''Accepts a dictionary and an "address" (a list representing a nested dict value's key)
and returns the value at that "address"
>>> d = {'mailserver_users': [{'somekey': 'someval'}, ...], ...}
>>> a = ['mailserver_users', 0, 'somekey']
>>> get_dict_value(d, a)
'someval'
'''
d = deepcopy(data)
for key in address:
try:
d = d[key]
except KeyError:
return None
return d
def put_dict_value(data, address, value):
'''Accepts a dictionary and an "address" (a list representing a nested dict value's key)
and sets the value at that "address".
>>> d = {'mailserver_users': [{...}, {...}], ...}
>>> a = ['mailserver_users', 1, 'newkey']
>>> put_dict_value(d, a, 'newval')
{..., 'mailserver_users': [{...}, {'newkey': 'newval', ...}]}
'''
# i had like 15 lines here before finding this: https://stackoverflow.com/a/13688108/596204
for key in address[:-1]:
data = data[key] # dive another layer deep
data[address[-1]] = value # set nested obj's value
return data # return modified outer obj
def generate_password(length=128):
return ''.join(random.choice(string.ascii_letters + string.digits + string.punctuation) for _ in range(length))
def write_password_file(path, password=None, overwrite=False):
password = generate_password() if not password else password
if os.path.isfile(path) and not overwrite:
log.error('Cowardly refusing to overwrite an existing password file at {}'.format(path))
return False
with open(path, 'w+') as f:
f.write(password)
return True
def restore_files(files, target_path, prefix='.'):
restored = []
for f in files:
relpath = os.path.realpath(f)[len(os.path.realpath(prefix))+1:]
newpath = os.path.join(target_path, relpath)
try:
os.makedirs(os.path.dirname(newpath))
except OSError as e:
if e.errno != errno.EEXIST:
raise
shutil.copy(f, newpath)
restored.append(newpath)
return restored
def backup_files(files, backup_path, prefix='.'):
for f in files:
relpath = os.path.realpath(f)[len(os.path.realpath(prefix))+1:]
newpath = os.path.join(backup_path, relpath)
try:
os.makedirs(os.path.dirname(newpath))
except OSError as e:
if e.errno != errno.EEXIST:
raise
shutil.copy(f, newpath)
return find_files(backup_path)
def find_files(path, pattern='*.y*ml'):
exclude = ['.rekey-backups']
for root, dirs, files in os.walk(path):
dirs[:] = [d for d in dirs if d not in exclude] # this tells python to modify dirs in place
for name in files: # without creating a new list
if fnmatch.fnmatch(name, pattern):
yield os.path.realpath(os.path.join(root, name))
def is_file_secret(path):
with open(path) as f:
return f.readline().startswith('$ANSIBLE_VAULT;1.1;AES256')
def rekey_file(path, password_file, new_password_file):
cmd = "ansible-vault rekey --vault-password-file {} --new-vault-password-file {} {}".format(
password_file, new_password_file, path)
subprocess.check_call(cmd, shell=True)
return True
def decrypt_file(path, password_file, newpath=None):
'''Decrypts an Ansible Vault YAML file and returns a dict. Set newpath to
write the result somewhere.'''
# log.debug('decrypt_file({}, {}, {})'.format(path, password_file, newpath))
if is_file_secret(path):
# log.debug('file is fully encrypted')
with open(password_file) as f:
vault = Vault(f.read().strip())
# log.debug('vault fetched with password file: {}'.format(password_file))
with open(path) as f:
r = vault.load(f.read())
# log.debug('loaded file: {}'.format(r))
else:
r = parse_yaml(path)
for s in find_yaml_secrets(r):
v = get_dict_value(r, s)
plaintext = v.decrypt(open(password_file).read().strip())
put_dict_value(r, s, plaintext)
if not r:
raise ValueError('The Vault library extracted nothing from the file. Is it actually encrypted?')
if newpath:
if not os.path.isdir(os.path.dirname(newpath)):
os.makedirs(os.path.dirname(newpath))
with open(newpath, 'w+') as f:
f.write(yaml.dump(r))
return r
def encrypt_file(path, password_file, newpath=None, secrets=None):
'''Encrypts an Ansible Vault YAML file. Returns encrypted data. Set newpath to
write the result somewhere. Set secrets to specify inline secret addresses.'''
log.debug('Reading decrypted data from {}...'.format(path))
data = parse_yaml(path)
if not data:
raise ValueError('The YAML file "{}" could not be parsed'.format(path))
else:
log.debug('Got vars: {}'.format(data))
with open(password_file) as f:
p = f.read().strip()
log.debug('Read pass from {}: {}'.format(password_file, p))
if secrets:
# newdata = data.copy()
secrets = list(secrets)
log.debug('Received {} secrets: {}'.format(len(secrets), secrets))
for address in secrets:
plaintext = get_dict_value(data, address)
log.debug('Re-encrypting "{}" at {} with new password...'.format(plaintext, address))
put_dict_value(data, address,
VaultString.encrypt(plaintext=plaintext, password=p))
if newpath:
log.debug('Writing {} to {}...'.format(data, newpath))
write_yaml(newpath, data)
return data
else:
vault = Vault(p)
encrypted = vault.dump(data)
with open(newpath, 'w') as f:
f.write(encrypted)
return encrypted
def parse_yaml(path):
with open(path) as f:
return yaml.load(f, Loader=yaml.Loader)
def write_yaml(path, data):
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'w+') as f:
f.write(yaml.dump(data, default_flow_style=False))
def find_yaml_secrets(data, path=None):
'''Generator which results a list of YAML key paths formatted as lists.
>>> for i in find_yaml_secrets(data):
... print(i)
...
['test_password'] # data['test_password']
['mailserver_users', 0, 'password'] # data['mailserver_users'][0]['password']
'''
path = [] if not path else path
if data.__class__ is VaultString:
yield path
if isinstance(data, list):
counter = 0
for item in data:
newpath = path + [counter]
result = find_yaml_secrets(item, newpath)
if result:
for r in result:
yield r
counter += 1
# log.debug(data)
if isinstance(data, dict) or isinstance(data, OrderedDict):
for k, v in data.items():
newpath = path + [k]
result = find_yaml_secrets(v, newpath)
if result:
for r in result:
yield r
|
python
|
def a(z):
print(z + z)
a(0)
a('e')
a([0])
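# Duck typing demo: the same function prints 0, 'ee' and [0, 0] because `+` dispatches on
# the runtime type of its operands (int addition, str and list concatenation).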
|
python
|
# Generated by Django 2.2.5 on 2019-10-26 18:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField()),
('title', models.CharField(max_length=100, verbose_name='title')),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
},
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField()),
('title', models.CharField(max_length=100, verbose_name='title')),
],
options={
'verbose_name': 'region',
'verbose_name_plural': 'regions',
},
),
migrations.CreateModel(
name='ClassifiedAd',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(blank=True, max_length=100, null=True)),
('header', models.CharField(max_length=100)),
('body', models.TextField()),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('date_posted', models.DateTimeField(auto_now_add=True)),
('date_updated', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Profile')),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='classified.Category')),
('region', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='classified.Region')),
],
options={
'ordering': ('-date_updated',),
},
),
]
|
python
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
RESOLUTION = ["Performance Tuning of Cortex XSOAR Server: https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-0/"
"cortex-xsoar-admin/cortex-xsoar-overview/performance-tuning-of-cortex-xsoar-server"]
def analyzeData(res):
lowFound = 0
medFound = 0
lowRes = False
medRes = False
highRes = False
for item in res:
if not lowRes:
if item['data'][0] >= 70:
lowFound += 1
if lowFound >= 30:
lowRes = True
else:
lowFound = 0
if not medRes:
if item['data'][0] >= 80:
medFound += 1
if medFound >= 10:
medRes = True
else:
medFound = 0
if not highRes:
if item['data'][0] >= 90:
highRes = True
if lowRes or medRes or highRes:
addActions = []
if highRes:
addActions.append({'category': 'Memory analysis', 'severity': 'High',
'description': "Memory has reached 90%", "resolution": f"{RESOLUTION[0]}"})
if medRes:
addActions.append({'category': 'Memory analysis', 'severity': 'Medium',
'description': "Memory has reached 80% for 10 minutes", "resolution": f"{RESOLUTION[0]}"})
if lowRes:
addActions.append({'category': 'Memory analysis', 'severity': 'Low',
'description': "Memory has reached 70% for 30 minutes", "resolution": f"{RESOLUTION[0]}"})
return addActions
else:
return None
# Main
incident = demisto.incidents()[0]
accountName = incident.get('account')
accountName = f"acc_{accountName}/" if accountName != "" else ""
args = demisto.args()
isWidget = argToBoolean(args.get('isWidget', True))
stats = demisto.executeCommand(
"demisto-api-post",
{
"uri": f"{accountName}/statistics/widgets/query",
"body": {
"size": 1440,
"dataType": "system",
"params": {
"timeFrame": "minutes",
"format": "HH:mm",
},
"query": "memory.usedPercent",
"dateRange": {
"period": {
"byFrom": "hours",
"fromValue": 24
}
},
"widgetType": "line"
}
})
res = stats[0]["Contents"]["response"]
output = []
counter = 0
higher = 0
if isWidget is True:
buildNumber = demisto.executeCommand("DemistoVersion", {})[0]['Contents']['DemistoVersion']['buildNumber']
# in local development instances, the build number will be "REPLACE_THIS_WITH_CI_BUILD_NUM"
buildNumber = f'{buildNumber}' if buildNumber != "REPLACE_THIS_WITH_CI_BUILD_NUM" else "618658"
if int(buildNumber) >= 618657:
# Line graph:
for entry in res:
higher = max(entry["data"][0], higher)
if counter % 2 == 0:
output.append({"name": counter, "data": [higher]})
higher = 0
counter += 1
data = {
"Type": 17,
"ContentsFormat": "line",
"Contents": {
"stats": output,
"params": {
"timeFrame": "minutes",
"format": "HH:mm",
"layout": "vertical"
}
}
}
else:
# Bar graph:
now = datetime.utcnow()
then = now - timedelta(days=1)
for entry in res:
higher = max(entry["data"][0], higher)
if counter % 60 == 0:
then = then + timedelta(hours=1)
name = then.strftime("%H:%M")
output.append({"name": name, "data": [higher]})
higher = 0
counter += 1
data = {
"Type": 17,
"ContentsFormat": "bar",
"Contents": {
"stats": output,
"params": {
"layout": "horizontal"
}
}
}
demisto.results(data)
else:
addActions = analyzeData(res)
results = CommandResults(
readable_output="analyzeMemoryUsage Done",
outputs_prefix="HealthCheck.ActionableItems",
outputs=addActions)
return_results(results)
|
python
|
class DSN:
def __init__(self,
user,
password,
database="test", host="localhost", port: int = 3306, charset="utf8"):
self.user = user
self.password = password
self.host = host
self.port = port
self.database = database
self.charset = charset
|
python
|
from vart.sampler.walkers import Walkers
class SamplerBase(object):
def __init__(self,nwalkers=1000, nstep=1000, nelec=1, ndim=3,
step_size = 3, domain = {'min':-2,'max':2},
move='all'):
self.nwalkers = nwalkers
self.nstep = nstep
self.step_size = step_size
self.domain = domain
self.move = move
self.nelec = nelec
self.ndim = ndim
self.walkers = Walkers(nwalkers,nelec,ndim,domain)
def set_ndim(self,ndim):
self.ndim = ndim
def set_initial_guess(self,guess):
self.initial_guess = guess
def generate(self,pdf):
raise NotImplementedError()
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 Johns Hopkins University (Jiatong Shi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import codecs
import sys
def get_parser():
parser = argparse.ArgumentParser(description="language identification scoring")
parser.add_argument("--ref", type=str, help="input reference", required=True)
parser.add_argument("--hyp", type=str, help="input hypotheses", required=True)
parser.add_argument(
"--out",
type=argparse.FileType("w"),
default=sys.stdout,
help="The output filename. " "If omitted, then output to sys.stdout",
)
return parser
def main(args):
args = get_parser().parse_args(args)
scoring(args.ref, args.hyp, args.out)
def scoring(ref, hyp, out):
ref_file = codecs.open(ref, "r", encoding="utf-8")
hyp_file = codecs.open(hyp, "r", encoding="utf-8")
utt_num = 0
correct = 0
while True:
ref_utt = ref_file.readline()
hyp_utt = hyp_file.readline()
if not ref_utt or not hyp_utt:
break
[rec_id, lid, utt_id] = ref_utt.strip().split()
[hrec_id, hlid, hutt_id] = hyp_utt.strip().split()
assert rec_id == hrec_id and utt_id == hutt_id, "Mismatch in trn id"
if lid == hlid:
correct += 1
utt_num += 1
out.write(
"Language Identification Scoring: Accuracy {:.4f} ({}/{})".format(
(correct / float(utt_num)), correct, utt_num
)
)
if __name__ == "__main__":
main(sys.argv[1:])
|
python
|
# -*- coding: utf-8 -*-
import pytest
import csv
import sys
import os
from filecmp import cmp
from feature_extractor.feature_extractor import FeatureExtractor
__author__ = "Harvey Bastidas"
__copyright__ = "Harvey Bastidas"
__license__ = "mit"
class Conf:
""" This method initialize the configuration variables for a plugin """
def __init__(self):
""" Component Tests Constructor """
fname = os.path.join(os.path.dirname(__file__), "data/test_input.csv")
self.input_file = fname
""" Test dataset filename """
fname = os.path.join(os.path.dirname(__file__), "data/test_output.csv")
self.output_file = fname
""" Output dataset filename """
self.list_plugins = False
self.core_plugin = "heuristic_ts"
self.ema_fast = 0
self.ema_slow = 1
self.forward_ticks = 5
self.use_current = False
class TestFeatureExtractor:
""" Component Tests """
def setup_method(self, test_method):
""" Component Tests Constructor """
self.conf = Conf()
self.rows_d, self.cols_d = self.get_size_csv(self.conf.input_file)
""" Get the number of rows and columns of the test dataset """
try:
os.remove(self.conf.output_file)
except:
print("No test output file found.")
pass
def get_size_csv(self, csv_file):
""" Get the number of rows and columns of a test dataset, used in all tests.
Args:
csv_file (string): Path and filename of a test dataset
Returns:
(int,int): number of rows, number of columns
"""
rows = list(csv.reader(open(csv_file)))
return (len(rows), len(rows[0]))
def test_C01T01_list_plugins(self):
""" Asses that plugin list has more than zero installed plugins """
self.conf.list_plugins = True
self.fe = FeatureExtractor(self.conf)
""" FeatureExtractor instance """
# assertion
assert (len(self.fe.discovered_core_plugins) > 0)
def test_C01T02_plugin_load(self):
""" Loads HeuristicTS using parameters from setup_method() and Asses that output file has 1 column and num_ticks - forward_ticks """
self.fe = FeatureExtractor(self.conf)
# get the number of rows and cols from out_file
rows_o, cols_o = self.get_size_csv(self.conf.output_file)
# assertion
assert (cols_o == 1) and (rows_o == self.fe.ep_core.rows_d - self.fe.ep_core.conf.forward_ticks)
def test_C01T03_cmdline_plugin_load(self):
""" same as C01T02, but via command-line """
os.system("feature_extractor --core_plugin heuristic_ts --input_file "
+ self.conf.input_file
+ " --output_file "
+ self.conf.output_file
+ " --forward_ticks "
+ str(self.conf.forward_ticks)
)
# get the size of the input dataset
rows_d, cols_d = self.get_size_csv(self.conf.input_file)
# get the size of the output dataset
rows_o, cols_o = self.get_size_csv(self.conf.output_file)
# assert that the output has 1 column and the number of input rows minus forward_ticks
assert (cols_o == 1) and (rows_o == rows_d - self.conf.forward_ticks)
|
python
|
# coding:utf-8
from . import api
from ..models import User,Bucketlist
from flask import request, jsonify, abort, make_response
@api.route('/bucketlists/', methods=['POST', 'GET'])
def bucketlists():
# get the access token
access_token = request.headers.get('Authorization')
if access_token:
user_id = User.decode_token(access_token)
if not isinstance(user_id, str):
# Go ahead and handle the request, the user is authed
if request.method == "POST":
name = str(request.data.get('name', ''))
if name:
bucketlist = Bucketlist(name=name, created_by=user_id)
bucketlist.save()
response = jsonify({
'id': bucketlist.id,
'name': bucketlist.name,
'date_created': bucketlist.date_created,
'date_modified': bucketlist.date_modified,
'created_by': user_id
})
return make_response(response), 201
else:
# GET
# get all the bucketlists for this user
bucketlists = Bucketlist.get_all(user_id)
results = []
for bucketlist in bucketlists:
obj = {
'id': bucketlist.id,
'name': bucketlist.name,
'date_created': bucketlist.date_created,
'date_modified': bucketlist.date_modified,
'created_by': bucketlist.created_by
}
results.append(obj)
return make_response(jsonify(results)), 200
else:
# user is not legit, so the payload is an error message
message = user_id
response = {
'message': message
}
return make_response(jsonify(response)), 401
@api.route('/bucketlists/<int:id>', methods=['GET', 'PUT', 'DELETE'])
def bucketlist_manipulation(id, **kwargs):
access_token = request.headers.get('Authorization')
if access_token:
user_id = User.decode_token(access_token)
if not isinstance(user_id, str):
bucketlist = Bucketlist.query.filter_by(id=id).first()
if not bucketlist:
# Raise an HTTPException with a 404 not found status code
abort(404)
if request.method == "DELETE":
bucketlist.delete()
return {
"message": "bucketlist {} deleted".format(bucketlist.id)
}, 200
elif request.method == 'PUT':
name = str(request.data.get('name', ''))
bucketlist.name = name
bucketlist.save()
response = {
'id': bucketlist.id,
'name': bucketlist.name,
'date_created': bucketlist.date_created,
'date_modified': bucketlist.date_modified,
'created_by': bucketlist.created_by
}
return make_response(jsonify(response)), 200
else:
# GET
response = jsonify({
'id': bucketlist.id,
'name': bucketlist.name,
'date_created': bucketlist.date_created,
'date_modified': bucketlist.date_modified,
'created_by': bucketlist.created_by
})
return make_response(response), 200
else:
# user is not legit, so the payload is an error message
message = user_id
response = {
'message': message
}
return make_response(jsonify(response)), 401
|
python
|
# Standard Library
import asyncio
import json
import logging
import os
import time
from datetime import datetime
# Third Party
import kubernetes.client
from elasticsearch import AsyncElasticsearch, exceptions
from elasticsearch.helpers import async_streaming_bulk
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from nats_wrapper import NatsWrapper
from prepare_training_logs import PrepareTrainingLogs
MINIO_SERVER_URL = os.environ["MINIO_SERVER_URL"]
MINIO_ACCESS_KEY = os.environ["MINIO_ACCESS_KEY"]
MINIO_SECRET_KEY = os.environ["MINIO_SECRET_KEY"]
NATS_SERVER_URL = os.environ["NATS_SERVER_URL"]
ES_ENDPOINT = os.environ["ES_ENDPOINT"]
ES_USERNAME = os.environ["ES_USERNAME"]
ES_PASSWORD = os.environ["ES_PASSWORD"]
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(message)s")
config.load_incluster_config()
configuration = kubernetes.client.Configuration()
api_instance = kubernetes.client.BatchV1Api()
logging.info("Cluster config has been been loaded")
## TODO: rather than defining nulog_spec here, it should probably be loaded from a YAML file.
nulog_spec = {
"name": "nulog-train",
"container_name": "nulog-train",
"image_name": "amartyarancher/nulog-train:v0.1",
"image_pull_policy": "Always",
"labels": {"app": "nulog-train"},
"restart_policy": "Never",
"requests": {"memory": "1Gi", "cpu": 1},
"limits": {"nvidia.com/gpu": 1},
"env": [],
}
nulog_spec["env"] = [
client.V1EnvVar(name="MINIO_SERVER_URL", value=MINIO_SERVER_URL),
client.V1EnvVar(name="MINIO_ACCESS_KEY", value=MINIO_ACCESS_KEY),
client.V1EnvVar(name="MINIO_SECRET_KEY", value=MINIO_SECRET_KEY),
client.V1EnvVar(name="NATS_SERVER_URL", value=NATS_SERVER_URL),
]
startup_time = time.time()
NAMESPACE = os.environ["JOB_NAMESPACE"]
DEFAULT_TRAINING_INTERVAL = 1800 # 1800 seconds aka 30mins
es = AsyncElasticsearch(
[ES_ENDPOINT],
port=9200,
http_auth=(ES_USERNAME, ES_PASSWORD),
verify_certs=False,
use_ssl=True,
)
async def update_es_job_status(
request_id: str,
job_status: str,
op_type: str = "update",
index: str = "training_signal",
):
"""
    Update the status of a training-job document in Elasticsearch.
"""
script = "ctx._source.status = '{}';".format(job_status)
docs_to_update = [
{
"_id": request_id,
"_op_type": op_type,
"_index": index,
"script": script,
}
]
logging.info("ES job {} status update : {}".format(request_id, job_status))
try:
async for ok, result in async_streaming_bulk(es, docs_to_update):
action, result = result.popitem()
if not ok:
logging.error("failed to %s document %s" % ())
except Exception as e:
logging.error(e)
async def es_training_signal_coroutine(signals_queue: asyncio.Queue):
"""
    Collect training-job signals from Elasticsearch and add them to the signals queue.
"""
query_body = {"query": {"bool": {"must": {"match": {"status": "submitted"}}}}}
index = "training_signal"
current_time = int(datetime.timestamp(datetime.now()))
job_payload = {
"model_to_train": "nulog",
"time_intervals": [
{
"start_ts": (current_time - DEFAULT_TRAINING_INTERVAL) * (10 ** 9),
"end_ts": current_time * (10 ** 9),
}
],
}
signal_index_exists = False
try:
signal_index_exists = await es.indices.exists(index)
if not signal_index_exists:
signal_created = await es.indices.create(index=index)
except exceptions.TransportError as e:
logging.error(e)
while True:
try:
user_signals_response = await es.search(
index=index, body=query_body, size=100
)
user_signal_hits = user_signals_response["hits"]["hits"]
if len(user_signal_hits) > 0:
for hit in user_signal_hits:
signals_queue_payload = {
"source": "elasticsearch",
"_id": hit["_id"],
"model": "nulog-train",
"signal": "start",
"payload": job_payload,
}
await update_es_job_status(
request_id=hit["_id"], job_status="scheduled"
)
await signals_queue.put(signals_queue_payload)
except (exceptions.NotFoundError, exceptions.TransportError) as e:
logging.error(e)
await asyncio.sleep(60)
def job_not_currently_running(job_name, namespace=NAMESPACE):
try:
jobs = api_instance.list_namespaced_job(namespace, timeout_seconds=60)
    except ApiException as e:
        logging.error(
            "Exception when calling BatchV1Api->list_namespaced_job: %s\n" % e
        )
        # Without a job listing we cannot tell; err on the side of "running"
        # so that a duplicate job is not scheduled.
        return False
for job in jobs.items:
if job.metadata.name == job_name:
return False
return True
async def kube_delete_empty_pods(signals_queue, namespace=NAMESPACE, phase="Succeeded"):
deleteoptions = client.V1DeleteOptions()
# We need the api entry point for pods
api_pods = client.CoreV1Api()
# List the pods
try:
pods = api_pods.list_namespaced_pod(namespace, timeout_seconds=60)
    except ApiException as e:
        logging.error("Exception when calling CoreV1Api->list_namespaced_pod: %s\n" % e)
        # Nothing to clean up if the pod list could not be fetched.
        return
for pod in pods.items:
podname = pod.metadata.name
try:
if pod.status.phase == phase:
api_response = api_pods.delete_namespaced_pod(
podname, namespace, body=deleteoptions
)
if "nulog-train" in podname:
signals_queue_payload = {
"model": "nulog-train",
"signal": "finish",
"payload": None,
}
await signals_queue.put(signals_queue_payload)
else:
logging.info(
"Pod: {} still not done... Phase: {}".format(
podname, pod.status.phase
)
)
except ApiException as e:
logging.error(
"Exception when calling CoreV1Api->delete_namespaced_pod: %s\n" % e
)
return
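# run_job builds a one-off Kubernetes Job from a job_details dict such as nulog_spec
# above (container spec, pod template, backoff limit) and submits it via the BatchV1 API.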
def run_job(job_details):
resource_specifications = client.V1ResourceRequirements(
requests=job_details["requests"], limits=job_details["limits"]
)
container = client.V1Container(
name=job_details["container_name"],
image=job_details["image_name"],
image_pull_policy=job_details["image_pull_policy"],
env=job_details["env"],
resources=resource_specifications,
)
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels=job_details["labels"]),
spec=client.V1PodSpec(
restart_policy=job_details["restart_policy"], containers=[container]
),
)
spec = client.V1JobSpec(template=template, backoff_limit=4)
job = client.V1Job(
api_version="batch/v1",
kind="Job",
metadata=client.V1ObjectMeta(name=job_details["name"]),
spec=spec,
)
api_instance.create_namespaced_job(body=job, namespace=NAMESPACE)
async def clear_jobs(signals_queue):
namespace = NAMESPACE
state = "finished"
while True:
await asyncio.sleep(300)
deleteoptions = client.V1DeleteOptions()
try:
jobs = api_instance.list_namespaced_job(namespace, timeout_seconds=60)
        except ApiException as e:
            logging.error(
                "Exception when calling BatchV1Api->list_namespaced_job: %s\n" % e
            )
            # Skip this cleanup cycle if the job list could not be fetched.
            continue
        # Now that we have all the jobs, let's clean up.
        # We also log the jobs we didn't clean up, because they either failed or are still running.
for job in jobs.items:
jobname = job.metadata.name
jobstatus = job.status.conditions
if job.status.succeeded == 1:
# Clean up Job
logging.info(
"Cleaning up Job: {}. Finished at: {}".format(
jobname, job.status.completion_time
)
)
try:
                    # grace_period_seconds=0 deletes the Job immediately instead of after the
                    # API server's default grace period; propagation_policy="Background" makes
                    # garbage collection of the Job's dependents asynchronous.
api_response = api_instance.delete_namespaced_job(
jobname,
namespace,
body=deleteoptions,
grace_period_seconds=0,
propagation_policy="Background",
)
except ApiException as e:
logging.error(
"Exception when calling BatchV1Api->delete_namespaced_job: %s\n"
% e
)
else:
if jobstatus is None and job.status.active == 1:
jobstatus = "active"
logging.info(
"Job: {} not cleaned up. Current status: {}".format(
jobname, jobstatus
)
)
# Now that we have the jobs cleaned, let's clean the pods
await kube_delete_empty_pods(signals_queue)
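# manage_kubernetes_training_jobs consumes payloads from signals_queue: a "start" signal
# prepares the training logs and launches the nulog training Job if none is currently
# running (otherwise the payload is parked in nulog_next_job_to_run); a "finish" signal
# runs any parked job.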
async def manage_kubernetes_training_jobs(signals_queue):
nulog_next_job_to_run = None ## TODO: (minor) should replace this with a queue? in case there are multiple pending jobs
while True:
payload = await signals_queue.get()
if payload is None:
break
signal = payload["signal"]
model_to_train = payload["model"]
model_payload = payload["payload"]
if signal == "start":
if job_not_currently_running(model_to_train):
PrepareTrainingLogs("/tmp").run(model_payload["time_intervals"])
if model_to_train == "nulog-train":
if "source" in payload and payload["source"] == "elasticsearch":
await update_es_job_status(
request_id=payload["_id"], job_status="trainingStarted"
)
run_job(nulog_spec)
## es_update_status = training_inprogress
else:
if model_to_train == "nulog-train":
nulog_next_job_to_run = model_payload
logging.info(
"Nulog model currently being trained. Job will run after this model's training has been completed"
)
if "source" in payload and payload["source"] == "elasticsearch":
await update_es_job_status(
request_id=payload["_id"], job_status="pendingInQueue"
)
## es_update_status = pending_in_queue
else:
if nulog_next_job_to_run:
PrepareTrainingLogs("/tmp").run(nulog_next_job_to_run["time_intervals"])
run_job(nulog_spec)
nulog_next_job_to_run = None
async def consume_nats_drain_signal(queue, signals_queue):
while True:
payload = await queue.get()
if payload is None:
break
try:
decoded_payload = json.loads(payload)
# process the payload
if decoded_payload["model_to_train"] == "nulog":
signals_queue_payload = {
"model": "nulog-train",
"signal": "start",
"payload": decoded_payload,
}
await signals_queue.put(signals_queue_payload)
logging.info("Just received signal to begin running the jobs")
except Exception as e:
logging.error(e)
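# consume_payload_coroutine keeps a NATS subscription to the "train" subject alive,
# re-initialising the connection whenever it drops, and pushes raw payloads onto jobs_queue.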
async def consume_payload_coroutine(loop, jobs_queue):
nw = NatsWrapper(loop)
while True:
if nw.first_run_or_got_disconnected_or_error:
logging.info("Need to (re)connect to NATS")
nw.re_init()
await nw.connect()
await nw.subscribe(nats_subject="train", payload_queue=jobs_queue)
nw.first_run_or_got_disconnected_or_error = False
await asyncio.sleep(1)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
jobs_queue = asyncio.Queue(loop=loop)
signals_queue = asyncio.Queue(loop=loop)
consumer_coroutine = consume_payload_coroutine(loop, jobs_queue)
consume_nats_drain_signal_coroutine = consume_nats_drain_signal(
jobs_queue, signals_queue
)
clear_jobs_coroutine = clear_jobs(signals_queue)
manage_kubernetes_jobs_coroutine = manage_kubernetes_training_jobs(signals_queue)
es_signal_coroutine = es_training_signal_coroutine(signals_queue)
loop.run_until_complete(
asyncio.gather(
consumer_coroutine,
consume_nats_drain_signal_coroutine,
clear_jobs_coroutine,
manage_kubernetes_jobs_coroutine,
es_signal_coroutine,
)
)
try:
loop.run_forever()
finally:
loop.close()
|
python
|
from django import template
from django.contrib.auth.models import User
from ..forms import UserCreateForm
register = template.Library()
@register.inclusion_tag('registration/_signup.html')
def signup_form():
return {'form': UserCreateForm(instance=User())}
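# Example template usage (illustrative; the {% load %} argument is whatever this module
# is named in the app's templatetags package):
#   {% load signup_tags %}
#   {% signup_form %}
# which renders registration/_signup.html with a fresh UserCreateForm.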
|
python
|
coordinates_E0E1E1 = ((128, 86),
(129, 81), (129, 84), (129, 86), (130, 82), (130, 86), (130, 97), (130, 116), (130, 119), (131, 69), (131, 82), (131, 84), (131, 86), (131, 96), (131, 97), (131, 112), (131, 114), (131, 116), (131, 117), (131, 119), (132, 70), (132, 71), (132, 83), (132, 85), (132, 87), (132, 96), (132, 103), (132, 110), (132, 114), (132, 115), (133, 75), (133, 82), (133, 83), (133, 85), (133, 87), (133, 88), (133, 96), (133, 98), (133, 102), (133, 105), (133, 108), (133, 112), (134, 76), (134, 82), (134, 84), (134, 85), (134, 86), (134, 89), (134, 96), (134, 98), (134, 101), (134, 103), (134, 110), (134, 129), (135, 77), (135, 81), (135, 82), (135, 83), (135, 84), (135, 85), (135, 86), (135, 87), (135, 90), (135, 95), (135, 97), (135, 98), (135, 102), (135, 103), (135, 104), (135, 105), (135, 106), (135, 109), (135, 130),
(136, 77), (136, 79), (136, 82), (136, 83), (136, 84), (136, 85), (136, 86), (136, 87), (136, 88), (136, 90), (136, 95), (136, 96), (136, 97), (136, 98), (136, 99), (136, 101), (136, 102), (136, 103), (136, 104), (136, 105), (136, 106), (136, 107), (136, 109), (136, 130), (136, 131), (137, 77), (137, 92), (137, 93), (137, 95), (137, 96), (137, 97), (137, 98), (137, 99), (137, 100), (137, 101), (137, 102), (137, 103), (137, 104), (137, 105), (137, 106), (137, 108), (137, 130), (137, 132), (138, 77), (138, 80), (138, 81), (138, 82), (138, 83), (138, 84), (138, 85), (138, 86), (138, 87), (138, 88), (138, 89), (138, 95), (138, 96), (138, 97), (138, 98), (138, 99), (138, 100), (138, 101), (138, 102), (138, 103), (138, 104), (138, 105), (138, 106), (138, 108), (138, 131), (138, 133), (139, 76), (139, 77), (139, 79),
(139, 90), (139, 91), (139, 92), (139, 93), (139, 94), (139, 95), (139, 96), (139, 97), (139, 98), (139, 99), (139, 100), (139, 101), (139, 102), (139, 103), (139, 104), (139, 105), (139, 106), (139, 108), (139, 131), (139, 133), (140, 73), (140, 74), (140, 78), (140, 92), (140, 94), (140, 95), (140, 96), (140, 97), (140, 98), (140, 99), (140, 100), (140, 101), (140, 102), (140, 103), (140, 104), (140, 105), (140, 106), (140, 108), (140, 133), (141, 71), (141, 77), (141, 93), (141, 95), (141, 96), (141, 97), (141, 98), (141, 99), (141, 100), (141, 101), (141, 102), (141, 103), (141, 104), (141, 105), (141, 106), (141, 108), (141, 132), (141, 133), (142, 72), (142, 74), (142, 75), (142, 77), (142, 93), (142, 95), (142, 96), (142, 97), (142, 98), (142, 99), (142, 100), (142, 101), (142, 102), (142, 103), (142, 104),
(142, 105), (142, 106), (142, 108), (142, 132), (142, 135), (143, 73), (143, 76), (143, 92), (143, 94), (143, 95), (143, 96), (143, 97), (143, 98), (143, 99), (143, 100), (143, 101), (143, 102), (143, 103), (143, 104), (143, 105), (143, 106), (143, 108), (143, 131), (143, 134), (144, 74), (144, 91), (144, 93), (144, 94), (144, 95), (144, 96), (144, 97), (144, 98), (144, 99), (144, 100), (144, 101), (144, 102), (144, 103), (144, 104), (144, 105), (144, 106), (144, 107), (144, 109), (144, 122), (144, 124), (144, 131), (144, 132), (145, 74), (145, 75), (145, 91), (145, 94), (145, 95), (145, 96), (145, 97), (145, 98), (145, 99), (145, 100), (145, 101), (145, 102), (145, 103), (145, 104), (145, 105), (145, 106), (145, 107), (145, 109), (145, 118), (145, 120), (145, 121), (145, 130), (145, 131), (146, 74), (146, 91), (146, 94),
(146, 95), (146, 96), (146, 97), (146, 98), (146, 99), (146, 100), (146, 101), (146, 102), (146, 103), (146, 104), (146, 105), (146, 106), (146, 107), (146, 108), (146, 110), (146, 116), (146, 122), (146, 123), (146, 125), (146, 131), (147, 94), (147, 96), (147, 97), (147, 98), (147, 99), (147, 100), (147, 101), (147, 102), (147, 103), (147, 104), (147, 105), (147, 106), (147, 107), (147, 108), (147, 109), (147, 112), (147, 113), (147, 114), (147, 118), (147, 119), (147, 120), (147, 121), (147, 122), (147, 123), (147, 124), (147, 125), (147, 127), (147, 128), (147, 130), (148, 94), (148, 96), (148, 97), (148, 98), (148, 99), (148, 100), (148, 101), (148, 102), (148, 103), (148, 104), (148, 105), (148, 106), (148, 107), (148, 108), (148, 109), (148, 110), (148, 115), (148, 116), (148, 117), (148, 118), (148, 119), (148, 120), (148, 121),
(148, 122), (148, 123), (148, 124), (148, 125), (148, 130), (149, 94), (149, 96), (149, 97), (149, 98), (149, 99), (149, 100), (149, 101), (149, 102), (149, 103), (149, 104), (149, 105), (149, 106), (149, 107), (149, 108), (149, 109), (149, 110), (149, 111), (149, 112), (149, 113), (149, 114), (149, 115), (149, 116), (149, 117), (149, 118), (149, 119), (149, 120), (149, 121), (149, 122), (149, 123), (149, 124), (149, 125), (149, 126), (149, 127), (149, 129), (150, 94), (150, 96), (150, 97), (150, 98), (150, 99), (150, 100), (150, 101), (150, 102), (150, 103), (150, 104), (150, 105), (150, 106), (150, 107), (150, 108), (150, 109), (150, 110), (150, 111), (150, 112), (150, 113), (150, 114), (150, 115), (150, 116), (150, 117), (150, 118), (150, 119), (150, 120), (150, 121), (150, 122), (150, 123), (150, 124), (150, 125), (150, 126), (150, 127),
(150, 129), (150, 138), (151, 93), (151, 95), (151, 96), (151, 97), (151, 98), (151, 99), (151, 100), (151, 101), (151, 102), (151, 103), (151, 104), (151, 105), (151, 106), (151, 107), (151, 108), (151, 109), (151, 110), (151, 111), (151, 112), (151, 113), (151, 114), (151, 115), (151, 116), (151, 117), (151, 118), (151, 119), (151, 120), (151, 121), (151, 122), (151, 123), (151, 124), (151, 125), (151, 126), (151, 127), (151, 129), (151, 138), (152, 92), (152, 94), (152, 95), (152, 96), (152, 97), (152, 98), (152, 99), (152, 100), (152, 101), (152, 102), (152, 103), (152, 104), (152, 105), (152, 106), (152, 107), (152, 108), (152, 109), (152, 110), (152, 111), (152, 112), (152, 113), (152, 114), (152, 115), (152, 116), (152, 117), (152, 118), (152, 119), (152, 120), (152, 121), (152, 122), (152, 123), (152, 124), (152, 125), (152, 126),
(152, 127), (152, 129), (152, 137), (153, 89), (153, 90), (153, 93), (153, 94), (153, 95), (153, 96), (153, 97), (153, 98), (153, 99), (153, 100), (153, 101), (153, 102), (153, 103), (153, 104), (153, 105), (153, 106), (153, 107), (153, 108), (153, 109), (153, 110), (153, 111), (153, 112), (153, 113), (153, 114), (153, 115), (153, 116), (153, 117), (153, 118), (153, 121), (153, 122), (153, 123), (153, 124), (153, 125), (153, 126), (153, 127), (153, 129), (153, 137), (154, 85), (154, 86), (154, 87), (154, 88), (154, 91), (154, 92), (154, 93), (154, 94), (154, 95), (154, 96), (154, 97), (154, 98), (154, 99), (154, 100), (154, 101), (154, 102), (154, 103), (154, 104), (154, 105), (154, 106), (154, 107), (154, 108), (154, 109), (154, 110), (154, 111), (154, 112), (154, 113), (154, 114), (154, 115), (154, 116), (154, 119), (154, 122),
(154, 123), (154, 124), (154, 125), (154, 126), (154, 127), (154, 128), (154, 130), (154, 137), (155, 81), (155, 83), (155, 84), (155, 87), (155, 90), (155, 91), (155, 92), (155, 93), (155, 94), (155, 95), (155, 96), (155, 97), (155, 98), (155, 99), (155, 100), (155, 101), (155, 102), (155, 103), (155, 104), (155, 105), (155, 106), (155, 107), (155, 108), (155, 109), (155, 110), (155, 111), (155, 112), (155, 113), (155, 114), (155, 115), (155, 117), (155, 121), (155, 123), (155, 124), (155, 125), (155, 126), (155, 127), (155, 128), (155, 129), (155, 131), (155, 134), (155, 137), (156, 80), (156, 88), (156, 90), (156, 91), (156, 92), (156, 93), (156, 94), (156, 95), (156, 100), (156, 101), (156, 102), (156, 103), (156, 104), (156, 105), (156, 106), (156, 107), (156, 108), (156, 109), (156, 110), (156, 111), (156, 112), (156, 113),
(156, 114), (156, 116), (156, 122), (156, 124), (156, 125), (156, 126), (156, 127), (156, 128), (156, 129), (156, 130), (156, 132), (156, 133), (156, 136), (156, 137), (157, 90), (157, 92), (157, 96), (157, 97), (157, 98), (157, 100), (157, 101), (157, 102), (157, 103), (157, 104), (157, 105), (157, 106), (157, 107), (157, 108), (157, 109), (157, 110), (157, 111), (157, 112), (157, 113), (157, 114), (157, 115), (157, 116), (157, 123), (157, 125), (157, 126), (157, 127), (157, 128), (157, 129), (157, 130), (157, 131), (157, 134), (157, 135), (157, 136), (157, 137), (157, 139), (157, 140), (157, 141), (157, 142), (157, 144), (158, 90), (158, 93), (158, 94), (158, 95), (158, 100), (158, 102), (158, 103), (158, 104), (158, 105), (158, 106), (158, 107), (158, 108), (158, 109), (158, 110), (158, 111), (158, 112), (158, 113), (158, 114), (158, 115),
(158, 116), (158, 124), (158, 127), (158, 128), (158, 129), (158, 130), (158, 131), (158, 132), (158, 133), (158, 134), (158, 135), (158, 136), (158, 137), (158, 138), (158, 141), (158, 144), (159, 90), (159, 92), (159, 100), (159, 102), (159, 103), (159, 104), (159, 105), (159, 106), (159, 107), (159, 108), (159, 109), (159, 110), (159, 111), (159, 112), (159, 113), (159, 114), (159, 116), (159, 125), (159, 128), (159, 129), (159, 130), (159, 131), (159, 132), (159, 133), (159, 134), (159, 135), (159, 136), (159, 137), (159, 139), (159, 142), (159, 144), (160, 101), (160, 103), (160, 104), (160, 105), (160, 106), (160, 107), (160, 108), (160, 109), (160, 110), (160, 111), (160, 112), (160, 113), (160, 114), (160, 115), (160, 117), (160, 127), (160, 129), (160, 130), (160, 131), (160, 132), (160, 133), (160, 134), (160, 135), (160, 136), (160, 138),
(161, 102), (161, 105), (161, 106), (161, 107), (161, 108), (161, 109), (161, 110), (161, 111), (161, 112), (161, 113), (161, 114), (161, 116), (161, 128), (161, 130), (161, 131), (161, 132), (161, 133), (161, 134), (161, 135), (161, 136), (161, 138), (162, 103), (162, 106), (162, 107), (162, 108), (162, 109), (162, 110), (162, 111), (162, 112), (162, 113), (162, 114), (162, 116), (162, 128), (162, 130), (162, 131), (162, 132), (162, 133), (162, 134), (162, 135), (162, 137), (163, 106), (163, 107), (163, 108), (163, 109), (163, 110), (163, 111), (163, 112), (163, 113), (163, 115), (163, 128), (163, 130), (163, 131), (163, 132), (163, 133), (163, 134), (163, 135), (163, 137), (164, 106), (164, 108), (164, 109), (164, 110), (164, 111), (164, 112), (164, 114), (164, 127), (164, 129), (164, 131), (164, 132), (164, 133), (164, 134), (164, 135), (164, 138),
(165, 107), (165, 109), (165, 110), (165, 111), (165, 113), (165, 126), (165, 130), (165, 132), (165, 133), (165, 134), (165, 135), (165, 136), (165, 139), (166, 108), (166, 110), (166, 111), (166, 113), (166, 123), (166, 126), (166, 131), (166, 133), (166, 135), (167, 109), (167, 111), (167, 112), (167, 114), (167, 123), (167, 125), (167, 131), (167, 135), (168, 109), (168, 111), (168, 112), (168, 114), (168, 123), (168, 125), (168, 133), (168, 135), (169, 109), (169, 111), (169, 114), (169, 124), (169, 125), (169, 134), (169, 136), (170, 110), (170, 113), (170, 115), (170, 124), (170, 125), (170, 136), (171, 110), (171, 111), (171, 115), (171, 125), (171, 136), (171, 138), (172, 110), (172, 115), (172, 125), (172, 136), (172, 139), (172, 140), (172, 141), (172, 142), (172, 143), (172, 145), (173, 125), (173, 136), (173, 139), (173, 145), (173, 146),
(174, 116), (174, 125), (174, 136), (174, 138), (175, 125), (175, 136), (175, 138), (176, 125), (176, 136), (176, 138), (177, 124), (177, 126), (177, 136), (177, 137), (178, 124), (178, 126), (179, 124), (179, 126), )
coordinates_E1E1E1 = ((68, 119),
(69, 119), (69, 120), (70, 119), (70, 121), (70, 130), (71, 103), (71, 119), (71, 122), (71, 129), (71, 130), (72, 103), (72, 105), (72, 109), (72, 119), (72, 121), (72, 123), (72, 124), (72, 125), (72, 126), (72, 127), (72, 128), (72, 130), (73, 103), (73, 106), (73, 108), (73, 118), (73, 119), (73, 120), (73, 121), (73, 122), (73, 130), (74, 102), (74, 108), (74, 118), (74, 120), (74, 121), (74, 122), (74, 123), (74, 124), (74, 125), (74, 126), (74, 127), (74, 128), (74, 130), (74, 135), (75, 102), (75, 105), (75, 117), (75, 119), (75, 120), (75, 121), (75, 122), (75, 123), (75, 124), (75, 125), (75, 126), (75, 127), (75, 128), (75, 129), (75, 130), (75, 131), (75, 132), (75, 133), (75, 136), (75, 144), (76, 95), (76, 102), (76, 116), (76, 118), (76, 119), (76, 120), (76, 121),
(76, 122), (76, 123), (76, 124), (76, 125), (76, 126), (76, 127), (76, 128), (76, 129), (76, 130), (76, 136), (76, 143), (77, 87), (77, 88), (77, 95), (77, 96), (77, 101), (77, 102), (77, 117), (77, 118), (77, 119), (77, 120), (77, 121), (77, 122), (77, 123), (77, 124), (77, 125), (77, 126), (77, 127), (77, 128), (77, 129), (77, 130), (77, 131), (77, 132), (77, 133), (77, 134), (77, 135), (77, 137), (77, 144), (78, 87), (78, 89), (78, 96), (78, 98), (78, 99), (78, 101), (78, 115), (78, 117), (78, 118), (78, 119), (78, 120), (78, 121), (78, 122), (78, 123), (78, 124), (78, 125), (78, 126), (78, 127), (78, 128), (78, 129), (78, 130), (78, 131), (78, 132), (78, 133), (78, 134), (78, 135), (78, 136), (78, 139), (78, 140), (78, 142), (79, 87), (79, 90), (79, 96), (79, 101),
(79, 114), (79, 116), (79, 117), (79, 118), (79, 119), (79, 120), (79, 121), (79, 122), (79, 123), (79, 124), (79, 125), (79, 126), (79, 127), (79, 128), (79, 129), (79, 130), (79, 131), (79, 132), (79, 133), (79, 134), (79, 135), (79, 136), (79, 137), (79, 140), (80, 88), (80, 91), (80, 96), (80, 98), (80, 99), (80, 101), (80, 114), (80, 120), (80, 121), (80, 122), (80, 123), (80, 124), (80, 125), (80, 126), (80, 127), (80, 128), (80, 129), (80, 130), (80, 131), (80, 132), (80, 133), (80, 134), (80, 135), (80, 136), (80, 139), (81, 90), (81, 92), (81, 96), (81, 98), (81, 99), (81, 101), (81, 114), (81, 115), (81, 117), (81, 118), (81, 119), (81, 122), (81, 123), (81, 124), (81, 125), (81, 126), (81, 127), (81, 128), (81, 129), (81, 130), (81, 131), (81, 132), (81, 133),
(81, 134), (81, 135), (81, 137), (82, 91), (82, 93), (82, 94), (82, 95), (82, 96), (82, 97), (82, 98), (82, 99), (82, 101), (82, 121), (82, 122), (82, 123), (82, 124), (82, 125), (82, 126), (82, 127), (82, 128), (82, 129), (82, 130), (82, 131), (82, 132), (82, 133), (82, 134), (82, 136), (83, 92), (83, 96), (83, 97), (83, 98), (83, 99), (83, 100), (83, 102), (83, 122), (83, 124), (83, 125), (83, 126), (83, 127), (83, 128), (83, 129), (83, 130), (83, 131), (83, 132), (83, 133), (83, 135), (84, 92), (84, 94), (84, 95), (84, 96), (84, 97), (84, 98), (84, 99), (84, 100), (84, 101), (84, 103), (84, 122), (84, 124), (84, 125), (84, 126), (84, 127), (84, 128), (84, 129), (84, 130), (84, 131), (84, 132), (84, 133), (84, 135), (85, 92), (85, 94), (85, 95), (85, 96),
(85, 97), (85, 98), (85, 99), (85, 100), (85, 101), (85, 104), (85, 122), (85, 124), (85, 125), (85, 126), (85, 127), (85, 128), (85, 129), (85, 130), (85, 131), (85, 132), (85, 133), (85, 135), (85, 145), (85, 146), (86, 91), (86, 93), (86, 94), (86, 95), (86, 96), (86, 97), (86, 98), (86, 99), (86, 100), (86, 101), (86, 102), (86, 105), (86, 121), (86, 123), (86, 124), (86, 125), (86, 126), (86, 127), (86, 128), (86, 129), (86, 130), (86, 131), (86, 132), (86, 133), (86, 134), (86, 136), (86, 144), (87, 89), (87, 92), (87, 93), (87, 94), (87, 95), (87, 96), (87, 97), (87, 98), (87, 99), (87, 100), (87, 101), (87, 102), (87, 103), (87, 106), (87, 120), (87, 122), (87, 123), (87, 124), (87, 125), (87, 126), (87, 127), (87, 128), (87, 129), (87, 130), (87, 131),
(87, 132), (87, 133), (87, 134), (87, 135), (87, 137), (87, 143), (87, 145), (88, 89), (88, 91), (88, 92), (88, 93), (88, 94), (88, 95), (88, 96), (88, 97), (88, 98), (88, 99), (88, 100), (88, 101), (88, 102), (88, 103), (88, 104), (88, 120), (88, 122), (88, 123), (88, 124), (88, 125), (88, 126), (88, 127), (88, 128), (88, 129), (88, 130), (88, 131), (88, 132), (88, 133), (88, 134), (88, 135), (88, 136), (88, 139), (88, 140), (88, 141), (88, 142), (88, 144), (89, 90), (89, 92), (89, 93), (89, 94), (89, 95), (89, 96), (89, 97), (89, 98), (89, 99), (89, 100), (89, 101), (89, 102), (89, 103), (89, 104), (89, 105), (89, 109), (89, 119), (89, 121), (89, 122), (89, 123), (89, 124), (89, 125), (89, 126), (89, 127), (89, 128), (89, 129), (89, 130), (89, 131), (89, 135),
(89, 136), (89, 137), (89, 144), (90, 91), (90, 93), (90, 94), (90, 95), (90, 96), (90, 97), (90, 98), (90, 99), (90, 100), (90, 101), (90, 102), (90, 103), (90, 104), (90, 105), (90, 106), (90, 107), (90, 109), (90, 120), (90, 121), (90, 122), (90, 123), (90, 124), (90, 125), (90, 126), (90, 127), (90, 128), (90, 129), (90, 130), (90, 133), (90, 134), (90, 137), (90, 138), (90, 139), (90, 140), (90, 143), (91, 92), (91, 94), (91, 95), (91, 96), (91, 97), (91, 98), (91, 99), (91, 100), (91, 101), (91, 102), (91, 103), (91, 104), (91, 105), (91, 106), (91, 107), (91, 109), (91, 116), (91, 119), (91, 120), (91, 121), (91, 122), (91, 123), (91, 124), (91, 125), (91, 126), (91, 127), (91, 128), (91, 129), (91, 130), (91, 131), (91, 135), (91, 138), (91, 139), (91, 142),
(92, 92), (92, 94), (92, 95), (92, 96), (92, 97), (92, 98), (92, 99), (92, 100), (92, 101), (92, 102), (92, 103), (92, 104), (92, 105), (92, 106), (92, 107), (92, 108), (92, 109), (92, 110), (92, 111), (92, 112), (92, 113), (92, 115), (92, 118), (92, 119), (92, 120), (92, 121), (92, 122), (92, 123), (92, 124), (92, 125), (92, 126), (92, 127), (92, 128), (92, 130), (92, 137), (92, 140), (93, 76), (93, 91), (93, 93), (93, 94), (93, 95), (93, 96), (93, 97), (93, 98), (93, 99), (93, 100), (93, 101), (93, 102), (93, 103), (93, 104), (93, 105), (93, 106), (93, 107), (93, 108), (93, 109), (93, 116), (93, 117), (93, 118), (93, 119), (93, 120), (93, 121), (93, 122), (93, 123), (93, 124), (93, 125), (93, 126), (93, 127), (93, 128), (93, 130), (93, 138), (93, 139), (94, 77),
(94, 88), (94, 90), (94, 91), (94, 92), (94, 93), (94, 94), (94, 95), (94, 96), (94, 97), (94, 98), (94, 99), (94, 100), (94, 101), (94, 102), (94, 103), (94, 104), (94, 105), (94, 106), (94, 107), (94, 108), (94, 109), (94, 110), (94, 111), (94, 112), (94, 113), (94, 114), (94, 115), (94, 116), (94, 117), (94, 118), (94, 119), (94, 120), (94, 121), (94, 122), (94, 123), (94, 124), (94, 125), (94, 126), (94, 127), (94, 128), (94, 130), (94, 139), (95, 78), (95, 87), (95, 91), (95, 94), (95, 95), (95, 96), (95, 97), (95, 98), (95, 99), (95, 100), (95, 101), (95, 102), (95, 103), (95, 104), (95, 105), (95, 106), (95, 107), (95, 108), (95, 109), (95, 110), (95, 111), (95, 112), (95, 113), (95, 114), (95, 115), (95, 116), (95, 117), (95, 118), (95, 119), (95, 120),
(95, 121), (95, 122), (95, 123), (95, 124), (95, 125), (95, 126), (95, 127), (95, 128), (95, 130), (96, 78), (96, 79), (96, 87), (96, 89), (96, 90), (96, 92), (96, 93), (96, 94), (96, 95), (96, 96), (96, 97), (96, 98), (96, 99), (96, 100), (96, 101), (96, 102), (96, 103), (96, 104), (96, 105), (96, 106), (96, 107), (96, 108), (96, 109), (96, 110), (96, 111), (96, 112), (96, 113), (96, 114), (96, 115), (96, 116), (96, 117), (96, 118), (96, 119), (96, 120), (96, 121), (96, 122), (96, 123), (96, 124), (96, 125), (96, 130), (96, 131), (97, 79), (97, 80), (97, 86), (97, 88), (97, 89), (97, 91), (97, 94), (97, 96), (97, 97), (97, 98), (97, 99), (97, 100), (97, 101), (97, 102), (97, 103), (97, 104), (97, 105), (97, 106), (97, 107), (97, 108), (97, 109), (97, 110),
(97, 111), (97, 112), (97, 113), (97, 114), (97, 115), (97, 116), (97, 117), (97, 118), (97, 119), (97, 120), (97, 121), (97, 122), (97, 123), (97, 124), (97, 125), (97, 126), (97, 127), (97, 128), (97, 129), (97, 131), (98, 80), (98, 86), (98, 89), (98, 90), (98, 94), (98, 96), (98, 97), (98, 98), (98, 99), (98, 100), (98, 101), (98, 102), (98, 103), (98, 104), (98, 105), (98, 106), (98, 107), (98, 108), (98, 109), (98, 110), (98, 111), (98, 112), (98, 113), (98, 114), (98, 115), (98, 116), (98, 117), (98, 118), (98, 119), (98, 120), (98, 121), (98, 122), (98, 123), (98, 125), (98, 131), (99, 81), (99, 82), (99, 85), (99, 86), (99, 87), (99, 89), (99, 93), (99, 95), (99, 96), (99, 97), (99, 98), (99, 99), (99, 100), (99, 101), (99, 102), (99, 103), (99, 104),
(99, 105), (99, 106), (99, 107), (99, 108), (99, 109), (99, 110), (99, 111), (99, 112), (99, 113), (99, 114), (99, 115), (99, 116), (99, 117), (99, 118), (99, 119), (99, 120), (99, 121), (99, 122), (99, 123), (99, 125), (99, 131), (99, 132), (100, 81), (100, 83), (100, 86), (100, 87), (100, 88), (100, 89), (100, 90), (100, 91), (100, 92), (100, 94), (100, 95), (100, 96), (100, 97), (100, 98), (100, 99), (100, 100), (100, 101), (100, 102), (100, 103), (100, 104), (100, 105), (100, 106), (100, 107), (100, 108), (100, 109), (100, 110), (100, 111), (100, 112), (100, 113), (100, 114), (100, 115), (100, 116), (100, 117), (100, 118), (100, 119), (100, 120), (100, 121), (100, 125), (100, 132), (100, 133), (101, 82), (101, 85), (101, 86), (101, 87), (101, 88), (101, 89), (101, 90), (101, 93), (101, 94), (101, 95),
(101, 96), (101, 97), (101, 98), (101, 99), (101, 100), (101, 101), (101, 102), (101, 103), (101, 104), (101, 105), (101, 106), (101, 107), (101, 108), (101, 109), (101, 110), (101, 111), (101, 112), (101, 113), (101, 117), (101, 118), (101, 119), (101, 120), (101, 122), (101, 123), (101, 125), (101, 132), (101, 133), (102, 82), (102, 84), (102, 85), (102, 86), (102, 87), (102, 88), (102, 89), (102, 90), (102, 91), (102, 92), (102, 93), (102, 94), (102, 95), (102, 96), (102, 97), (102, 98), (102, 99), (102, 100), (102, 101), (102, 102), (102, 103), (102, 104), (102, 105), (102, 106), (102, 107), (102, 108), (102, 109), (102, 110), (102, 111), (102, 112), (102, 114), (102, 115), (102, 116), (102, 118), (102, 119), (102, 121), (102, 132), (102, 134), (103, 69), (103, 71), (103, 73), (103, 82), (103, 84), (103, 85), (103, 86),
(103, 87), (103, 88), (103, 89), (103, 90), (103, 91), (103, 92), (103, 93), (103, 94), (103, 95), (103, 96), (103, 97), (103, 98), (103, 99), (103, 100), (103, 101), (103, 102), (103, 103), (103, 104), (103, 105), (103, 106), (103, 107), (103, 108), (103, 109), (103, 110), (103, 111), (103, 113), (103, 120), (103, 132), (103, 135), (104, 70), (104, 74), (104, 75), (104, 76), (104, 82), (104, 84), (104, 85), (104, 86), (104, 87), (104, 88), (104, 89), (104, 90), (104, 91), (104, 97), (104, 98), (104, 99), (104, 100), (104, 101), (104, 102), (104, 103), (104, 104), (104, 105), (104, 106), (104, 107), (104, 108), (104, 109), (104, 110), (104, 112), (104, 118), (104, 120), (104, 132), (104, 134), (105, 72), (105, 73), (105, 74), (105, 77), (105, 78), (105, 79), (105, 80), (105, 81), (105, 82), (105, 83), (105, 84),
(105, 85), (105, 86), (105, 87), (105, 88), (105, 89), (105, 90), (105, 93), (105, 94), (105, 95), (105, 96), (105, 98), (105, 99), (105, 100), (105, 101), (105, 102), (105, 103), (105, 104), (105, 105), (105, 106), (105, 107), (105, 108), (105, 109), (105, 110), (105, 112), (105, 119), (105, 132), (106, 75), (106, 76), (106, 77), (106, 82), (106, 83), (106, 84), (106, 85), (106, 86), (106, 87), (106, 88), (106, 89), (106, 91), (106, 97), (106, 99), (106, 100), (106, 101), (106, 102), (106, 103), (106, 104), (106, 105), (106, 106), (106, 107), (106, 108), (106, 109), (106, 110), (106, 112), (107, 79), (107, 81), (107, 82), (107, 83), (107, 84), (107, 85), (107, 86), (107, 87), (107, 88), (107, 90), (107, 98), (107, 100), (107, 101), (107, 102), (107, 103), (107, 104), (107, 105), (107, 106), (107, 107), (107, 108),
(107, 109), (107, 110), (107, 111), (107, 113), (107, 131), (108, 81), (108, 83), (108, 84), (108, 85), (108, 86), (108, 87), (108, 89), (108, 99), (108, 101), (108, 102), (108, 103), (108, 104), (108, 105), (108, 106), (108, 107), (108, 108), (108, 109), (108, 110), (108, 111), (108, 113), (108, 131), (109, 81), (109, 83), (109, 84), (109, 85), (109, 86), (109, 88), (109, 99), (109, 101), (109, 102), (109, 103), (109, 108), (109, 109), (109, 110), (109, 111), (109, 112), (109, 114), (110, 81), (110, 83), (110, 84), (110, 85), (110, 87), (110, 98), (110, 101), (110, 102), (110, 105), (110, 106), (110, 114), (111, 81), (111, 83), (111, 84), (111, 85), (111, 87), (111, 98), (111, 101), (111, 103), (111, 108), (111, 110), (111, 111), (111, 112), (111, 113), (111, 115), (112, 81), (112, 83), (112, 84), (112, 86), (112, 101),
(112, 102), (112, 115), (112, 116), (113, 81), (113, 83), (113, 84), (113, 86), (113, 101), (113, 102), (113, 116), (113, 117), (114, 81), (114, 83), (114, 84), (114, 86), (114, 102), (114, 117), (115, 81), (115, 86), (115, 102), (116, 81), (116, 83), (116, 86), (116, 102), (117, 81), (117, 85), (117, 87), (117, 102), (118, 86), (118, 88), (119, 87), )
coordinates_771286 = ((149, 125),
(150, 126), (151, 125), (151, 126), (152, 125), (152, 126), )
coordinates_781286 = ((92, 127),
(92, 128), (93, 125), (93, 128), (94, 124), (94, 128), (95, 123), (95, 125), (95, 126), (95, 128), (96, 123), (96, 128), (97, 124), (97, 126), )
coordinates_EFE68C = ((156, 119),
(157, 118), (157, 120), (158, 118), (159, 118), (159, 119), (159, 120), (159, 123), (160, 119), (160, 121), (160, 124), (161, 119), (161, 121), (161, 122), (161, 123), (161, 125), (162, 118), (162, 120), (162, 121), (162, 122), (162, 123), (162, 124), (162, 126), (163, 117), (163, 119), (163, 120), (163, 121), (163, 126), (164, 116), (164, 118), (164, 119), (164, 120), (164, 121), (164, 122), (164, 123), (164, 125), (165, 116), (165, 118), (165, 119), (165, 121), (166, 116), (166, 118), (166, 120), (166, 128), (166, 129), (167, 116), (167, 118), (167, 119), (167, 121), (167, 127), (167, 129), (168, 116), (168, 118), (168, 119), (168, 121), (168, 127), (168, 129), (169, 117), (169, 119), (169, 120), (169, 122), (169, 127), (169, 129), (170, 117), (170, 119), (170, 120), (170, 122), (170, 127), (170, 129), (171, 117), (171, 119), (171, 120), (171, 122),
(171, 127), (171, 129), (172, 118), (172, 120), (172, 121), (172, 123), (172, 127), (172, 129), (173, 118), (173, 120), (173, 121), (173, 123), (173, 127), (173, 129), (174, 118), (174, 120), (174, 121), (174, 123), (174, 127), (174, 128), (175, 117), (175, 119), (175, 120), (175, 122), (175, 128), (176, 117), (176, 119), (176, 120), (176, 122), (177, 116), (177, 118), (177, 119), (177, 120), (177, 122), (178, 116), (178, 118), (178, 119), (178, 120), (178, 122), (179, 116), (179, 118), (179, 119), (179, 120), (179, 122), (180, 118), (180, 120), (180, 121), (180, 122), (181, 118), (181, 120), (181, 121), (181, 122), (181, 124), (181, 126), (182, 117), (182, 119), (182, 120), (182, 121), (183, 116), (183, 122), (183, 123), (183, 125), (184, 117), (184, 118), (184, 121), )
coordinates_31CD32 = ((162, 139),
(163, 140), (164, 140), (164, 144), (165, 141), (165, 145), (166, 137), (166, 138), (166, 143), (166, 146), (167, 137), (167, 139), (167, 143), (167, 145), (167, 147), (168, 137), (168, 141), (168, 144), (168, 145), (168, 146), (168, 148), (169, 131), (169, 138), (169, 140), (169, 143), (169, 147), (169, 149), (170, 131), (170, 133), (170, 144), (170, 145), (170, 148), (170, 150), (171, 131), (171, 134), (171, 147), (171, 149), (171, 151), (172, 131), (172, 134), (172, 147), (172, 149), (172, 151), (173, 131), (173, 134), (173, 148), (173, 151), (174, 131), (174, 134), (174, 141), (174, 143), (174, 148), (174, 151), (175, 130), (175, 132), (175, 134), (175, 140), (175, 144), (175, 145), (175, 146), (175, 147), (175, 148), (175, 149), (175, 151), (176, 130), (176, 132), (176, 134), (176, 140), (176, 142), (176, 143), (176, 145), (176, 150), (177, 129),
(177, 131), (177, 132), (177, 134), (177, 140), (177, 142), (177, 144), (177, 147), (177, 150), (178, 128), (178, 130), (178, 131), (178, 132), (178, 134), (178, 139), (178, 141), (178, 143), (179, 128), (179, 130), (179, 131), (179, 132), (179, 133), (179, 134), (179, 136), (179, 137), (179, 140), (179, 141), (179, 143), (180, 128), (180, 130), (180, 131), (180, 132), (180, 133), (180, 134), (180, 139), (180, 140), (180, 141), (180, 143), (181, 128), (181, 130), (181, 131), (181, 132), (181, 133), (181, 134), (181, 135), (181, 136), (181, 137), (181, 138), (181, 139), (181, 140), (181, 142), (182, 128), (182, 130), (182, 131), (182, 132), (182, 133), (182, 134), (182, 135), (182, 139), (182, 141), (183, 127), (183, 130), (183, 131), (183, 132), (183, 133), (183, 134), (183, 137), (183, 140), (184, 127), (184, 135), (184, 139), (185, 130), (185, 132),
(185, 134), )
coordinates_F0E68C = ((60, 126),
(60, 128), (60, 129), (60, 130), (61, 131), (61, 133), (62, 126), (62, 128), (62, 129), (62, 130), (62, 134), (63, 126), (63, 128), (63, 129), (63, 130), (63, 131), (63, 132), (63, 134), (64, 126), (64, 128), (64, 129), (64, 130), (64, 131), (64, 132), (64, 133), (64, 135), (65, 126), (65, 128), (65, 129), (65, 130), (65, 131), (65, 132), (65, 133), (65, 134), (65, 136), (65, 137), (65, 139), (66, 126), (66, 128), (66, 129), (66, 130), (66, 131), (66, 132), (66, 133), (66, 134), (66, 135), (66, 139), (67, 126), (67, 128), (67, 132), (67, 133), (67, 134), (67, 135), (67, 136), (67, 137), (67, 138), (67, 140), (68, 126), (68, 127), (68, 130), (68, 131), (68, 132), (68, 133), (68, 134), (68, 135), (68, 136), (68, 137), (68, 138), (68, 139), (68, 141), (69, 128), (69, 132), (69, 134),
(69, 135), (69, 136), (69, 137), (69, 138), (69, 139), (69, 141), (70, 126), (70, 128), (70, 132), (70, 134), (70, 135), (70, 136), (70, 137), (70, 138), (70, 139), (70, 141), (71, 132), (71, 137), (71, 138), (71, 140), (72, 132), (72, 134), (72, 135), (72, 138), (72, 140), (73, 132), (73, 133), (73, 137), (73, 139), (74, 138), (74, 139), (75, 138), )
coordinates_32CD32 = ((63, 140),
(64, 140), (64, 142), (64, 143), (64, 144), (64, 145), (65, 141), (65, 146), (65, 147), (65, 148), (66, 142), (66, 144), (66, 145), (66, 150), (67, 143), (67, 145), (67, 147), (67, 148), (67, 152), (68, 143), (68, 145), (68, 146), (68, 149), (68, 150), (69, 143), (69, 145), (69, 149), (69, 151), (69, 153), (70, 143), (70, 145), (70, 148), (70, 150), (70, 151), (70, 153), (71, 143), (71, 145), (71, 148), (71, 150), (71, 151), (71, 153), (72, 142), (72, 145), (72, 146), (72, 148), (72, 149), (72, 150), (72, 151), (72, 153), (73, 142), (73, 143), (73, 148), (73, 149), (73, 150), (73, 151), (73, 152), (73, 153), (73, 155), (74, 141), (74, 143), (74, 146), (74, 147), (74, 148), (74, 149), (74, 150), (74, 151), (74, 152), (74, 153), (74, 155), (75, 141), (75, 147), (75, 149), (75, 150),
(75, 151), (75, 152), (75, 153), (75, 155), (76, 140), (76, 147), (76, 149), (76, 150), (76, 151), (76, 152), (76, 153), (76, 155), (77, 146), (77, 147), (77, 148), (77, 149), (77, 154), (77, 155), (78, 145), (78, 150), (78, 151), (78, 152), (78, 154), (79, 144), (79, 147), (79, 149), (79, 154), (80, 142), (80, 144), (80, 145), (80, 146), (81, 140), (81, 142), (82, 139), (83, 138), (84, 137), )
coordinates_00FF7F = ((110, 69),
(110, 71), (111, 69), (111, 72), (111, 73), (111, 74), (112, 69), (112, 71), (112, 75), (113, 69), (113, 71), (113, 72), (113, 73), (113, 75), (114, 69), (114, 71), (114, 72), (114, 73), (114, 75), (115, 69), (115, 71), (115, 72), (115, 73), (115, 75), (116, 69), (116, 72), (116, 73), (116, 75), (117, 70), (117, 76), (118, 72), (118, 74), (118, 76), )
coordinates_CD5C5C = ((80, 152),
(81, 148), (81, 149), (81, 152), (82, 144), (82, 145), (82, 146), (82, 150), (82, 152), (83, 142), (83, 145), (83, 146), (83, 147), (83, 148), (83, 149), (83, 150), (83, 152), (84, 141), (84, 144), (84, 149), (84, 151), (85, 140), (85, 143), (85, 148), (85, 151), (86, 139), (86, 141), (86, 142), (86, 148), (86, 150), (87, 147), (87, 150), (88, 147), (88, 150), (89, 146), (89, 149), (90, 145), (90, 147), (90, 149), (91, 145), (91, 147), (91, 149), (92, 144), (92, 146), (92, 148), (93, 135), (93, 141), (93, 142), (93, 145), (93, 146), (93, 148), (94, 135), (94, 141), (94, 144), (94, 145), (94, 147), (95, 135), (95, 137), (95, 141), (95, 143), (95, 144), (95, 146), (96, 136), (96, 139), (96, 141), (96, 142), (96, 143), (96, 145), (97, 137), (97, 141), (97, 142), (97, 144), (98, 140),
(98, 141), (98, 143), (99, 138), (99, 141), (99, 143), (100, 139), (101, 142), )
coordinates_FEA501 = ((63, 107),
(63, 109), (63, 110), (63, 111), (63, 113), (64, 106), (64, 113), (65, 106), (65, 108), (65, 109), (65, 110), (65, 111), (65, 113), (66, 106), (66, 109), (66, 112), (66, 113), (67, 107), (67, 109), (67, 112), (67, 113), (68, 107), (68, 109), (68, 112), (69, 108), (69, 112), (70, 108), (70, 109), (70, 112), (71, 108), (71, 112), (72, 111), (72, 112), (73, 112), (74, 110), (74, 111), (75, 110), (75, 111), (76, 110), )
coordinates_DCF8A4 = ((88, 164),
(89, 159), (89, 163), (89, 164), (90, 159), (90, 164), (91, 159), (91, 161), (91, 163), (92, 159), (92, 163), (93, 159), (93, 161), (93, 163), (94, 159), (94, 161), (94, 163), (95, 158), (95, 160), (95, 161), (95, 162), (95, 163), (95, 164), (96, 158), (96, 160), (96, 161), (96, 162), (96, 164), (97, 158), (97, 160), (97, 161), (97, 162), (97, 164), (98, 158), (98, 160), (98, 161), (98, 162), (98, 164), (99, 158), (99, 160), (99, 161), (99, 162), (99, 164), (99, 169), (100, 157), (100, 159), (100, 160), (100, 161), (100, 162), (100, 163), (100, 164), (100, 165), (100, 168), (101, 157), (101, 159), (101, 160), (101, 161), (101, 162), (101, 163), (101, 164), (101, 167), (102, 152), (102, 155), (102, 158), (102, 159), (102, 160), (102, 161), (102, 162), (102, 163), (102, 164), (102, 166), (103, 152), (103, 154),
(103, 157), (103, 158), (103, 159), (103, 160), (103, 161), (103, 162), (103, 163), (103, 164), (103, 166), (104, 153), (104, 155), (104, 156), (104, 157), (104, 158), (104, 159), (104, 160), (104, 161), (104, 162), (104, 163), (104, 164), (104, 166), (105, 152), (105, 154), (105, 155), (105, 156), (105, 157), (105, 158), (105, 159), (105, 160), (105, 161), (105, 162), (105, 163), (105, 164), (105, 166), (106, 151), (106, 153), (106, 154), (106, 155), (106, 156), (106, 157), (106, 158), (106, 159), (106, 160), (106, 161), (106, 162), (106, 163), (106, 165), (107, 150), (107, 152), (107, 153), (107, 154), (107, 155), (107, 156), (107, 157), (107, 158), (107, 159), (107, 160), (107, 161), (107, 162), (107, 163), (107, 165), (108, 149), (108, 151), (108, 152), (108, 153), (108, 154), (108, 155), (108, 156), (108, 157), (108, 158), (108, 159), (108, 160),
(108, 161), (108, 162), (108, 164), (109, 149), (109, 151), (109, 152), (109, 153), (109, 154), (109, 155), (109, 156), (109, 157), (109, 158), (109, 159), (109, 160), (109, 161), (109, 162), (109, 164), (110, 149), (110, 151), (110, 152), (110, 153), (110, 154), (110, 155), (110, 156), (110, 157), (110, 158), (110, 159), (110, 160), (110, 161), (110, 162), (110, 164), (111, 149), (111, 151), (111, 152), (111, 153), (111, 154), (111, 155), (111, 156), (111, 157), (111, 158), (111, 159), (111, 160), (111, 161), (111, 162), (111, 164), (112, 149), (112, 151), (112, 152), (112, 153), (112, 154), (112, 155), (112, 156), (112, 157), (112, 158), (112, 159), (113, 149), (113, 151), (113, 152), (113, 153), (113, 154), (113, 155), (113, 156), (113, 157), (113, 160), (113, 161), (113, 162), (113, 163), (113, 164), (113, 166), (114, 148), (114, 149), (114, 151),
(114, 152), (114, 153), (114, 154), (114, 155), (114, 156), (114, 159), (114, 167), (115, 148), (115, 150), (115, 151), (115, 152), (115, 153), (115, 154), (115, 155), (115, 157), (115, 168), (116, 148), (116, 150), (116, 151), (116, 152), (116, 153), (116, 154), (116, 156), (116, 170), (117, 146), (117, 148), (117, 149), (117, 150), (117, 151), (117, 152), (117, 153), (117, 154), (117, 156), (118, 147), (118, 153), (118, 154), (118, 156), (119, 148), (119, 150), (119, 151), (119, 152), (119, 154), (119, 156), (120, 154), (120, 155), (121, 154), (121, 155), (122, 154), )
coordinates_DBF8A4 = ((125, 154),
(126, 152), (126, 155), (127, 146), (127, 148), (127, 149), (127, 150), (127, 151), (127, 154), (127, 156), (128, 145), (128, 152), (128, 153), (128, 154), (128, 156), (129, 144), (129, 147), (129, 148), (129, 149), (129, 150), (129, 151), (129, 152), (129, 153), (129, 154), (129, 156), (130, 145), (130, 146), (130, 148), (130, 149), (130, 150), (130, 151), (130, 152), (130, 153), (130, 154), (130, 155), (130, 157), (131, 147), (131, 149), (131, 150), (131, 151), (131, 152), (131, 153), (131, 154), (131, 155), (131, 156), (131, 158), (132, 148), (132, 150), (132, 151), (132, 152), (132, 153), (132, 154), (132, 155), (132, 156), (132, 157), (132, 159), (133, 149), (133, 151), (133, 152), (133, 153), (133, 154), (133, 155), (133, 156), (133, 157), (133, 158), (133, 161), (133, 162), (133, 163), (133, 164), (134, 149), (134, 151), (134, 152), (134, 153),
(134, 154), (134, 155), (134, 156), (134, 157), (134, 158), (134, 159), (134, 165), (135, 150), (135, 152), (135, 153), (135, 154), (135, 155), (135, 156), (135, 157), (135, 158), (135, 159), (135, 160), (135, 161), (135, 162), (135, 163), (135, 165), (136, 150), (136, 152), (136, 153), (136, 154), (136, 155), (136, 156), (136, 157), (136, 158), (136, 159), (136, 160), (136, 161), (136, 162), (136, 163), (136, 165), (137, 150), (137, 152), (137, 153), (137, 154), (137, 155), (137, 156), (137, 157), (137, 158), (137, 159), (137, 160), (137, 161), (137, 162), (137, 163), (137, 164), (137, 166), (138, 151), (138, 153), (138, 154), (138, 155), (138, 156), (138, 157), (138, 158), (138, 159), (138, 160), (138, 161), (138, 162), (138, 163), (138, 164), (138, 165), (138, 167), (139, 151), (139, 153), (139, 154), (139, 155), (139, 156), (139, 157), (139, 158),
(139, 159), (139, 160), (139, 161), (139, 162), (139, 163), (139, 164), (139, 165), (139, 167), (140, 151), (140, 153), (140, 154), (140, 155), (140, 156), (140, 157), (140, 158), (140, 159), (140, 160), (140, 161), (140, 162), (140, 163), (140, 164), (140, 165), (140, 167), (141, 151), (141, 153), (141, 154), (141, 155), (141, 156), (141, 157), (141, 158), (141, 159), (141, 160), (141, 161), (141, 162), (141, 163), (141, 164), (141, 165), (141, 167), (142, 152), (142, 154), (142, 155), (142, 156), (142, 157), (142, 158), (142, 159), (142, 160), (142, 161), (142, 162), (142, 163), (142, 164), (142, 165), (142, 167), (143, 152), (143, 155), (143, 156), (143, 157), (143, 158), (143, 159), (143, 160), (143, 161), (143, 162), (143, 163), (143, 164), (143, 165), (143, 167), (144, 152), (144, 153), (144, 154), (144, 157), (144, 158), (144, 159), (144, 160),
(144, 161), (144, 162), (144, 163), (144, 164), (144, 166), (145, 155), (145, 157), (145, 158), (145, 159), (145, 160), (145, 161), (145, 162), (145, 163), (145, 165), (146, 157), (146, 159), (146, 160), (146, 161), (146, 162), (146, 163), (146, 165), (147, 157), (147, 159), (147, 160), (147, 161), (147, 162), (147, 163), (147, 165), (148, 158), (148, 160), (148, 161), (148, 162), (148, 163), (148, 165), (149, 158), (149, 160), (149, 161), (149, 162), (149, 163), (149, 165), (150, 159), (150, 161), (150, 162), (150, 163), (150, 165), (151, 159), (151, 161), (151, 162), (151, 163), (151, 165), (152, 159), (152, 161), (152, 162), (152, 163), (152, 164), (152, 166), (153, 159), (153, 161), (153, 162), (153, 163), (153, 164), (153, 166), (154, 159), (154, 161), (154, 164), (155, 159), (155, 164), (156, 160), (156, 164), (157, 159), (157, 160), (157, 164),
(158, 159), (158, 164), (159, 159), (159, 165), (160, 165), (161, 165), (161, 166), )
coordinates_60CC60 = ((74, 159),
(74, 161), (74, 162), (74, 163), (74, 164), (74, 166), (75, 158), (75, 167), (76, 157), (76, 159), (76, 160), (76, 161), (76, 162), (76, 163), (76, 164), (76, 165), (76, 166), (76, 168), (77, 157), (77, 159), (77, 160), (77, 161), (77, 162), (77, 163), (77, 164), (77, 165), (77, 166), (77, 167), (77, 169), (78, 156), (78, 158), (78, 159), (78, 160), (78, 161), (78, 162), (78, 163), (78, 164), (78, 165), (78, 166), (78, 167), (78, 168), (78, 170), (79, 156), (79, 158), (79, 159), (79, 160), (79, 161), (79, 162), (79, 163), (79, 164), (79, 165), (79, 166), (79, 167), (79, 168), (79, 169), (79, 171), (80, 155), (80, 157), (80, 158), (80, 159), (80, 160), (80, 161), (80, 162), (80, 163), (80, 164), (80, 165), (80, 166), (80, 167), (80, 168), (80, 169), (80, 171), (81, 155), (81, 157),
(81, 158), (81, 159), (81, 160), (81, 161), (81, 162), (81, 163), (81, 164), (81, 165), (81, 166), (81, 167), (81, 168), (81, 169), (81, 170), (81, 172), (82, 154), (82, 156), (82, 157), (82, 158), (82, 159), (82, 160), (82, 161), (82, 162), (82, 163), (82, 164), (82, 165), (82, 166), (82, 167), (82, 168), (82, 169), (82, 170), (82, 171), (82, 172), (83, 154), (83, 156), (83, 157), (83, 158), (83, 159), (83, 160), (83, 161), (83, 162), (83, 163), (83, 164), (83, 165), (83, 166), (83, 167), (83, 168), (83, 169), (83, 170), (83, 171), (83, 173), (84, 153), (84, 154), (84, 155), (84, 156), (84, 157), (84, 158), (84, 159), (84, 160), (84, 161), (84, 162), (84, 163), (84, 164), (84, 165), (84, 166), (84, 167), (84, 168), (84, 169), (84, 170), (84, 171), (84, 172), (84, 174), (85, 153),
(85, 155), (85, 156), (85, 157), (85, 158), (85, 160), (85, 161), (85, 162), (85, 166), (85, 167), (85, 168), (85, 169), (85, 170), (85, 171), (85, 172), (85, 174), (86, 153), (86, 155), (86, 156), (86, 157), (86, 158), (86, 161), (86, 166), (86, 167), (86, 168), (86, 169), (86, 170), (86, 171), (86, 172), (86, 173), (86, 174), (86, 175), (87, 152), (87, 154), (87, 155), (87, 156), (87, 157), (87, 162), (87, 166), (87, 168), (87, 169), (87, 170), (87, 171), (87, 172), (87, 173), (87, 175), (88, 152), (88, 154), (88, 155), (88, 157), (88, 161), (88, 166), (88, 168), (88, 169), (88, 170), (88, 171), (88, 172), (88, 173), (88, 175), (89, 152), (89, 154), (89, 155), (89, 157), (89, 161), (89, 166), (89, 168), (89, 169), (89, 170), (89, 171), (89, 172), (89, 173), (89, 174), (89, 175),
(89, 176), (90, 151), (90, 153), (90, 154), (90, 155), (90, 157), (90, 166), (90, 168), (90, 169), (90, 170), (90, 171), (90, 172), (90, 173), (90, 174), (90, 175), (90, 176), (91, 151), (91, 153), (91, 154), (91, 155), (91, 157), (91, 166), (91, 168), (91, 169), (91, 170), (91, 171), (91, 172), (91, 173), (91, 174), (91, 176), (92, 150), (92, 151), (92, 152), (92, 153), (92, 154), (92, 155), (92, 157), (92, 165), (92, 167), (92, 168), (92, 169), (92, 170), (92, 171), (92, 172), (92, 173), (92, 174), (92, 176), (93, 150), (93, 152), (93, 153), (93, 154), (93, 155), (93, 156), (93, 157), (93, 165), (93, 167), (93, 168), (93, 169), (93, 170), (93, 171), (93, 172), (93, 173), (93, 174), (93, 176), (94, 150), (94, 152), (94, 153), (94, 154), (94, 156), (94, 165), (94, 166), (94, 167),
(94, 168), (94, 169), (94, 170), (94, 171), (94, 172), (94, 173), (94, 174), (94, 175), (94, 177), (95, 149), (95, 151), (95, 152), (95, 153), (95, 154), (95, 156), (95, 166), (95, 168), (95, 169), (95, 170), (95, 171), (95, 172), (95, 173), (95, 174), (95, 175), (95, 177), (96, 148), (96, 150), (96, 151), (96, 152), (96, 153), (96, 154), (96, 156), (96, 166), (96, 171), (96, 172), (96, 173), (96, 174), (96, 175), (96, 177), (97, 147), (97, 149), (97, 150), (97, 151), (97, 152), (97, 153), (97, 154), (97, 156), (97, 166), (97, 169), (97, 170), (97, 171), (97, 172), (97, 173), (97, 174), (97, 176), (98, 146), (98, 148), (98, 149), (98, 150), (98, 152), (98, 153), (98, 154), (98, 155), (98, 156), (98, 166), (98, 172), (98, 173), (98, 174), (98, 176), (99, 145), (99, 147), (99, 148),
(99, 149), (99, 150), (99, 151), (99, 153), (99, 155), (99, 166), (99, 172), (99, 174), (99, 176), (100, 145), (100, 147), (100, 148), (100, 149), (100, 152), (100, 155), (100, 171), (100, 173), (100, 174), (100, 176), (101, 144), (101, 146), (101, 147), (101, 148), (101, 149), (101, 150), (101, 153), (101, 154), (101, 170), (101, 172), (101, 173), (101, 174), (101, 176), (102, 145), (102, 146), (102, 147), (102, 148), (102, 150), (102, 169), (102, 171), (102, 172), (102, 173), (102, 174), (102, 176), (103, 143), (103, 145), (103, 146), (103, 147), (103, 148), (103, 150), (103, 168), (103, 170), (103, 171), (103, 172), (103, 173), (103, 174), (103, 176), (104, 142), (104, 144), (104, 145), (104, 146), (104, 147), (104, 148), (104, 150), (104, 168), (104, 170), (104, 171), (104, 172), (104, 173), (104, 174), (104, 175), (104, 177), (105, 142),
(105, 144), (105, 145), (105, 146), (105, 147), (105, 149), (105, 168), (105, 170), (105, 171), (105, 172), (105, 173), (105, 174), (105, 175), (105, 177), (106, 141), (106, 143), (106, 144), (106, 145), (106, 146), (106, 148), (106, 168), (106, 170), (106, 171), (106, 172), (106, 173), (106, 174), (106, 176), (107, 140), (107, 142), (107, 143), (107, 144), (107, 145), (107, 147), (107, 167), (107, 169), (107, 170), (107, 171), (107, 172), (107, 173), (107, 175), (107, 176), (108, 139), (108, 141), (108, 142), (108, 143), (108, 144), (108, 145), (108, 147), (108, 166), (108, 168), (108, 169), (108, 170), (108, 171), (108, 172), (108, 173), (108, 175), (109, 137), (109, 140), (109, 141), (109, 142), (109, 143), (109, 144), (109, 145), (109, 147), (109, 166), (109, 168), (109, 169), (109, 170), (109, 171), (109, 172), (109, 173), (109, 174), (109, 176),
(110, 136), (110, 139), (110, 140), (110, 141), (110, 142), (110, 143), (110, 144), (110, 145), (110, 147), (110, 166), (110, 168), (110, 169), (110, 170), (110, 171), (110, 172), (110, 173), (110, 174), (110, 175), (110, 177), (111, 135), (111, 137), (111, 138), (111, 139), (111, 140), (111, 141), (111, 142), (111, 143), (111, 144), (111, 145), (111, 147), (111, 167), (111, 169), (111, 170), (111, 171), (111, 172), (111, 173), (111, 174), (111, 175), (111, 176), (111, 178), (112, 133), (112, 136), (112, 137), (112, 138), (112, 139), (112, 140), (112, 141), (112, 142), (112, 143), (112, 144), (112, 145), (112, 147), (112, 168), (112, 170), (112, 171), (112, 172), (112, 173), (112, 174), (112, 175), (112, 176), (112, 178), (113, 142), (113, 143), (113, 144), (113, 145), (113, 146), (113, 147), (113, 169), (113, 172), (113, 173), (113, 174), (113, 175),
(113, 176), (113, 178), (114, 133), (114, 135), (114, 136), (114, 137), (114, 138), (114, 139), (114, 140), (114, 144), (114, 146), (114, 170), (114, 172), (114, 173), (114, 174), (114, 175), (114, 176), (114, 177), (114, 179), (115, 142), (115, 143), (115, 146), (115, 160), (115, 161), (115, 162), (115, 163), (115, 165), (115, 173), (115, 174), (115, 175), (115, 176), (115, 177), (115, 179), (116, 145), (116, 159), (116, 166), (116, 173), (116, 175), (116, 176), (116, 177), (116, 179), (117, 158), (117, 162), (117, 163), (117, 164), (117, 165), (117, 168), (117, 171), (117, 172), (117, 173), (117, 174), (117, 175), (117, 176), (117, 177), (117, 178), (117, 180), (118, 158), (118, 160), (118, 161), (118, 165), (118, 166), (118, 167), (118, 170), (118, 171), (118, 172), (118, 173), (118, 174), (118, 175), (118, 176), (118, 177), (118, 178), (118, 179),
(118, 181), (119, 162), (119, 163), (119, 164), (119, 167), (119, 168), (119, 171), (119, 172), (119, 173), (119, 174), (119, 175), (119, 176), (119, 177), (119, 178), (119, 179), (119, 180), (119, 182), (120, 165), (120, 168), (120, 169), (120, 170), (120, 171), (120, 172), (120, 173), (120, 174), (120, 175), (120, 176), (120, 177), (120, 178), (120, 179), (120, 181), (121, 167), (121, 170), (121, 171), (121, 172), (121, 173), (121, 174), (121, 175), (121, 176), (121, 177), (121, 178), (122, 168), (122, 180), (123, 169), (123, 170), (123, 171), (123, 172), (123, 173), (123, 174), (123, 175), (123, 176), (123, 177), (123, 179), )
coordinates_5FCC60 = ((126, 168),
(126, 169), (126, 170), (126, 171), (126, 172), (126, 173), (126, 174), (126, 175), (126, 176), (126, 177), (126, 178), (126, 179), (126, 181), (127, 167), (127, 182), (128, 163), (128, 164), (128, 165), (128, 166), (128, 169), (128, 170), (128, 171), (128, 172), (128, 173), (128, 174), (128, 175), (128, 176), (128, 177), (128, 178), (128, 179), (128, 180), (128, 181), (128, 183), (129, 159), (129, 161), (129, 167), (129, 168), (129, 169), (129, 170), (129, 171), (129, 172), (129, 173), (129, 174), (129, 175), (129, 176), (129, 177), (129, 178), (129, 179), (129, 180), (129, 181), (129, 183), (130, 140), (130, 159), (130, 166), (130, 167), (130, 168), (130, 169), (130, 170), (130, 171), (130, 172), (130, 173), (130, 174), (130, 175), (130, 176), (130, 177), (130, 178), (130, 179), (130, 180), (130, 181), (130, 183), (131, 134), (131, 136), (131, 137),
(131, 138), (131, 139), (131, 140), (131, 141), (131, 142), (131, 161), (131, 163), (131, 164), (131, 165), (131, 166), (131, 167), (131, 168), (131, 169), (131, 170), (131, 171), (131, 172), (131, 173), (131, 174), (131, 175), (131, 176), (131, 177), (131, 178), (131, 179), (131, 181), (132, 135), (132, 140), (132, 143), (132, 144), (132, 145), (132, 168), (132, 169), (132, 170), (132, 171), (132, 172), (132, 173), (132, 174), (132, 175), (132, 176), (132, 177), (132, 178), (132, 180), (133, 136), (133, 138), (133, 139), (133, 140), (133, 141), (133, 142), (133, 147), (133, 168), (133, 170), (133, 171), (133, 172), (133, 173), (133, 174), (133, 175), (133, 176), (133, 177), (133, 178), (133, 180), (134, 137), (134, 139), (134, 140), (134, 141), (134, 142), (134, 143), (134, 144), (134, 145), (134, 147), (134, 168), (134, 170), (134, 171), (134, 172),
(134, 173), (134, 174), (134, 175), (134, 176), (134, 177), (134, 178), (134, 180), (135, 138), (135, 140), (135, 141), (135, 142), (135, 143), (135, 144), (135, 145), (135, 147), (135, 167), (135, 169), (135, 170), (135, 171), (135, 172), (135, 173), (135, 174), (135, 175), (135, 176), (135, 177), (135, 178), (135, 180), (136, 139), (136, 141), (136, 142), (136, 143), (136, 144), (136, 145), (136, 146), (136, 148), (136, 168), (136, 170), (136, 171), (136, 172), (136, 173), (136, 174), (136, 175), (136, 176), (136, 177), (136, 179), (137, 140), (137, 141), (137, 142), (137, 143), (137, 144), (137, 145), (137, 146), (137, 148), (137, 169), (137, 171), (137, 172), (137, 173), (137, 174), (137, 175), (137, 176), (137, 177), (137, 179), (138, 140), (138, 142), (138, 143), (138, 144), (138, 145), (138, 146), (138, 148), (138, 170), (138, 172), (138, 173),
(138, 174), (138, 175), (138, 176), (138, 178), (139, 141), (139, 143), (139, 144), (139, 145), (139, 146), (139, 147), (139, 149), (139, 170), (139, 172), (139, 173), (139, 174), (139, 175), (139, 176), (139, 178), (140, 141), (140, 143), (140, 144), (140, 145), (140, 146), (140, 147), (140, 149), (140, 169), (140, 171), (140, 172), (140, 173), (140, 174), (140, 175), (140, 177), (141, 142), (141, 144), (141, 145), (141, 146), (141, 147), (141, 149), (141, 169), (141, 171), (141, 172), (141, 173), (141, 174), (141, 175), (141, 177), (142, 142), (142, 144), (142, 145), (142, 146), (142, 147), (142, 149), (142, 169), (142, 171), (142, 172), (142, 173), (142, 174), (142, 175), (142, 177), (143, 143), (143, 145), (143, 146), (143, 147), (143, 148), (143, 150), (143, 169), (143, 171), (143, 172), (143, 173), (143, 174), (143, 175), (143, 177), (144, 143),
(144, 145), (144, 146), (144, 147), (144, 148), (144, 150), (144, 169), (144, 171), (144, 172), (144, 173), (144, 174), (144, 175), (144, 177), (145, 144), (145, 146), (145, 147), (145, 148), (145, 149), (145, 151), (145, 168), (145, 170), (145, 171), (145, 172), (145, 173), (145, 174), (145, 175), (145, 177), (146, 144), (146, 146), (146, 147), (146, 148), (146, 149), (146, 150), (146, 153), (146, 167), (146, 169), (146, 170), (146, 171), (146, 172), (146, 173), (146, 174), (146, 175), (146, 177), (147, 145), (147, 147), (147, 148), (147, 149), (147, 150), (147, 151), (147, 155), (147, 167), (147, 169), (147, 170), (147, 171), (147, 172), (147, 173), (147, 174), (147, 175), (147, 177), (148, 145), (148, 147), (148, 148), (148, 149), (148, 150), (148, 151), (148, 152), (148, 153), (148, 156), (148, 167), (148, 169), (148, 170), (148, 171), (148, 172),
(148, 173), (148, 174), (148, 175), (148, 177), (149, 146), (149, 148), (149, 149), (149, 150), (149, 151), (149, 152), (149, 153), (149, 154), (149, 156), (149, 167), (149, 169), (149, 170), (149, 171), (149, 172), (149, 173), (149, 174), (149, 175), (149, 177), (150, 146), (150, 148), (150, 149), (150, 150), (150, 151), (150, 152), (150, 153), (150, 154), (150, 155), (150, 157), (150, 167), (150, 169), (150, 170), (150, 171), (150, 172), (150, 173), (150, 174), (150, 175), (150, 177), (151, 147), (151, 149), (151, 150), (151, 151), (151, 152), (151, 153), (151, 154), (151, 155), (151, 157), (151, 167), (151, 169), (151, 170), (151, 171), (151, 172), (151, 173), (151, 174), (151, 175), (151, 177), (152, 147), (152, 149), (152, 150), (152, 151), (152, 152), (152, 153), (152, 154), (152, 155), (152, 157), (152, 168), (152, 170), (152, 171), (152, 172),
(152, 173), (152, 174), (152, 175), (152, 177), (153, 148), (153, 150), (153, 151), (153, 152), (153, 153), (153, 154), (153, 156), (153, 169), (153, 171), (153, 172), (153, 173), (153, 174), (153, 175), (153, 177), (154, 148), (154, 150), (154, 151), (154, 152), (154, 153), (154, 154), (154, 155), (154, 157), (154, 169), (154, 170), (154, 171), (154, 172), (154, 173), (154, 174), (154, 175), (154, 177), (155, 149), (155, 151), (155, 152), (155, 153), (155, 154), (155, 155), (155, 157), (155, 166), (155, 168), (155, 169), (155, 170), (155, 171), (155, 172), (155, 173), (155, 174), (155, 176), (156, 149), (156, 151), (156, 152), (156, 153), (156, 154), (156, 155), (156, 157), (156, 166), (156, 168), (156, 169), (156, 170), (156, 171), (156, 172), (156, 173), (156, 174), (156, 176), (157, 150), (157, 152), (157, 153), (157, 154), (157, 155), (157, 157),
(157, 166), (157, 167), (157, 168), (157, 169), (157, 170), (157, 171), (157, 172), (157, 173), (157, 174), (157, 176), (158, 150), (158, 152), (158, 153), (158, 154), (158, 155), (158, 157), (158, 162), (158, 167), (158, 169), (158, 170), (158, 171), (158, 172), (158, 173), (158, 174), (158, 176), (159, 151), (159, 153), (159, 154), (159, 156), (159, 161), (159, 167), (159, 169), (159, 170), (159, 171), (159, 172), (159, 173), (159, 174), (159, 176), (160, 151), (160, 153), (160, 154), (160, 156), (160, 161), (160, 163), (160, 168), (160, 170), (160, 171), (160, 172), (160, 173), (160, 174), (160, 176), (161, 152), (161, 154), (161, 156), (161, 157), (161, 160), (161, 163), (161, 168), (161, 170), (161, 171), (161, 172), (161, 173), (161, 174), (161, 176), (162, 152), (162, 154), (162, 155), (162, 157), (162, 160), (162, 163), (162, 168), (162, 170),
(162, 171), (162, 172), (162, 173), (162, 175), (163, 153), (163, 155), (163, 157), (163, 160), (163, 162), (163, 163), (163, 164), (163, 167), (163, 168), (163, 169), (163, 170), (163, 171), (163, 172), (163, 174), (164, 153), (164, 155), (164, 156), (164, 157), (164, 159), (164, 160), (164, 161), (164, 162), (164, 163), (164, 166), (164, 168), (164, 169), (164, 170), (164, 171), (164, 172), (164, 174), (165, 153), (165, 155), (165, 156), (165, 157), (165, 160), (165, 161), (165, 162), (165, 163), (165, 164), (165, 165), (165, 167), (165, 168), (165, 169), (165, 170), (165, 171), (165, 172), (165, 174), (166, 154), (166, 156), (166, 157), (166, 158), (166, 159), (166, 160), (166, 161), (166, 162), (166, 163), (166, 164), (166, 165), (166, 166), (166, 167), (166, 168), (166, 169), (166, 170), (166, 174), (167, 154), (167, 156), (167, 157), (167, 158),
(167, 159), (167, 160), (167, 161), (167, 162), (167, 163), (167, 164), (167, 165), (167, 166), (167, 167), (167, 168), (167, 169), (167, 173), (168, 154), (168, 156), (168, 157), (168, 158), (168, 159), (168, 160), (168, 161), (168, 162), (168, 163), (168, 164), (168, 165), (168, 166), (168, 167), (168, 168), (168, 170), (169, 154), (169, 156), (169, 157), (169, 158), (169, 159), (169, 160), (169, 161), (169, 162), (169, 163), (169, 164), (169, 165), (169, 166), (169, 167), (169, 169), (170, 157), (170, 158), (170, 159), (170, 160), (170, 161), (170, 162), (170, 163), (170, 164), (170, 165), (170, 166), (170, 168), (171, 155), (171, 158), (171, 159), (171, 160), (171, 161), (171, 162), (171, 163), (171, 164), (171, 167), (172, 157), (172, 166), (173, 158), (173, 161), (173, 162), (173, 164), )
coordinates_F4DEB3 = ((131, 66),
(131, 67), (132, 66), (132, 68), (133, 66), (133, 68), (134, 65), (134, 67), (134, 70), (134, 71), (134, 72), (134, 74), (135, 65), (135, 68), (135, 74), (136, 66), (136, 67), (136, 68), (136, 69), (136, 70), (136, 71), (136, 72), (136, 73), (136, 75), (137, 68), (137, 70), (137, 71), (137, 75), (138, 68), (138, 70), (138, 72), (138, 74), (139, 68), (139, 71), (140, 67), (140, 70), (140, 80), (140, 82), (140, 83), (140, 84), (140, 85), (140, 86), (140, 87), (140, 88), (141, 67), (141, 69), (141, 80), (141, 89), (141, 90), (142, 66), (142, 69), (142, 79), (142, 82), (142, 83), (142, 84), (142, 85), (142, 86), (142, 87), (142, 88), (142, 90), (143, 67), (143, 70), (143, 81), (143, 82), (144, 67), (144, 69), (144, 71), (144, 78), (144, 79), (144, 82), (144, 84), (144, 85), (144, 86),
(144, 87), (144, 89), (145, 67), (145, 69), (145, 70), (145, 72), (145, 77), (145, 78), (145, 81), (145, 82), (146, 68), (146, 70), (146, 72), (146, 77), (147, 68), (147, 70), (147, 71), (147, 72), (147, 77), (148, 69), (148, 71), (148, 72), (148, 74), (148, 77), (149, 69), (149, 71), (149, 72), (149, 73), (149, 76), (150, 70), (150, 75), (151, 70), (151, 73), (151, 74), )
coordinates_FE00FF = ((99, 127),
(100, 127), (101, 127), (101, 128), (102, 127), (102, 128), (103, 122), (103, 124), (103, 128), (104, 115), (104, 116), (104, 122), (104, 128), (105, 114), (105, 117), (105, 122), (105, 124), (105, 125), (105, 126), (105, 128), (106, 115), (106, 117), (106, 122), (106, 124), (106, 125), (106, 126), (106, 128), (107, 115), (107, 118), (107, 121), (107, 123), (107, 124), (107, 125), (107, 127), (108, 117), (108, 120), (108, 121), (108, 122), (108, 123), (108, 124), (108, 125), (108, 127), (109, 116), (109, 118), (109, 121), (109, 122), (109, 123), (109, 124), (109, 126), (110, 117), (110, 119), (110, 120), (110, 121), (110, 122), (110, 123), (110, 124), (110, 126), (111, 118), (111, 120), (111, 121), (111, 122), (111, 123), (111, 125), (112, 105), (112, 106), (112, 119), (112, 121), (112, 122), (112, 123), (112, 125), (113, 99), (113, 104), (113, 108),
(113, 109), (113, 110), (113, 111), (113, 113), (113, 120), (113, 122), (113, 123), (113, 125), (114, 99), (114, 104), (114, 106), (114, 114), (114, 119), (114, 120), (114, 121), (114, 122), (114, 124), (115, 98), (115, 104), (115, 106), (115, 107), (115, 108), (115, 109), (115, 110), (115, 111), (115, 112), (115, 113), (115, 115), (115, 118), (115, 119), (115, 120), (115, 121), (115, 122), (115, 124), (116, 98), (116, 100), (116, 104), (116, 105), (116, 106), (116, 107), (116, 108), (116, 109), (116, 110), (116, 111), (116, 112), (116, 113), (116, 114), (116, 117), (116, 119), (116, 120), (116, 121), (116, 124), (117, 97), (117, 100), (117, 104), (117, 106), (117, 107), (117, 108), (117, 109), (117, 110), (117, 111), (117, 112), (117, 113), (117, 114), (117, 115), (117, 123), (118, 97), (118, 100), (118, 104), (118, 106), (118, 107), (118, 108),
(118, 109), (118, 110), (118, 111), (118, 112), (118, 113), (118, 118), (118, 119), (118, 121), (119, 97), (119, 99), (119, 100), (119, 101), (119, 102), (119, 104), (119, 105), (119, 106), (119, 107), (119, 108), (119, 109), (119, 110), (119, 111), (119, 112), (119, 114), (119, 115), (119, 116), (119, 117), (120, 96), (121, 96), (121, 97), (121, 98), (121, 99), (121, 100), (121, 101), (121, 102), (121, 103), (121, 104), (121, 105), (121, 106), (121, 107), (121, 108), (121, 109), (121, 110), (121, 112), (122, 104), (122, 107), (122, 112), )
coordinates_26408B = ((124, 73),
(124, 75), (124, 76), (124, 77), (124, 78), (124, 81), (124, 85), (124, 87), (124, 89), (124, 90), (124, 94), (124, 96), (124, 97), (124, 98), (124, 99), (124, 100), (124, 101), (124, 102), (124, 103), (124, 104), (124, 107), (124, 108), (124, 109), (124, 110), (124, 111), (124, 112), (124, 113), (124, 114), (124, 116), (125, 71), (125, 79), (125, 82), (125, 83), (125, 84), (125, 90), (125, 94), (125, 105), (125, 106), (125, 118), (126, 70), (126, 73), (126, 74), (126, 75), (126, 76), (126, 77), (126, 78), (126, 85), (126, 86), (126, 87), (126, 88), (126, 89), (126, 90), (126, 91), (126, 92), (126, 93), (126, 94), (126, 95), (126, 96), (126, 97), (126, 98), (126, 99), (126, 100), (126, 101), (126, 102), (126, 103), (126, 104), (126, 105), (126, 106), (126, 107), (126, 108), (126, 109), (126, 110), (126, 111),
(126, 112), (126, 113), (126, 114), (126, 115), (126, 120), (127, 69), (127, 71), (127, 72), (127, 73), (127, 74), (127, 75), (127, 76), (127, 78), (127, 81), (127, 84), (127, 88), (127, 90), (127, 93), (127, 94), (127, 95), (127, 96), (127, 99), (127, 100), (127, 101), (127, 102), (127, 103), (127, 104), (127, 105), (127, 106), (127, 107), (127, 108), (127, 109), (127, 110), (127, 111), (127, 112), (127, 113), (127, 114), (127, 117), (127, 122), (127, 124), (128, 68), (128, 70), (128, 71), (128, 72), (128, 73), (128, 74), (128, 75), (128, 76), (128, 78), (128, 88), (128, 90), (128, 91), (128, 92), (128, 93), (128, 94), (128, 95), (128, 97), (128, 99), (128, 100), (128, 101), (128, 102), (128, 103), (128, 104), (128, 105), (128, 106), (128, 107), (128, 108), (128, 109), (128, 110), (128, 115), (128, 118), (128, 119),
(128, 120), (128, 124), (129, 68), (129, 69), (129, 72), (129, 73), (129, 74), (129, 75), (129, 76), (129, 77), (129, 79), (129, 88), (129, 90), (129, 91), (129, 92), (129, 93), (129, 94), (129, 95), (129, 99), (129, 101), (129, 102), (129, 105), (129, 106), (129, 107), (129, 108), (129, 111), (129, 112), (129, 114), (129, 121), (129, 124), (130, 71), (130, 76), (130, 77), (130, 78), (130, 80), (130, 89), (130, 91), (130, 92), (130, 93), (130, 95), (130, 99), (130, 103), (130, 104), (130, 110), (130, 122), (130, 124), (131, 72), (131, 74), (131, 75), (131, 78), (131, 80), (131, 89), (131, 91), (131, 92), (131, 94), (131, 99), (131, 105), (131, 108), (131, 121), (131, 124), (132, 76), (132, 80), (132, 89), (132, 91), (132, 92), (132, 94), (132, 100), (132, 101), (132, 121), (132, 124), (133, 78), (133, 80),
(133, 90), (133, 92), (133, 94), (133, 116), (133, 117), (133, 118), (133, 119), (133, 121), (133, 122), (133, 124), (134, 91), (134, 93), (134, 114), (134, 121), (134, 122), (134, 124), (135, 92), (135, 93), (135, 112), (135, 116), (135, 117), (135, 118), (135, 119), (135, 120), (135, 121), (135, 122), (135, 124), (136, 111), (136, 114), (136, 115), (136, 116), (136, 117), (136, 118), (136, 119), (136, 120), (136, 121), (136, 122), (136, 124), (137, 111), (137, 113), (137, 114), (137, 115), (137, 116), (137, 117), (137, 118), (137, 119), (137, 120), (137, 121), (137, 122), (137, 124), (138, 110), (138, 112), (138, 113), (138, 114), (138, 115), (138, 116), (138, 117), (138, 118), (138, 119), (138, 124), (139, 110), (139, 112), (139, 113), (139, 114), (139, 115), (139, 116), (139, 117), (139, 118), (139, 121), (140, 110), (140, 112), (140, 113),
(140, 114), (140, 115), (140, 116), (140, 117), (140, 119), (141, 110), (141, 112), (141, 113), (141, 114), (141, 115), (141, 116), (141, 118), (142, 111), (142, 113), (142, 114), (142, 115), (142, 117), (143, 111), (143, 113), (143, 114), (143, 117), (144, 111), (144, 116), (145, 112), (145, 114), )
coordinates_F5DEB3 = ((88, 75),
(88, 76), (89, 74), (90, 73), (90, 79), (91, 72), (91, 75), (91, 76), (91, 77), (91, 80), (92, 72), (92, 74), (92, 78), (92, 81), (92, 82), (92, 83), (92, 84), (92, 85), (92, 86), (92, 87), (92, 89), (93, 72), (93, 74), (93, 79), (93, 86), (94, 72), (94, 74), (94, 80), (94, 82), (94, 83), (94, 85), (95, 72), (95, 75), (95, 83), (95, 85), (96, 72), (96, 74), (96, 76), (96, 81), (96, 84), (97, 71), (97, 73), (97, 74), (97, 75), (97, 77), (97, 82), (97, 84), (98, 69), (98, 72), (98, 73), (98, 74), (98, 75), (98, 76), (98, 78), (98, 92), (99, 68), (99, 71), (99, 72), (99, 73), (99, 74), (99, 75), (99, 76), (99, 78), (100, 67), (100, 75), (100, 76), (100, 77), (100, 79), (101, 66), (101, 68), (101, 69), (101, 70), (101, 71),
(101, 72), (101, 73), (101, 74), (101, 78), (101, 80), (102, 66), (102, 68), (102, 75), (102, 76), (102, 80), (103, 66), (103, 67), (103, 78), (103, 80), (104, 66), (104, 67), (105, 66), (105, 68), (106, 67), (106, 70), (107, 67), (107, 71), (107, 72), (107, 73), (108, 68), (108, 70), (108, 71), (108, 75), (108, 76), (108, 77), (109, 73), (109, 79), (110, 75), (110, 77), (110, 79), (111, 76), (111, 79), (112, 77), (112, 79), (113, 77), (113, 79), (114, 77), (114, 79), (115, 77), (115, 79), (116, 78), (116, 79), (117, 78), (117, 79), )
coordinates_016400 = ((128, 126),
(129, 126), (129, 128), (130, 129), (131, 127), (131, 132), (132, 128), (132, 133), (133, 130), (133, 133), (133, 134), (134, 132), (134, 135), (135, 133), (135, 136), (136, 134), (137, 134), (137, 137), (138, 135), (138, 138), (139, 135), (139, 138), (140, 135), (140, 137), (140, 139), (141, 137), (141, 139), (142, 137), (142, 140), (143, 137), (144, 135), (144, 137), (144, 138), (144, 140), (145, 134), (145, 135), (146, 133), (146, 134), (147, 132), (147, 133), (148, 132), (148, 133), (149, 131), (149, 132), (150, 131), (150, 132), (151, 131), (151, 132), (152, 131), (153, 131), )
coordinates_B8EDC2 = ((115, 130),
(115, 131), (116, 129), (116, 133), (116, 134), (116, 135), (116, 136), (116, 137), (116, 138), (116, 140), (117, 127), (117, 130), (117, 131), (117, 141), (117, 142), (118, 127), (118, 129), (118, 130), (118, 131), (118, 132), (118, 133), (118, 134), (118, 135), (118, 136), (118, 137), (118, 138), (118, 139), (118, 140), (118, 144), (119, 127), (119, 129), (119, 130), (119, 131), (119, 132), (119, 133), (119, 134), (119, 135), (119, 136), (119, 137), (119, 138), (119, 139), (119, 140), (119, 141), (119, 142), (119, 145), (120, 126), (120, 128), (120, 129), (120, 130), (120, 131), (120, 132), (120, 133), (120, 134), (120, 135), (120, 136), (120, 137), (120, 138), (120, 139), (120, 140), (120, 141), (120, 142), (120, 143), (120, 144), (120, 146), (121, 125), (121, 127), (121, 128), (121, 129), (121, 130), (121, 131), (121, 132), (121, 133), (121, 134),
(121, 135), (121, 136), (121, 137), (121, 138), (121, 139), (121, 140), (121, 141), (121, 142), (121, 143), (121, 144), (121, 145), (122, 125), (122, 127), (122, 128), (122, 129), (122, 130), (122, 131), (122, 132), (122, 133), (122, 134), (122, 135), (122, 136), (122, 137), (122, 138), (122, 139), (122, 140), (122, 141), (122, 142), (122, 143), (122, 144), (122, 146), (123, 124), (123, 126), (123, 127), (123, 128), (123, 129), (123, 130), (123, 131), (123, 132), (123, 133), (123, 134), (123, 135), (123, 136), (123, 137), (123, 138), (123, 139), (123, 140), (123, 141), (123, 142), (123, 143), (123, 144), (123, 146), (124, 125), (124, 127), (124, 128), (124, 129), (124, 130), (124, 131), (124, 132), (124, 133), (124, 134), (124, 135), (124, 136), (124, 137), (124, 138), (124, 139), (124, 140), (124, 141), (124, 142), (124, 143), (124, 144), (124, 147),
(125, 126), (125, 128), (125, 129), (125, 130), (125, 131), (125, 132), (125, 133), (125, 134), (125, 135), (125, 136), (125, 137), (125, 138), (125, 139), (125, 140), (125, 141), (125, 142), (125, 143), (125, 146), (126, 126), (126, 130), (126, 131), (126, 132), (126, 133), (126, 134), (126, 135), (126, 136), (126, 137), (126, 138), (126, 139), (126, 140), (126, 141), (126, 142), (126, 145), (127, 128), (127, 131), (127, 132), (127, 133), (127, 134), (127, 135), (127, 136), (127, 137), (127, 143), (128, 130), (128, 138), (128, 139), (128, 140), (128, 142), (129, 131), (129, 133), (129, 134), (129, 135), (129, 136), (129, 137), )
coordinates_CC5B45 = ((146, 85),
(146, 86), (146, 87), (146, 89), (147, 81), (147, 83), (147, 84), (147, 90), (148, 81), (148, 85), (148, 86), (148, 87), (148, 88), (148, 89), (148, 92), (149, 81), (149, 83), (149, 84), (149, 85), (149, 86), (149, 87), (149, 88), (149, 89), (149, 90), (149, 92), (150, 81), (150, 83), (150, 84), (150, 85), (150, 86), (150, 87), (150, 91), (151, 80), (151, 81), (151, 83), (151, 88), (151, 90), (152, 80), (152, 84), (152, 85), (152, 86), (153, 78), (153, 81), (153, 82), (154, 77), (154, 80), (155, 76), (155, 78), (156, 75), (156, 78), (157, 75), (157, 77), (157, 78), (157, 79), (158, 75), (158, 81), (158, 82), (158, 83), (158, 84), (158, 85), (158, 86), (158, 88), (159, 76), (159, 78), (159, 79), (159, 80), (159, 88), (159, 98), (160, 82), (160, 84), (160, 85), (160, 86), (160, 94),
(160, 96), (160, 99), (160, 109), (160, 111), (161, 82), (161, 84), (161, 92), (161, 94), (161, 98), (161, 100), (161, 109), (162, 81), (162, 83), (162, 85), (162, 90), (162, 98), (162, 100), (162, 111), (163, 80), (163, 81), (163, 83), (163, 84), (163, 89), (163, 92), (163, 98), (163, 101), (164, 79), (164, 82), (164, 87), (164, 89), (164, 90), (164, 91), (164, 92), (164, 93), (164, 98), (164, 100), (164, 104), (165, 79), (165, 81), (165, 83), (165, 86), (165, 88), (165, 91), (165, 93), (165, 97), (165, 99), (165, 100), (165, 101), (165, 105), (166, 79), (166, 81), (166, 82), (166, 83), (166, 84), (166, 87), (166, 89), (166, 90), (166, 91), (166, 92), (166, 93), (166, 95), (166, 98), (166, 102), (166, 103), (166, 105), (167, 80), (167, 85), (167, 86), (167, 88), (167, 91), (167, 93), (167, 99),
(167, 100), (167, 101), (167, 102), (167, 103), (167, 104), (167, 106), (168, 81), (168, 83), (168, 84), (168, 87), (168, 91), (168, 93), (168, 94), (168, 95), (168, 98), (168, 102), (168, 104), (168, 105), (168, 107), (169, 84), (169, 85), (169, 87), (169, 90), (169, 92), (169, 93), (169, 94), (169, 96), (169, 102), (169, 104), (169, 105), (169, 107), (170, 87), (170, 89), (170, 91), (170, 92), (170, 93), (170, 95), (170, 102), (170, 104), (170, 105), (170, 107), (171, 87), (171, 90), (171, 91), (171, 92), (171, 93), (171, 95), (171, 101), (171, 103), (171, 104), (171, 105), (171, 106), (171, 108), (172, 88), (172, 89), (172, 90), (172, 91), (172, 92), (172, 93), (172, 94), (172, 96), (172, 99), (172, 105), (172, 106), (172, 108), (172, 113), (173, 88), (173, 90), (173, 91), (173, 92), (173, 93), (173, 94),
(173, 95), (173, 98), (173, 101), (173, 102), (173, 103), (173, 106), (173, 107), (173, 109), (173, 112), (173, 113), (174, 89), (174, 91), (174, 92), (174, 93), (174, 94), (174, 95), (174, 96), (174, 99), (174, 105), (174, 107), (174, 108), (174, 110), (174, 114), (175, 90), (175, 92), (175, 93), (175, 94), (175, 95), (175, 96), (175, 97), (175, 99), (175, 106), (175, 108), (175, 109), (175, 112), (175, 114), (176, 91), (176, 94), (176, 95), (176, 96), (176, 97), (176, 98), (176, 99), (176, 100), (176, 102), (176, 106), (176, 108), (176, 109), (176, 110), (176, 111), (176, 112), (176, 114), (177, 92), (177, 97), (177, 98), (177, 99), (177, 103), (177, 106), (177, 108), (177, 109), (177, 112), (177, 114), (178, 94), (178, 96), (178, 105), (178, 106), (178, 107), (178, 108), (178, 114), (179, 97), (179, 99), (179, 100),
(179, 101), (179, 102), (179, 106), (179, 107), (179, 109), (179, 112), (179, 114), (180, 104), (181, 105), (181, 108), )
coordinates_27408B = ((107, 93),
(107, 95), (107, 96), (108, 91), (108, 96), (109, 90), (109, 93), (109, 94), (109, 96), (110, 89), (110, 92), (110, 93), (110, 94), (110, 96), (111, 89), (111, 91), (111, 92), (111, 93), (111, 95), (112, 88), (112, 90), (112, 91), (112, 92), (112, 93), (112, 94), (112, 95), (113, 88), (113, 90), (113, 91), (113, 92), (113, 93), (113, 94), (113, 95), (113, 97), (114, 88), (114, 90), (114, 91), (114, 92), (114, 93), (114, 94), (114, 96), (115, 88), (115, 90), (115, 91), (115, 92), (115, 93), (115, 94), (115, 96), (116, 89), (116, 91), (116, 92), (116, 93), (116, 95), (117, 89), (117, 91), (117, 92), (117, 93), (117, 95), (118, 82), (118, 83), (118, 90), (118, 92), (119, 78), (119, 79), (119, 81), (119, 84), (119, 90), (119, 92), (119, 94), (120, 76), (120, 89), (120, 92), (121, 78),
(121, 80), (121, 81), (121, 82), (121, 84), (121, 85), (121, 86), (121, 87), (121, 88), (121, 89), (121, 90), (121, 92), )
coordinates_006400 = ((93, 132),
(94, 132), (94, 133), (95, 132), (95, 133), (96, 133), (96, 134), (97, 133), (98, 134), (98, 135), (99, 129), (99, 134), (99, 136), (100, 135), (100, 137), (101, 130), (101, 136), (101, 138), (102, 130), (102, 137), (102, 139), (103, 130), (103, 137), (103, 141), (104, 137), (104, 140), (105, 136), (105, 139), (106, 134), (106, 138), (107, 133), (107, 137), (108, 129), (108, 133), (108, 136), (109, 129), (109, 133), (110, 128), (110, 133), (111, 128), (111, 130), (111, 132), (112, 127), (112, 129), (112, 131), (113, 127), (113, 130), (114, 127), (115, 127), (116, 126), )
coordinates_E1B7E7 = ((120, 158),
(120, 160), (121, 157), (121, 163), (122, 156), (122, 158), (122, 159), (122, 160), (122, 161), (122, 165), (123, 156), (123, 158), (123, 159), (123, 160), (123, 161), (123, 162), (123, 163), (123, 167), (124, 156), (124, 158), (124, 159), (124, 160), (124, 161), (124, 162), (124, 163), (124, 168), (125, 157), (125, 159), (125, 160), (125, 161), (125, 164), (125, 165), (125, 167), (126, 162), (127, 158), (127, 161), )
coordinates_CD5B45 = ((62, 116),
(62, 118), (62, 119), (62, 120), (62, 121), (62, 123), (63, 115), (63, 123), (64, 115), (64, 117), (64, 120), (64, 121), (64, 123), (65, 102), (65, 104), (65, 115), (65, 117), (65, 118), (65, 121), (65, 123), (66, 99), (66, 101), (66, 104), (66, 115), (66, 117), (66, 120), (66, 123), (67, 98), (67, 101), (67, 102), (67, 104), (67, 115), (67, 116), (67, 121), (67, 123), (68, 95), (68, 98), (68, 100), (68, 101), (68, 105), (68, 115), (68, 116), (68, 122), (68, 123), (69, 90), (69, 91), (69, 92), (69, 93), (69, 94), (69, 96), (69, 98), (69, 99), (69, 100), (69, 101), (69, 103), (69, 105), (69, 115), (69, 116), (69, 122), (69, 123), (70, 87), (70, 89), (70, 95), (70, 98), (70, 99), (70, 101), (70, 105), (70, 106), (70, 115), (70, 117), (70, 123), (71, 86), (71, 90),
(71, 91), (71, 92), (71, 95), (71, 96), (71, 97), (71, 98), (71, 99), (71, 101), (71, 106), (71, 114), (71, 115), (71, 117), (72, 85), (72, 88), (72, 89), (72, 90), (72, 91), (72, 92), (72, 93), (72, 96), (72, 97), (72, 98), (72, 99), (72, 101), (72, 114), (72, 117), (73, 87), (73, 88), (73, 89), (73, 91), (73, 95), (73, 97), (73, 98), (73, 100), (73, 114), (73, 116), (74, 84), (74, 89), (74, 90), (74, 92), (74, 96), (74, 98), (74, 100), (74, 114), (74, 115), (75, 83), (75, 85), (75, 87), (75, 88), (75, 91), (75, 92), (75, 93), (75, 97), (75, 100), (75, 113), (75, 115), (76, 82), (76, 85), (76, 93), (76, 99), (76, 108), (76, 112), (76, 114), (77, 81), (77, 83), (77, 85), (77, 93), (77, 105), (77, 108), (77, 113), (78, 80), (78, 82),
(78, 84), (78, 91), (78, 93), (78, 104), (78, 107), (78, 108), (78, 109), (78, 110), (78, 112), (79, 79), (79, 81), (79, 82), (79, 83), (79, 85), (79, 92), (79, 94), (79, 103), (79, 105), (79, 106), (79, 107), (79, 108), (79, 112), (80, 78), (80, 80), (80, 81), (80, 82), (80, 83), (80, 84), (80, 86), (80, 94), (80, 103), (80, 105), (80, 106), (80, 107), (80, 108), (80, 109), (80, 110), (80, 112), (81, 78), (81, 80), (81, 81), (81, 82), (81, 83), (81, 84), (81, 85), (81, 87), (81, 103), (81, 105), (81, 106), (81, 107), (81, 108), (81, 109), (81, 110), (81, 112), (82, 78), (82, 81), (82, 82), (82, 83), (82, 84), (82, 85), (82, 86), (82, 88), (82, 103), (82, 105), (82, 106), (82, 107), (82, 108), (82, 109), (82, 110), (82, 111), (82, 113), (83, 79),
(83, 82), (83, 83), (83, 84), (83, 85), (83, 86), (83, 87), (83, 89), (83, 104), (83, 106), (83, 107), (83, 108), (83, 109), (83, 110), (83, 111), (83, 112), (83, 114), (83, 115), (83, 116), (83, 117), (83, 119), (84, 81), (84, 83), (84, 84), (84, 85), (84, 86), (84, 87), (84, 88), (84, 90), (84, 105), (84, 107), (84, 108), (84, 109), (84, 110), (84, 111), (84, 112), (84, 113), (84, 120), (85, 82), (85, 84), (85, 85), (85, 86), (85, 87), (85, 89), (85, 106), (85, 111), (85, 112), (85, 113), (85, 114), (85, 115), (85, 116), (85, 117), (85, 119), (86, 82), (86, 84), (86, 85), (86, 86), (86, 88), (86, 107), (86, 109), (86, 110), (86, 112), (86, 113), (86, 114), (86, 115), (86, 116), (86, 117), (86, 119), (87, 82), (87, 84), (87, 85), (87, 87), (87, 111),
(87, 112), (87, 113), (87, 114), (87, 115), (87, 116), (87, 118), (88, 82), (88, 84), (88, 85), (88, 86), (88, 87), (88, 112), (88, 114), (88, 115), (88, 117), (89, 83), (89, 87), (89, 112), (89, 116), (90, 85), (90, 86), (90, 88), (90, 89), (90, 111), (90, 113), (90, 115), )
coordinates_6395ED = ((132, 126),
(133, 127), (134, 126), (134, 127), (135, 126), (135, 127), (136, 126), (136, 128), (137, 126), (137, 128), (138, 126), (138, 127), (138, 129), (139, 126), (139, 127), (139, 129), (140, 126), (140, 127), (140, 129), (141, 126), (141, 127), (141, 129), (142, 126), (142, 127), (142, 129), (143, 126), (143, 127), (143, 129), (144, 129), (145, 127), (145, 128), )
coordinates_00FFFE = ((145, 142),
(146, 136), (146, 138), (146, 139), (146, 140), (146, 142), (147, 136), (147, 143), (148, 135), (148, 140), (148, 141), (148, 143), (149, 135), (149, 137), (149, 141), (149, 143), (150, 134), (150, 136), (150, 141), (150, 144), (151, 134), (151, 140), (151, 142), (151, 144), (152, 135), (152, 140), (152, 142), (152, 143), (152, 145), (153, 133), (153, 134), (153, 139), (153, 141), (153, 142), (153, 143), (153, 144), (153, 146), (154, 139), (154, 146), (155, 139), (155, 141), (155, 142), (155, 143), (155, 144), (155, 147), (156, 147), (157, 146), (157, 148), (158, 146), (158, 148), (159, 146), (159, 149), (160, 146), (160, 149), (161, 140), (161, 142), (161, 143), (161, 144), (161, 146), (161, 147), (161, 148), (161, 150), (162, 142), (162, 143), (162, 146), (162, 147), (162, 148), (162, 150), (163, 145), (163, 148), (163, 150), (164, 146), (164, 149),
(164, 151), (165, 147), (165, 151), (166, 148), (166, 151), (167, 149), (167, 152), (168, 152), (169, 152), )
coordinates_B4E7FA = ((121, 149),
(121, 152), (122, 149), (122, 152), (123, 149), (123, 152), (124, 149), (124, 152), )
coordinates_F98072 = ((140, 124),
(141, 121), (141, 124), (142, 120), (142, 122), (142, 124), (143, 119), )
|
python
|
# =============================================================================
#
# Jade Tree Personal Budgeting Application | jadetree.io
# Copyright (c) 2020 Asymworks, LLC. All Rights Reserved.
#
# =============================================================================
from flask.views import MethodView
from flask_socketio import emit
from jadetree.api.common import JTApiBlueprint, auth
from jadetree.database import db
from jadetree.domain.types import TransactionType
from jadetree.service import payee as payee_service
from .schema import PayeeDetailSchema, PayeeSchema
#: Authentication Service Blueprint
blp = JTApiBlueprint('payee', __name__, description='Payee Service')
@blp.route('/payees')
class PayeeList(MethodView):
'''API Endpoint for `Payee` Model'''
@auth.login_required
@blp.response(PayeeSchema(many=True))
def get(self):
'''Return list of Payees'''
return auth.current_user().payees
@auth.login_required
@blp.arguments(PayeeSchema)
@blp.response(PayeeSchema)
def post(self, json_data):
'''Create new Payee'''
payee = payee_service.create_payee(
db.session,
auth.current_user(),
**json_data,
)
emit(
'create',
{
'class': 'Payee',
'items': [PayeeSchema().dump(payee)],
},
namespace='/api/v1',
room=auth.current_user().uid_hash
)
return payee
@blp.route('/payees/<int:payee_id>')
class PayeeItem(MethodView):
'''API Endpoint for `Payee` Model'''
@auth.login_required
@blp.response(PayeeDetailSchema)
def get(self, payee_id):
        '''Return details for a single Payee, including its most recent transaction'''
p = payee_service._load_payee(
db.session,
auth.current_user(),
payee_id
)
# Get the most recent transaction line for the Payee
p.last_category_id = None
p.last_account_id = None
p.last_amount = None
p.last_memo = None
p.last_type = None
tl = payee_service.get_payee_last_txn(db.session, p.id)
if tl is not None:
if tl.type != TransactionType.Transfer:
p.last_category_id = tl.category_id
p.last_account_id = None
else:
p.last_category_id = None
p.last_account_id = tl.opposing_id
p.last_amount = tl.amount
p.last_memo = tl.memo
p.last_type = tl.type
return p
|
python
|
from setuptools import setup
import codecs
from fns import __version__
with codecs.open('README.md', 'r', 'utf-8') as readme:
long_description = readme.read()
setup(
name='fns',
version=__version__,
description='Revise receipt client',
author='Sergey Popov',
author_email='[email protected]',
license='MIT License',
url='https://github.com/poserg/revise-receipt-client/',
packages=[
'fns',
],
long_description=long_description,
install_requires=[
'requests',
],
)
|
python
|
from pyconvcli import PyConvCli
import os
def main():
cli = PyConvCli('pyconvcli_internal_cli',os.path.dirname(os.path.realpath(__file__)),'pyconvcli')
cli.run()
def visualize():
    cli = PyConvCli('pyconvcli_internal_cli',os.path.dirname(os.path.realpath(__file__)),'pyconvcli')
args,parsers = cli.parse_args()
cli.parsers=parsers
cli.visualize()
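# Minimal usage sketch (an assumption, not part of the original module): run the
# internal CLI when this file is executed directly; normally `main` and
# `visualize` would be wired up as console-script entry points in the package metadata.
if __name__ == '__main__':
    main()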
|
python
|
class VectorUtils:
def __init__(self):
pass
@staticmethod
def norm_2(vec):
return vec[0]**2 + vec[1]**2 + vec[2]**2
@staticmethod
def norm(vec):
return VectorUtils.norm_2(vec)**(1./2)
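# Minimal usage sketch with illustrative values (3, 4, 12 is a Pythagorean quadruple):
if __name__ == '__main__':
    v = (3.0, 4.0, 12.0)
    print(VectorUtils.norm_2(v))  # 169.0 -> squared Euclidean norm
    print(VectorUtils.norm(v))    # 13.0  -> Euclidean norm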
|
python
|
import time # so we can use "sleep" to wait between actions
import RPi.GPIO as io # import the GPIO library we just installed but call it "io"
from ISStreamer.Streamer import Streamer # import the ISStreamer
## name the bucket and individual access_key
#streamer = Streamer(bucket_name="Locker Protector", bucket_key="locker_protector", access_key="YOUR_ACCESS_KEY_HERE")
io.setmode(io.BCM) # set GPIO mode to BCM
door_pin = 23 # enter the number of whatever GPIO pin you're using
io.setup(door_pin, io.IN, pull_up_down=io.PUD_UP) # use the built-in pull-up resistor
door=0 # initialize door
## Event loop
while True:
## if the switch is open
if io.input(door_pin):
#streamer.log("Door", "Open") # stream a message saying "Open"
#streamer.flush() # send the message immediately
door = 0 # set door to its initial value
time.sleep(1) # wait 1 second before the next action
## if the switch is closed and door does not equal 1
if (io.input(door_pin) == False and door != 1):
#streamer.log("Door", "Close") # stream a message saying "Close"
#streamer.flush() # send the message immediately
door = 1 # set door so that this loop won't act again until the switch has been opened
|
python
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.utils.translation import ugettext_lazy as _
title = _("Reporters and Groups")
tab_link = "/reporters"
|
python
|
# persistence.py
"""
Wrapper to edit the persistent settings
"""
import os
import sys
import pathlib
import argparse
import configparser
import thinkpad_tools_assets.classes
from thinkpad_tools_assets.cmd import commandline_parser
from thinkpad_tools_assets.utils import NotSudo
try:
if os.getuid() != 0:
raise NotSudo("Script must be run as superuser/sudo")
except NotSudo:
print("ERROR: This script must be run as superuser/sudo")
sys.exit(1)
USAGE_HEAD: str = '''\
thinkpad-tools persistence <verb>
Supported verbs are:
edit Edit the persistent settings
enable Enable persistent settings
disable Disable persistent settings
apply Apply the persistent settings
'''
USAGE_EXAMPLES: str = '''\
Examples:
thinkpad-tools persistence edit
thinkpad-tools persistence disable
thinkpad-tools persistence enable
thinkpad-tools persistence apply
'''
class PersistenceHandler(object):
"""
    Handler for persistence related commands
"""
def __init__(self):
self.parser: argparse.ArgumentParser = argparse.ArgumentParser(
prog='thinkpad-tools persistence',
description='Edit persistence settings',
usage=USAGE_HEAD,
epilog=USAGE_EXAMPLES,
formatter_class=argparse.RawDescriptionHelpFormatter
)
        self.parser.add_argument('verb', type=str,
                                 help='The action to take')
def run(self, unparsed_args: list):
"""
Parse and execute the command
:param unparsed_args: Unparsed arguments for this property
:return: Nothing
"""
def invalid_property(prop_name: str, exit_code: int):
"""
Print error message and exit with exit code 1
:param prop_name: Name of the property
:param exit_code: Exit code
            :return: Nothing, the program exits with the given exit code
"""
print(
'Invalid command "%s", available properties: ' % prop_name +
', '.join(self.inner.__dict__.keys()),
file=sys.stderr
)
exit(exit_code)
# Parse arguments
args: argparse.Namespace = self.parser.parse_args(unparsed_args)
verb: str = str(args.verb).lower()
# Commands
if verb == 'edit':
try:
editor: str = os.environ['EDITOR']
except KeyError:
editor: str = "/usr/bin/nano"
os.system('sudo {editor} /etc/thinkpad-tools.ini'
.format(editor=editor))
return
if verb == "enable":
os.system('systemctl daemon-reload')
os.system('systemctl enable thinkpad-tools.service')
print("""To set persistent settings, please edit the file
'/etc/thinkpad-tools.ini'""")
print("Persistence enabled")
return
if verb == "disable":
os.system('systemctl daemon-reload')
os.system('systemctl disable thinkpad-tools.service')
print("Persistence disabled")
return
if verb == "apply":
config: configparser.ConfigParser = configparser.ConfigParser()
config.read('/etc/thinkpad-tools.ini')
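            # Replay each `key = value` pair from the ini file as the equivalent
            # CLI call: `thinkpad-tools <section> set-<key> <value>`.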
for section in config.sections():
for (command, val) in config.items(section):
commandline_parser([section, "set-"+command, val])
return
# No match found
print('Command "%s" not found' % verb, file=sys.stderr)
exit(1)
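# Minimal usage sketch (an assumed invocation; in the real package this handler is
# dispatched from the top-level `thinkpad-tools` command line):
if __name__ == '__main__':
    PersistenceHandler().run(sys.argv[1:])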
|
python
|
from .interval import Interval, str_to_iv
from dataclasses import dataclass
from itertools import product
from math import prod
@dataclass(frozen=True)
class Cuboid:
points: list[Interval]
value: any = None
def volume(self) -> int:
return prod((x.get_length() for x in self.points))
def dims(self) -> int:
"""Number of dimensions of the cuboid"""
return len(self.points)
def __eq__(self,other:"Cuboid") -> bool:
if not isinstance(other,Cuboid): return False
if other.dims() != self.dims(): return False
return all(x==y for (x,y) in zip(self.points,other.points))
def __and__(self,other:"Cuboid") -> "Cuboid":
"""Return intersection of two cuboids of the same dimensionality"""
if not isinstance(other,Cuboid): return None
if other.dims() != self.dims(): return None
intersection = [a&b for (a,b) in zip(self.points,other.points)]
if None in intersection: return None
return Cuboid(intersection)
def __sub__(self,other:"Cuboid") -> list["Cuboid"]:
"""Return list of cuboids that add together to (self-other)"""
if not isinstance(other,Cuboid): return []
if not self.dims() == other.dims(): return []
intersection = self & other
if not intersection: return []
        # List of lists of interval segments in the respective dimensions
sections = [[x&y] + (x-y) for x,y in zip(self.points,other.points)]
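        # The Cartesian product of these segments tiles `self`; dropping the tile
        # equal to the intersection leaves exactly the pieces making up self - other.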
result = [Cuboid(p,self.value) for p in product(*sections) if Cuboid(p)!=intersection]
return result
def str_to_cb(s:str, val:any=None) -> Cuboid:
"""Converts a string of the type a..b c..e [...] into a Cuboid"""
_s = s.strip()
l = [str_to_iv(x) for x in _s.split()]
return Cuboid(l,val)
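# Minimal usage sketch (hypothetical coordinates; exact results depend on the
# Interval semantics defined in the sibling `interval` module):
if __name__ == '__main__':
    a = str_to_cb('0..10 0..10 0..10', 'on')
    b = str_to_cb('5..15 5..15 5..15')
    print(a & b)       # the overlapping Cuboid, or None if the boxes are disjoint
    print(len(a - b))  # how many axis-aligned pieces of `a` remain after removing `b`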
|
python
|
import argparse
from .display import LINE_SIZE
class Args:
def __init__(self, * args, mutex: list = [], group: list = [], **kargs):
self.args = args
self.kargs = kargs
self.group = group
self.mutex = mutex
self.arguments = None
def build(self, parser=None):
if parser is None:
parser = argparse.ArgumentParser(
prog='cuticle_analysis',
epilog='~'*LINE_SIZE,
usage='cuticle_analysis [options...]'
)
if len(self.args) or len(self.kargs):
parser.add_argument(*self.args, **self.kargs)
if len(self.group):
arg_group = parser.add_argument_group()
for arg in self.group:
arg.build(arg_group)
if len(self.mutex):
mutex_group = parser.add_mutually_exclusive_group()
for arg in self.mutex:
arg.build(mutex_group)
return parser
def get_args(self):
if self.arguments is None:
self.arguments, _ = self.build().parse_known_args()
return self.arguments
_builder = Args(
group=[
Args('--download-dataset', action='store_true',
help='download and unzip dataset'),
# int of id to store
Args('--cite', type=int, default=0,)
],
)
def get_args():
return _builder.get_args()
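# Minimal usage sketch (hypothetical invocation, e.g.
# `python -m cuticle_analysis --download-dataset --cite 3`):
if __name__ == '__main__':
    args = get_args()
    print(args.download_dataset, args.cite)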
|
python
|
from datetime import datetime as dt
import requests, re
import pandas as pd
import numpy as np
import json
import time
import os
import tushare as ts
import logging
from bs4 import BeautifulSoup
logger = logging.getLogger('main.fetch')
def fetch_index(index_name):
path = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(path, index_name+'.csv')
try:
if (index_name == 'csi300'):
data = pd.read_csv(filename)
data.columns = ['symbol', 'company']
# data = data.drop(['lastsale', 'netchange', 'netchange', 'share_volume', 'Nasdaq100_points', 'Unnamed: 7'], axis=1)
data.index.name = 'symbol'
return data
except Exception as e:
logger.error('Failed to fetch index! {%s}' % e)
# raise fetchError('Fetching failed')
def get_daily_adjusted(config,ticker, type, today_only, index_name):
key = config.TS_KEY
pro = ts.pro_api(key)
for _ in range(3):
time.sleep(30)
try:
df = pro.query('daily', ts_code=ticker)
df.drop(["pre_close","change","pct_chg","amount"], axis=1, inplace=True)
df.rename(columns={"ts_code": "symbol", "trade_date": "date", "vol": "volume"}, inplace=True)
df['date'] = pd.to_datetime(df['date'])
df['date'] = df['date'].apply(lambda x: x.strftime('%Y-%m-%d'))
if today_only:
return df.loc[df.index.min()].to_frame().T # the latest quote
return df
        except Exception:
            # logger.error('Failed to fetch %s' % ticker)
            continue
    raise fetchError('Fetching failed')
class fetchError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def get_yahoo_finance_price(ticker, today_only):
if 'SH' in ticker:
url = 'https://finance.yahoo.com/quote/'+ticker.replace('SH','SS')+'/history?p='+ticker.replace('SH','SS')
else:
url = 'https://finance.yahoo.com/quote/'+ticker+'/history?p='+ticker
try:
html = requests.get(url, headers=_headers()).text
except:
time.sleep(2)
html = requests.get(url, headers=_headers()).text
try:
soup = BeautifulSoup(html,'html.parser')
soup_script = soup.find("script",text=re.compile("root.App.main")).text
        matched = re.search(r"root.App.main\s+=\s+(\{.*\})", soup_script)
if matched:
json_script = json.loads(matched.group(1))
if today_only:
data = json_script['context']['dispatcher']['stores']['HistoricalPriceStore']['prices'][0]
df = pd.DataFrame({'date': dt.fromtimestamp(data['date']).strftime("%Y-%m-%d"),
'close': round(data['close'], 2),
'volume': int(str(data['volume'])[:-2]),
'open': round(data['open'], 2),
'high': round(data['high'], 2),
'low': round(data['low'], 2),
}, index=[0])
return df
else:
data = json_script['context']['dispatcher']['stores']['HistoricalPriceStore']['prices']
df = pd.DataFrame(data, columns=['date', 'close', 'volume', 'open', 'high', 'low'])
df['date'] = df['date'].apply(lambda x: dt.fromtimestamp(x).strftime("%Y-%m-%d")).dropna()
df['volume'] = df['volume'] // 100
print(df)
return df
except Exception as e:
raise fetchError('Fetching failed')
######################################## YAHOO Fetching #########
def get_yahoo_bvps(ticker):
url = 'https://finance.yahoo.com/quote/{0}/key-statistics?p={0}'.format(ticker)
try:
html = requests.get(url, headers=_headers()).text
except:
time.sleep(30)
html = requests.get(url, headers=_headers()).text
try:
soup = BeautifulSoup(html,'html.parser')
soup_script = soup.find("script",text=re.compile("root.App.main")).text
        matched = re.search(r"root.App.main\s+=\s+(\{.*\})", soup_script)
if matched:
json_script = json.loads(matched.group(1))
cp = json_script['context']['dispatcher']['stores']['QuoteSummaryStore']['defaultKeyStatistics']['bookValue']['fmt']
return float(cp)
else:
return None
except:
return None
def get_yahoo_cr(ticker):
url = 'https://finance.yahoo.com/quote/{0}/key-statistics?p={0}'.format(ticker)
try:
html = requests.get(url, headers=_headers()).text
except:
time.sleep(30)
html = requests.get(url, headers=_headers()).text
try:
soup = BeautifulSoup(html,'html.parser')
soup_script = soup.find("script",text=re.compile("root.App.main")).text
        matched = re.search(r"root.App.main\s+=\s+(\{.*\})", soup_script)
if matched:
json_script = json.loads(matched.group(1))
cr = json_script['context']['dispatcher']['stores']['QuoteSummaryStore']['financialData']['currentRatio']['fmt']
return float(cr)
else:
return None
except:
pass
def _headers():
return {"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7",
"cache-control": "max-age=0",
"dnt": "1",
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "none",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"}
|
python
|
import os.path
from nbt.nbt import NBTFile
from PIL import Image
from config import *
def main():
# Read map files to sort by scale
print("Reading map files...")
mapfiles = []
for i in range(0,5):
mapfiles.append([])
i = 0
while os.path.isfile(inputDir + "map_" + str(i) + ".dat"):
# read file
nbt = NBTFile(inputDir + "map_" + str(i) + ".dat")
data = nbt["data"]
if int(data["dimension"].value) == 0:
scale = data["scale"].value
print(" Map " + str(i))
mapfiles[scale].append(i)
nbt = None
i = i + 1
# Process map files in order of scaling, and build image
print("Processing map files into image...")
imgwidth = xmax - xmin
imgheight = zmax - zmin
img = Image.new('RGBA', (imgwidth, imgheight))
colormap = getcolormap()
for s in reversed(range(0,5)):
for i in mapfiles[s]:
# get map info
nbt = NBTFile(inputDir + "map_" + str(i) + ".dat")
data = nbt["data"]
scale = 2 ** int(data["scale"].value)
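            # Each map item stores a 128x128 pixel grid; xCenter/zCenter give the
            # world-space center, so the top-left corner is center - 64 * scale.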
cx = data["xCenter"].value - (64 * scale)
cz = data["zCenter"].value - (64 * scale)
print(" Map " + str(i) + ": Scale = " + str(scale) + ":1, Corner=" + str(cx) + "," + str(cz))
# get colors
for mx in range(0,128):
for mz in range(0,128):
# color at map pixel
colorindex = data["colors"][mx + mz * 128]
if colorindex < 4: # unexplored
continue
color = colormap[colorindex]
# iterate over actual world blocks
for px in range(0,scale):
for pz in range(0,scale):
coords = (cx + (mx * scale) + px, cz + (mz * scale) + pz)
imgx = coords[0] - xmin
imgy = coords[1] - zmin
if imgx >= 0 and imgx < imgwidth and imgy >= 0 and imgy < imgheight:
img.putpixel((imgx,imgy), color)
# clean up
nbt = None
data = None
print("Saving image...")
img.save(outputFile)
print("Done!")
def getcolormap():
colormap = {}
colormap[0] = (0, 0, 0)
colormap[1] = (0, 0, 0)
colormap[2] = (0, 0, 0)
colormap[3] = (0, 0, 0)
colormap[4] = (89, 125, 39)
colormap[5] = (109, 153, 48)
colormap[6] = (127, 178, 56)
colormap[7] = (67, 94, 29)
colormap[8] = (174, 164, 115)
colormap[9] = (213, 201, 140)
colormap[10] = (247, 233, 163)
colormap[11] = (130, 123, 86)
colormap[12] = (140, 140, 140)
colormap[13] = (171, 171, 171)
colormap[14] = (199, 199, 199)
colormap[15] = (105, 105, 105)
colormap[16] = (180, 0, 0)
colormap[17] = (220, 0, 0)
colormap[18] = (255, 0, 0)
colormap[19] = (135, 0, 0)
colormap[20] = (112, 112, 180)
colormap[21] = (138, 138, 220)
colormap[22] = (160, 160, 255)
colormap[23] = (84, 84, 135)
colormap[24] = (117, 117, 117)
colormap[25] = (144, 144, 144)
colormap[26] = (167, 167, 167)
colormap[27] = (88, 88, 88)
colormap[28] = (0, 87, 0)
colormap[29] = (0, 106, 0)
colormap[30] = (0, 124, 0)
colormap[31] = (0, 65, 0)
colormap[32] = (180, 180, 180)
colormap[33] = (220, 220, 220)
colormap[34] = (255, 255, 255)
colormap[35] = (135, 135, 135)
colormap[36] = (115, 118, 129)
colormap[37] = (141, 144, 158)
colormap[38] = (164, 168, 184)
colormap[39] = (86, 88, 97)
colormap[40] = (106, 76, 54)
colormap[41] = (130, 94, 66)
colormap[42] = (151, 109, 77)
colormap[43] = (79, 57, 40)
colormap[44] = (79, 79, 79)
colormap[45] = (96, 96, 96)
colormap[46] = (112, 112, 112)
colormap[47] = (59, 59, 59)
colormap[48] = (45, 45, 180)
colormap[49] = (55, 55, 220)
colormap[50] = (64, 64, 255)
colormap[51] = (33, 33, 135)
colormap[52] = (100, 84, 50)
colormap[53] = (123, 102, 62)
colormap[54] = (143, 119, 72)
colormap[55] = (75, 63, 38)
colormap[56] = (180, 177, 172)
colormap[57] = (220, 217, 211)
colormap[58] = (255, 252, 245)
colormap[59] = (135, 133, 129)
colormap[60] = (152, 89, 36)
colormap[61] = (186, 109, 44)
colormap[62] = (216, 127, 51)
colormap[63] = (114, 67, 27)
colormap[64] = (125, 53, 152)
colormap[65] = (153, 65, 186)
colormap[66] = (178, 76, 216)
colormap[67] = (94, 40, 114)
colormap[68] = (72, 108, 152)
colormap[69] = (88, 132, 186)
colormap[70] = (102, 153, 216)
colormap[71] = (54, 81, 114)
colormap[72] = (161, 161, 36)
colormap[73] = (197, 197, 44)
colormap[74] = (229, 229, 51)
colormap[75] = (121, 121, 27)
colormap[76] = (89, 144, 17)
colormap[77] = (109, 176, 21)
colormap[78] = (127, 204, 25)
colormap[79] = (67, 108, 13)
colormap[80] = (170, 89, 116)
colormap[81] = (208, 109, 142)
colormap[82] = (242, 127, 165)
colormap[83] = (128, 67, 87)
colormap[84] = (53, 53, 53)
colormap[85] = (65, 65, 65)
colormap[86] = (76, 76, 76)
colormap[87] = (40, 40, 40)
colormap[88] = (108, 108, 108)
colormap[89] = (132, 132, 132)
colormap[90] = (153, 153, 153)
colormap[91] = (81, 81, 81)
colormap[92] = (53, 89, 108)
colormap[93] = (65, 109, 132)
colormap[94] = (76, 127, 153)
colormap[95] = (40, 67, 81)
colormap[96] = (89, 44, 125)
colormap[97] = (109, 54, 153)
colormap[98] = (127, 63, 178)
colormap[99] = (67, 33, 94)
colormap[100] = (36, 53, 125)
colormap[101] = (44, 65, 153)
colormap[102] = (51, 76, 178)
colormap[103] = (27, 40, 94)
colormap[104] = (72, 53, 36)
colormap[105] = (88, 65, 44)
colormap[106] = (102, 76, 51)
colormap[107] = (54, 40, 27)
colormap[108] = (72, 89, 36)
colormap[109] = (88, 109, 44)
colormap[110] = (102, 127, 51)
colormap[111] = (54, 67, 27)
colormap[112] = (108, 36, 36)
colormap[113] = (132, 44, 44)
colormap[114] = (153, 51, 51)
colormap[115] = (81, 27, 27)
colormap[116] = (17, 17, 17)
colormap[117] = (21, 21, 21)
colormap[118] = (25, 25, 25)
colormap[119] = (13, 13, 13)
colormap[120] = (176, 168, 54)
colormap[121] = (215, 205, 66)
colormap[122] = (250, 238, 77)
colormap[123] = (132, 126, 40)
colormap[124] = (64, 154, 150)
colormap[125] = (79, 188, 183)
colormap[126] = (92, 219, 213)
colormap[127] = (48, 115, 112)
colormap[128] = (52, 90, 180)
colormap[129] = (63, 110, 220)
colormap[130] = (74, 128, 255)
colormap[131] = (39, 67, 135)
colormap[132] = (0, 153, 40)
colormap[133] = (0, 187, 50)
colormap[134] = (0, 217, 58)
colormap[135] = (0, 114, 30)
colormap[136] = (91, 60, 34)
colormap[137] = (111, 74, 42)
colormap[138] = (129, 86, 49)
colormap[139] = (68, 45, 25)
colormap[140] = (79, 1, 0)
colormap[141] = (96, 1, 0)
colormap[142] = (112, 2, 0)
colormap[143] = (59, 1, 0)
colormap[144] = (147, 124, 113)
colormap[145] = (180, 152, 138)
colormap[146] = (209, 177, 161)
colormap[147] = (110, 93, 85)
colormap[148] = (112, 57, 25)
colormap[149] = (137, 70, 31)
colormap[150] = (159, 82, 36)
colormap[151] = (84, 43, 19)
colormap[152] = (105, 61, 76)
colormap[153] = (128, 75, 93)
colormap[154] = (149, 87, 108)
colormap[155] = (78, 46, 57)
colormap[156] = (79, 76, 97)
colormap[157] = (96, 93, 119)
colormap[158] = (112, 108, 138)
colormap[159] = (59, 57, 73)
colormap[160] = (131, 93, 25)
colormap[161] = (160, 114, 31)
colormap[162] = (186, 133, 36)
colormap[163] = (98, 70, 19)
colormap[164] = (72, 82, 37)
colormap[165] = (88, 100, 45)
colormap[166] = (103, 117, 53)
colormap[167] = (54, 61, 28)
colormap[168] = (112, 54, 55)
colormap[169] = (138, 66, 67)
colormap[170] = (160, 77, 78)
colormap[171] = (84, 40, 41)
colormap[172] = (40, 28, 24)
colormap[173] = (49, 35, 30)
colormap[174] = (57, 41, 35)
colormap[175] = (30, 21, 18)
colormap[176] = (95, 75, 69)
colormap[177] = (116, 92, 84)
colormap[178] = (135, 107, 98)
colormap[179] = (71, 56, 51)
colormap[180] = (61, 64, 64)
colormap[181] = (75, 79, 79)
colormap[182] = (87, 92, 92)
colormap[183] = (46, 48, 48)
colormap[184] = (86, 51, 62)
colormap[185] = (105, 62, 75)
colormap[186] = (122, 73, 88)
colormap[187] = (64, 38, 46)
colormap[188] = (53, 43, 64)
colormap[189] = (65, 53, 79)
colormap[190] = (76, 62, 92)
colormap[191] = (40, 32, 48)
colormap[192] = (53, 35, 24)
colormap[193] = (65, 43, 30)
colormap[194] = (76, 50, 35)
colormap[195] = (40, 26, 18)
colormap[196] = (53, 57, 29)
colormap[197] = (65, 70, 36)
colormap[198] = (76, 82, 42)
colormap[199] = (40, 43, 22)
colormap[200] = (100, 42, 32)
colormap[201] = (122, 51, 39)
colormap[202] = (142, 60, 46)
colormap[203] = (75, 31, 24)
colormap[204] = (26, 15, 11)
colormap[205] = (31, 18, 13)
colormap[206] = (37, 22, 16)
colormap[207] = (19, 11, 8)
return colormap
main()
|
python
|
from queue import Queue
import threading
class Synchonizer:
def __init__(self):
return
|
python
|
nome = str(input('Digite o seu nome completo: ')).strip().title()
fatiar = nome.split()
print(f'Primeiro nome: {fatiar[0]};')
print(f'Último nome: {fatiar[-1]}.')
|
python
|
#!/usr/bin/env python3
import os
import numpy as np
from shutil import copyfile
import glob
import argparse
import h5py
import datetime
from dateutil.parser import isoparse
import time
import decimal
import shutil
from scipy.interpolate import interp2d
import pybind_isce3 as isce3
from shapely import wkt
from alos_to_nisar_l0b import get_alos_orbit, set_h5_orbit, getset_attitude
from isce3.stripmap.readers.l1.ALOS2.CEOS import ImageFile
from pybind_isce3.product import RadarGridParameters
from pybind_isce3.core import DateTime
from pybind_isce3.geometry import get_geo_perimeter_wkt
from pybind_isce3.geometry import DEMInterpolator
from H5pyGroupWrapper import H5pyGroupWrapper
'''
References:
https://en.wikipedia.org/wiki/Earth_radius#Directional
https://books.google.com/books?id=pFO6VB_czRYC&pg=PA98#v=onepage&q&f=false
https://www.eorc.jaxa.jp/ALOS-2/en/doc/fdata/PALSAR-2_xx_Format_CEOS_E_f.pdf
'''
FLAG_REFORMAT_DOPPLER_ISCE2 = True
SPEED_OF_LIGHT = 299792458.0
ALL_POLARIZATIONS_SET = set(['HH', 'HV', 'VV', 'VH', 'RH', 'RV'])
CALIBRATION_FIELD_LIST = ['elevationAntennaPattern', 'nes0']
def parse_args():
'''
Command line parser.
'''
parser = argparse.ArgumentParser(description="Package ALOS-2 L1 stripmap data into NISAR L1 HDF5")
parser.add_argument('-i', '--indir', dest='indir', type=str,
help="Folder containing one ALOS-2 L1 module",
required=True)
parser.add_argument('-l', '--pols',
dest='polarization_list', type=str,
help="List of polarizations to process",
nargs='*')
parser.add_argument('--first-line',
dest='first_line', type=int,
help="First azimuth line to unpack")
parser.add_argument('--last-line',
dest='last_line', type=int,
help="Last azimuth line to unpack")
parser.add_argument('-o', '--outh5', dest='outh5', type=str,
help="Name of output file. If not provided, will be determined from ALOS-2 granule")
parser_template = parser.add_mutually_exclusive_group()
    parser_template.add_argument('-t',
                                 '--template',
dest='template_file',
default=None,
help='Set template RSLC file')
parser_template = parser.add_mutually_exclusive_group()
parser_template.add_argument('--use-template',
dest='flag_use_template',
action='store_true',
help='Use template L1 RSLC file',
default=None)
parser_template.add_argument('--no-template',
'--do-not-use-template',
dest='flag_use_template',
action='store_false',
help='Prevent using template L1 RSLC file')
parser_verbose = parser.add_mutually_exclusive_group()
parser_verbose.add_argument('-q',
'--quiet',
dest='verbose',
action='store_false',
help='Activate quiet (non-verbose) mode',
default=True)
parser_verbose.add_argument('-v',
'--verbose',
dest='verbose',
action='store_true',
help='Activate verbose mode',
default=True)
args = parser.parse_args()
if not os.path.isdir(args.indir):
raise ValueError('{0} does not appear to be a directory'.format(args.indir))
if args.outh5 is None:
print('HDF5 output granule name will be determined on fly and created in cwd')
return args
def process(args=None):
'''
Main processing workflow.
'''
start_time = time.time()
if not args.polarization_list:
args.polarization_list = ['HH', 'HV', 'VV', 'VH']
# Discover file names
filenames = get_alos_filenames(args.indir, args)
# Parse the leader file
leader = parse_leader_file(filenames, args)
# Set up output file name
if args.outh5 is None:
args.outh5 = filenames['defaulth5']
if os.path.exists(args.outh5):
raise ValueError(f'Output HDF5 file {args.outh5} already exists. Exiting ...')
if args.template_file:
copyfile(args.template_file, args.outh5)
elif args.flag_use_template is not False:
script_dir = os.path.dirname(__file__)
template_file = os.path.join(script_dir, '..', 'templates',
'L1_SingleLookComplex.h5')
copyfile(template_file, args.outh5)
# Setup HDF5 skeleton
orbit = construct_nisar_hdf5(args.outh5, leader)
# Iterate over polarizations for imagery layers
metadata = {}
pol_list = []
for count, pol in enumerate(args.polarization_list):
pol_upper = pol.upper()
if pol_upper not in filenames:
continue
add_imagery(args, leader, filenames[pol_upper], pol_upper, orbit, metadata,
flag_first_image=count==0)
pol_list.append(pol_upper)
populate_hdf5(metadata, args.outh5, orbit, pol_list)
print('saved file:', args.outh5)
elapsed_time = time.time() - start_time
hms_str = str(datetime.timedelta(seconds = int(elapsed_time)))
print(f'elapsed time: {hms_str}s ({elapsed_time:.3f}s)')
def get_alos_filenames(indir, args):
'''
Parse the contents of a given directory to separate out leader and image files.
'''
filenames = {}
# First look for the leader file
flist = glob.glob(os.path.join(indir, 'LED-ALOS2*1.1__*'))
if len(flist) == 0:
raise ValueError('No leader files found in folder {0}'.format(indir))
elif len(flist) > 1:
raise ValueError('Multiple leader files in folder {0}'.format(indir))
filenames['leaderfile'] = flist[0]
pattern = os.path.basename(flist[0])[4:]
# Look for polarizations
if args.verbose:
print('looking for available polarizations...')
for pol in args.polarization_list:
flist = glob.glob(os.path.join(indir, 'IMG-{0}-{1}'.format(pol, pattern)))
if len(flist) == 1:
if args.verbose:
print(' found polarization: {0}'.format(pol))
filenames[pol] = flist[0]
#If no image files were found
if len(filenames) == 1:
raise ValueError('No image files were found in folder: {0}'.format(indir))
filenames['defaulth5'] = '{0}.h5'.format(pattern)
return filenames
def parse_leader_file(filenames, args):
'''
Parse leader file and check values against polarizations.
'''
from isce3.stripmap.readers.l1.ALOS2.CEOS import LeaderFile
try:
ldr = LeaderFile.LeaderFile(filenames['leaderfile'])
except AssertionError as msg:
print(msg)
raise AssertionError('Error parsing ALOS raw leader file: {0}'.format(filenames['leaderfile']))
# Checks to ensure that the number of polarizations is consistent
numpol = len(filenames) - 2 # Subtract leader and defaulth5 name
if not args.polarization_list and numpol != ldr.summary.NumberOfSARChannels:
print(f'WARNING Number of image files ({numpol}) discovered'
f' is inconsistent with Leader File ({ldr.summary.NumberOfSARChannels})')
return ldr
def construct_nisar_hdf5(outh5, ldr):
'''
Build skeleton of HDF5 file using leader file information.
'''
# Open file for writing
fid = h5py.File(outh5, 'a')
root_group = H5pyGroupWrapper(fid)
lsar_group = root_group.create_group('/science/LSAR')
# Fill up Identification
ident_group = lsar_group.create_group('identification')
ident_group.create_dataset('diagnosticModeFlag', data=np.string_("False"))
ident_group.create_dataset('isGeocoded', data=np.string_("False"))
ident_group.create_dataset('listOfFrequencies', data=np.string_(["A"]))
ident_group.create_dataset('missionId', data=np.string_("ALOS-2"))
ident_group.create_dataset('orbitPassDirection', data=np.string_(ldr.summary.TimeDirectionIndicatorAlongLine))
ident_group.create_dataset('processingType', data=np.string_("repackaging"))
ident_group.create_dataset('productType', data=np.string_("RSLC"))
ident_group.create_dataset('productVersion', data=np.string_("0.1"))
ident_group.create_dataset('absoluteOrbitNumber', data=np.uint32(0))
ident_group.create_dataset('trackNumber', data=np.uint8(0))
ident_group.create_dataset('frameNumber', data=np.uint16(0))
# Start populating metadata parts
rslc = lsar_group.create_group('SLC')
rslc.create_group('metadata/processingInformation/inputs')
# Start populating metadata
orbit_group = rslc.create_group('metadata/orbit', overwrite = True)
attitude_group = rslc.create_group('metadata/attitude')
orbit = get_alos_orbit(ldr)
set_h5_orbit(orbit_group, orbit)
getset_attitude(attitude_group, ldr, orbit)
del root_group['//science/LSAR/SLC/swaths/frequencyB']
del root_group['//science/LSAR/SLC/metadata/calibrationInformation/'
'frequencyB']
return orbit
def add_imagery(args, ldr, imgfile, pol, orbit, metadata,
flag_first_image):
'''
Populate swaths segment of HDF5 file.
'''
verbose = args.verbose
fid = h5py.File(args.outh5, 'r+')
assert(len(pol) == 2)
root_group = H5pyGroupWrapper(fid)
# parse imagefile descriptor and first record.
image = ImageFile.ImageFile(imgfile)
firstrec = image.readNextLine()
# set range-grid parameters
fsamp = ldr.summary.SamplingRateInMHz * 1.0e6
r0 = firstrec.SlantRangeToFirstSampleInm
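    # Slant-range pixel spacing follows from the complex sampling rate: dr = c / (2 * fs).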
dr = SPEED_OF_LIGHT / (2 * fsamp)
da = ldr.summary.LineSpacingInm
bytesperpixel = (image.description.NumberOfBytesPerDataGroup //
image.description.NumberOfSamplesPerDataGroup)
width = (image.description.NumberOfBytesOfSARDataPerRecord // bytesperpixel) // image.description.NumberOfSamplesPerDataGroup
length = image.description.NumberOfSARDataRecords
freq_str = '/science/LSAR/SLC/swaths/frequencyA'
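    # Absolute calibration uses the standard ALOS-2 L1.1 convention,
    # sigma0 [dB] = 10*log10(DN^2) + CF - 32.0, so amplitudes are scaled by
    # sqrt(10 ** ((CF - 32) / 10)).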
calibration_factor_db = ldr.calibration.header.CalibrationFactor - 32
calibration_factor = np.sqrt(10.0**(calibration_factor_db/10))
if verbose:
print('absolute radiometric correction (DN to sigma-naught)')
print(' calibration factor [dB]:', calibration_factor_db)
print(' calibration factor [linear]:', calibration_factor)
# If this is first pol being written, add common information as well
if flag_first_image:
freq_group = root_group.create_group(freq_str, overwrite = True)
wavelength = ldr.summary.RadarWavelengthInm
freq_group.create_dataset('centerFrequency', data=SPEED_OF_LIGHT / wavelength)
bandwidth = ldr.summary.TotalProcessorBandwidthInRange * 1.0e3
freq_group.create_dataset('rangeBandwidth', data=bandwidth)
freq_group.create_dataset('chirpDuration', data=firstrec.ChirpLengthInns * 1.0e-9)
freq_group.create_dataset('chirpSlope', data=-((freq_group['rangeBandwidth'][()])/(freq_group['chirpDuration'][()])))
prf = firstrec.PRFInmHz * 1.0e-3
freq_group.create_dataset('nominalAcquisitionPRF', data=prf)
assert(ldr.summary.SensorIDAndMode[7] == 'R' or
ldr.summary.SensorIDAndMode[7] == 'L')
metadata['Center Wavelength'] = wavelength
metadata['Bandwidth'] = bandwidth
metadata['Average Pulse Repetition Interval'] = 1.0 / prf
metadata['Azimuth Spacing per Bin'] = da
metadata['Effective Velocity'] = da * prf
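        # NOTE: the trailing "and False" below disables the left-looking branch,
        # so the look direction is always reported as 'right'.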
if ldr.summary.SensorIDAndMode[7] == 'L' and False:
lookside = 'left'
else:
lookside = 'right'
metadata['Look Direction'] = lookside.upper()
if verbose:
print('parameters from metadata:')
print(' bandwidth:', bandwidth)
print(' prf: ', prf)
print(' azimuth spacing: ', da)
print(' effective velocity: ', da * prf)
print(' look direction:', lookside)
freq_group.create_dataset('slantRangeSpacing', data=dr)
freq_group.create_dataset('slantRange', data=r0 + np.arange(width) * dr)
if not FLAG_REFORMAT_DOPPLER_ISCE2:
doppler_coeffs = [ldr.summary.CrossTrackDopplerConstantTermInHz,
ldr.summary.CrossTrackDopplerLinearTermInHzPerPixel,
ldr.summary.CrossTrackDopplerLinearTermInHzPerPixel2]
metadata['Doppler coeffs km'] = doppler_coeffs
if verbose:
print(' Doppler coeffs: [km]', ', '.join(map(str, doppler_coeffs)))
else:
doppler_coeffs = [ldr.summary.DopplerCenterFrequencyConstantTerm,
ldr.summary.DopplerCenterFrequencyLinearTerm]
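            # Evaluate the leader-file Doppler polynomial (a function of slant range
            # in km) at every 100th range bin, then refit it as a polynomial in
            # range-bin index, matching the ISCE2-style Doppler representation.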
rng = r0 + np.arange(0, width, 100) * dr
doppler = doppler_coeffs[0] + doppler_coeffs[1] * rng / 1000
dfit = np.polyfit(np.arange(0, width, 100), doppler, 1)
doppler_coeffs_rbin = [dfit[1], dfit[0], 0., 0.]
metadata['Doppler coeffs rbin'] = doppler_coeffs_rbin
if verbose:
print(' Doppler coeffs [rbin/index]:', ', '.join(map(str, doppler_coeffs)))
azfmrate_coeff = [ldr.summary.CrossTrackDopplerRateConstantTermInHzPerSec,
ldr.summary.CrossTrackDopplerRateLinearTermInHzPerSecPerPixel,
ldr.summary.CrossTrackDopplerRateQuadraticTermInHzPerSecPerPixel2]
metadata['Azimuth FM rate'] = azfmrate_coeff
if verbose:
print(' azimuth FM rate coeffs:', azfmrate_coeff)
sensing_start = (datetime.datetime(firstrec.SensorAcquisitionYear, 1, 1) +
datetime.timedelta(days=int(firstrec.SensorAcquisitionDayOfYear-1),
seconds=firstrec.SensorAcquisitionusecsOfDay*1e-6))
freq_group.create_dataset('numberOfSubSwaths', data=1)
freq_group.create_dataset('validSamplesSubSwath1', dtype='i8', shape=(length, 2))
metadata['Mission'] = 'ALOS'
metadata['Image Starting Range'] = r0
metadata['Range Spacing per Bin'] = dr
metadata['SLC width'] = width
metadata['SLC length'] = length
BAD_VALUE = -2**15
# Create imagery layer
compress = dict(chunks=(4, 512), compression="gzip",
compression_opts=9, shuffle=True)
cpxtype = np.dtype([('r', np.float32), ('i', np.float32)])
polimg = root_group.create_dataset(os.path.join(
freq_str, pol), dtype=cpxtype, shape=(length, width), **compress)
# Start populating the imagery
rec = firstrec
if args.first_line is not None:
first_line = args.first_line
else:
first_line = 1
if args.last_line is not None:
last_line = min([args.last_line, length+1])
else:
last_line = length+1
print(f'processing polarization {pol} ({length}L x {width}P):')
for linnum in range(first_line, last_line):
if (linnum % 1000 == 0):
print(' line number: {0} out of {1}'.format(linnum, length))
# Adjust range line
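        # (records may start at slightly different slant ranges; shift samples onto
        # the common r0 grid and pad the rest with BAD_VALUE)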
rshift = int(np.rint((rec.SlantRangeToFirstSampleInm - r0) / dr))
write_arr = np.full((2 * width), BAD_VALUE, dtype=np.float32)
inarr = rec.SARRawSignalData[0,:] # .astype(np.int32)
if rshift >= 0:
write_arr[2*rshift:] = inarr[:2 * (width - rshift)]
else:
write_arr[:2*rshift] = inarr[-2 * rshift:]
# Apply absolute radiometric correction
write_arr *= calibration_factor
# Complex float 16 writes work with write_direct only
polimg.write_direct(write_arr.view(cpxtype), dest_sel=np.s_[linnum-1])
# Read next record
if linnum != length:
rec = image.readNextLine()
if flag_first_image:
sensing_end = sensing_start + datetime.timedelta(seconds=length / prf)
# Get azimuth time bounds of the scene
metadata['Start Time of Acquisition'] = sensing_start
metadata['Stop Time of Acquisition'] = sensing_end
sensing_mid = sensing_start + (sensing_end - sensing_start) / 2
ref_epoch = orbit.reference_epoch.isoformat()
if verbose:
print('time parameters:')
print(' start: ', sensing_start)
print(' mid: ', sensing_mid)
print(' end: ', sensing_end)
print(' reference epoch: ', ref_epoch)
metadata['Scene Center Incidence Angle'] = \
ldr.summary.SceneCenterIncidenceAngle
ref_epoch = DateTime(ref_epoch)
timedelta_start = (DateTime(sensing_start) -
ref_epoch).total_seconds()
radar_grid = RadarGridParameters(timedelta_start,
wavelength,
prf,
r0,
dr,
lookside,
length,
width,
ref_epoch)
metadata['Radar Grid'] = radar_grid
def populate_hdf5(metadata, outfile, orbit, pol_list, frequency='A',
az_pad=10.0):
"""
Generate a Level-1 NISAR format HDF5 product.
"""
# Generate a common azimuth time vs. slant range grid for all calibration grids
# This also sets the reference epoch used for all subsequent dataset generation
construct_calibration_grid(metadata, metadata['Mission'], orbit,
az_pad=az_pad)
# Now open it for modification
with h5py.File(outfile, 'r+') as fid:
root_group = H5pyGroupWrapper(fid)
# Remove calibration fields of polarizations that were not processed
pol_set = set(pol_list)
not_processed_pol = ALL_POLARIZATIONS_SET - pol_set
for pol in not_processed_pol:
for field in CALIBRATION_FIELD_LIST:
key = ('//science/LSAR/SLC/metadata/calibrationInformation/'
f'frequencyA/{pol}/{field}')
del root_group[key]
# Set global CF conventions attribute
if frequency == 'A':
root_group.attrs['Conventions'] = np.string_('CF-1.7')
# Update the Dopplers
update_doppler(root_group, metadata, frequency)
# Update the radar metadata
update_metadata(root_group, metadata, pol_list, frequency=frequency)
# Update identification
update_identification(root_group, orbit, metadata)
def update_metadata(fid, metadata, pol_list, frequency='A'):
"""
Update radar metadata. This function mainly interfaces with the science/LSAR/SLC/swaths
group to set the right scalar parameters.
"""
# Open the correct frequency swath group
group = fid['science/LSAR/SLC/swaths/frequency' + frequency]
# Update polarization list
group['listOfPolarizations'] = np.array(pol_list, dtype='S2')
group['listOfPolarizations'].attrs['description'] = np.string_(
'List of processed polarization layers with frequency ' + frequency)
# Create new slant range array for all pixels
del group['slantRange']
R = (metadata['Image Starting Range'] + metadata['Range Spacing per Bin'] *
np.arange(metadata['SLC width']))
group['slantRange'] = R
group['slantRange'].attrs['description'] = np.string_(
'CF compliant dimension associated with slant range'
)
group['slantRange'].attrs['units'] = np.string_('meters')
group['slantRangeSpacing'][...] = metadata['Range Spacing per Bin']
inc = np.radians(metadata['Scene Center Incidence Angle'])
group['sceneCenterGroundRangeSpacing'] = metadata['Range Spacing per Bin'] / np.sin(inc)
# Bandwidth data
group['acquiredRangeBandwidth'] = metadata['Bandwidth']
group['processedRangeBandwidth'] = metadata['Bandwidth']
# Center frequency
group['acquiredCenterFrequency'] = SPEED_OF_LIGHT / metadata['Center Wavelength']
group['processedCenterFrequency'] = SPEED_OF_LIGHT / metadata['Center Wavelength']
# Nominal PRF
group['nominalAcquisitionPRF'][...] = 1.0 / metadata['Average Pulse Repetition Interval']
# Azimuth pixel spacing
group['sceneCenterAlongTrackSpacing'] = metadata['Azimuth Spacing per Bin']
# Azimuth bandwidth
if 'Azimuth Spacing per Bin' in metadata.keys():
azres = metadata['Azimuth Spacing per Bin']
group['processedAzimuthBandwidth'] = metadata['Effective Velocity'] / (2.0 * azres)
elif 'Antenna Length' in metadata.keys():
azres = 0.6 * metadata['Antenna Length']
group['processedAzimuthBandwidth'] = metadata['Effective Velocity'] / (2.0 * azres)
# Create array of azimuth times
if frequency == 'A':
group = fid['science/LSAR/SLC/swaths']
pri = metadata['Average Pulse Repetition Interval']
ref_epoch = metadata['ref_epoch']
t0 = (metadata['Start Time of Acquisition'] - ref_epoch).total_seconds()
t = t0 + pri * np.arange(metadata['SLC length'])
if 'zeroDopplerTime' in group:
desc = group['zeroDopplerTime'].attrs['description']
del group['zeroDopplerTime']
else:
desc = ''
group['zeroDopplerTime'] = t
group['zeroDopplerTime'].attrs['description'] = desc
group['zeroDopplerTime'].attrs['units'] = np.string_(metadata['ref_epoch_attr'])
group['zeroDopplerTimeSpacing'] = pri
def construct_calibration_grid(metadata, sensor_name, orbit, az_pad=10.0):
"""
Construct a low-resolution azimuth time vs. slant range grid to be used for all
calibration and geolocation grids. Spacing hard-coded for different sensors. This
function needs to be generalized to adapt to various topography heights and platform
altitudes.
"""
# Set calibration grid spacing
rspacing = 25.0
aspacing = 0.25
# Set slant-range bounds
R0 = 845000.0
R1 = 895000.0
# Get azimuth time bounds of the scene
a0 = metadata['Start Time of Acquisition']
a1 = metadata['Stop Time of Acquisition']
# Convert azimuth dates to seconds
ref_epoch_iso_format = orbit.reference_epoch.isoformat()
ref_epoch = isoparse(ref_epoch_iso_format)
metadata['ref_epoch'] = ref_epoch
metadata['Start Seconds of Acquisition'] = (a0 - ref_epoch).total_seconds()
metadata['Stop Seconds of Acquisition'] = (a1 - ref_epoch).total_seconds()
metadata['ref_epoch_attr'] = 'seconds since %s' % ref_epoch.isoformat(sep=' ')
# Pad the azimuth time bounds in each direction (az_pad in units of seconds)
a0 = round((a0 - ref_epoch).total_seconds() - az_pad)
a1 = round((a1 - ref_epoch).total_seconds() + az_pad)
# Construct grids and update metadata dictionary
rgrid = np.arange(R0, R1, rspacing)
agrid = np.arange(a0, a1, aspacing)
metadata['calibration_range_grid'] = rgrid
metadata['calibration_azimuth_grid'] = agrid
def update_identification(fid, orbit, metadata, min_height=-500.,
max_height=9000.):
"""
Updates the science/LSAR/identification group.
"""
group = fid['science/LSAR/identification']
# Zero doppler times
start = metadata['Start Time of Acquisition']
stop = metadata['Stop Time of Acquisition']
group['zeroDopplerStartTime'] = np.string_(start.isoformat())
group['zeroDopplerEndTime'] = np.string_(stop.isoformat())
# Look direction
group['lookDirection'] = np.string_(metadata['Look Direction'].lower())
# Radar grid
radar_grid = metadata['Radar Grid']
# Create DEM interpolators for min and max height
dem_min = DEMInterpolator(height=min_height)
dem_max = DEMInterpolator(height=max_height)
# Get min and max bounding boxes
box_min = get_geo_perimeter_wkt(radar_grid, orbit,
dem = dem_min)
box_max = get_geo_perimeter_wkt(radar_grid, orbit,
dem = dem_max)
# Determine minimum and maximum polygons
poly_min = wkt.loads(box_min)
poly_max = wkt.loads(box_max)
# Get polygon from intersection of poly_min and poly_max
poly = poly_min | poly_max
group['boundingPolygon'] = np.string_(poly.envelope)
group['boundingPolygon'].attrs['epsg'] = 4326
group['boundingPolygon'].attrs['ogr_geometry'] = np.string_('polygon')
group['boundingPolygon'].attrs['description'] = np.string_(
'OGR compatible WKT representation of bounding polygon of the image')
def update_doppler(fid, metadata, frequency): # time, position, velocity,
"""
Update HDF5 file for Doppler, FM rate, and effective velocity.
"""
# Get doppler group from metadata
parameters = 'science/LSAR/SLC/metadata/processingInformation/parameters'
if parameters not in fid:
pgroup = fid.create_group(parameters)
else:
pgroup = fid[parameters]
frequency_str = 'frequency' + frequency
if frequency_str not in pgroup:
dgroup = pgroup.create_group(frequency_str)
else:
dgroup = pgroup[frequency_str]
# Delete prior datasets
if 'dopplerCentroid' in dgroup:
del dgroup['dopplerCentroid']
if 'azimuthFMRate' in dgroup:
del dgroup['azimuthFMRate']
# If frequency A, clear old slant range and azimuth time datasets
if frequency == 'A':
# Clear
if 'slantRange' in pgroup:
del pgroup['slantRange']
if 'zeroDopplerTime' in pgroup:
del pgroup['zeroDopplerTime']
# Replace with preconstructed grids
pgroup['slantRange'] = metadata['calibration_range_grid']
pgroup['slantRange'].attrs['description'] = np.string_(
'Slant range dimension corresponding to processing information records'
)
pgroup['slantRange'].attrs['units'] = np.string_('meters')
pgroup['zeroDopplerTime'] = metadata['calibration_azimuth_grid']
pgroup['zeroDopplerTime'].attrs['description'] = np.string_(
'Zero doppler time dimension corresponding to processing information records'
)
pgroup['zeroDopplerTime'].attrs['units'] = np.string_(metadata['ref_epoch_attr'])
rgvals = pgroup['slantRange'][()]
azsecs = pgroup['zeroDopplerTime'][()]
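    # The stored Doppler coefficients are ordered lowest-degree first;
    # np.polyval expects highest-degree first, hence the [::-1] below.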
if 'Doppler coeffs km' in metadata.keys():
doppler_coeff = metadata['Doppler coeffs km']
rgvals_in_km = rgvals / 1000.0
dop_vals = np.polyval(doppler_coeff[::-1], rgvals_in_km)
dop_vals = np.tile(dop_vals, (len(azsecs), 1))
else:
dop_coeffs = metadata['Doppler coeffs rbin']
range_bin = np.arange(0, len(rgvals), 1.0)
dop_vals = np.polyval(dop_coeffs[::-1], range_bin)
dop_vals = np.tile(dop_vals, (len(azsecs), 1))
# Update Doppler values
dgroup['dopplerCentroid'] = dop_vals
dgroup['dopplerCentroid'].attrs['description'] = np.string_(
'2D LUT of Doppler Centroid for Frequency ' + frequency)
dgroup['dopplerCentroid'].attrs['units'] = np.string_('Hz')
if __name__ == "__main__":
'''
Main driver.
'''
# Parse command line
args = parse_args()
# Process the data
process(args=args)
|
python
|
from django.conf.urls import url
from django.urls import include, path
from django.contrib.auth import views as auth_views
from django.urls import reverse_lazy
from envelope.views import ContactView
from . import views
from .forms import MyContactForm
urlpatterns = [
# path('change-password/', auth_views.PasswordChangeView.as_view()),
# url(r'^register/$', views.RegisterFormView.as_view(), name='register'),
url(r'^cabinet/$', views.UserCabinetView.as_view(), name='cabinet'),
    url(r'^settings/$', views.UserSettingsView.as_view(), name='settings'),
#url(r'^pricing/$',views.PricingView.as_view(), name='pricing'),
url(r'^contacts/', ContactView.as_view(form_class=MyContactForm), name='contacts'),
# url(r'^makeorder/(?P<purchase_name>[\w-]+)/$', views.MakeOrder.as_view(), name='makeorder'),
#url(r'^purchase/(?P<purchase_name>[\w-]+)/$', views.RobokassaView.as_view(), name='purchase'),
#url(r'^robokassa/', include('django_robokassa.urls')),
]
|
python
|
"""
Finds all possible combinations of integers below value to generate value
Author: Juan Rios
"""
import math
"""
Calculate the possible number of partitions
"""
def find_partitions_dict(limit_value):
p = {(1,1):0}
for i in range(2,limit_value+1):
tmp = 1
index = 1
while index<=(i-index):
tmp += p[(i-index,index)]
index += 1
p[(i,1)]=tmp
index = 2
while index<=i:
if index<=(i//2):
p[(i,index)]= p[(i,index-1)]-p[(i-index+1,index-1)]
else:
p[(i,index)]= 1
index += 1
return p[(i,1)]
def find_partitions_array(limit_value):
p = [[0]]
for i in range(2,limit_value+1):
tmp = 1
for index in range(1,i//2+1):
tmp += p[i-index-1][index-1]
p.append([tmp])
for index in range(2,i+1):
if index<=(i//2):
p[i-1].append(p[i-1][index-2]-p[i-index][index-2])
else:
for c in range(0,i-index+1):
p[i-1].append(1)
break
return p[i-1][0]
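# Quick sanity check of the algorithm above: find_partitions_array(5) == 6,
# matching the six ways to write 5 as a sum of at least two positive integers:
# 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.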
if __name__ == "__main__":
limit_value = 100
print('The total possible combination of integers to generate the value {0} is {1}'.format(limit_value,find_partitions_array(limit_value)))
|
python
|
""" This script generates google search queries and uses requests to go get
the HTML from the google search page. This was the most time-consuming
step in the process.
Certain lines are commented out as evidence of debugging and
to help me remember what I had already done.
    Anyway, this script builds search links by week from 2010 to 2016 (inclusive)
    and saves the raw HTML of each Google search results page to a file."""
__author__ = 'Kyle Martin'
__email__ = '[email protected]'
import requests as r
import time
import random
# year_range = range(2014, 2017)
year_range = range(2010, 2013)
month_range = range(1, 13)
weeks = [(1, 7), (8, 14), (15, 21), (22, 28)]
userAgent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/'
'537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36')
# for week in weeks:
# print('week ' + str(week) +
# ' start: ' + str(week[0]) +
# ' end: ' + str(week[1]))
def get_links(month_range, weeks):
"""
    Build one Google News search URL per (year, month, week)
    combination and return the list of URLs.
"""
links = []
for year in year_range:
for month in month_range:
for week in weeks:
# append to a search_str list
link_a = ('https://www.google.com/search?q=de&num=100&lr'
'=lang_fr&tbs=cdr:1,cd_min:')
link_b = (str(month) + '/' + str(week[0]) + '/' +
str(year) + ',cd_max:' +
str(month) + '/' + str(week[1]))
link_c = '/' + str(year) + ',sbd:1&tbm=nws&cr=countryFR'
link = link_a + link_b + link_c
links += [link, ]
return links
query_list = get_links(month_range, weeks)
""" I used the following lines to track how
often I was getting blocked by google.
# 0 - 26 at 10:56 PM mar 15
# 27 - 48 at 4:38 PM mar 16
# 48 - 60 at 10:45 AM mar 17
# stopped at 81 11:52 AM"""
# I had to manually set the index value of my query list whenever the script
# would die out.
index = 120
for url in query_list[120:len(query_list)]:
print('GETting {}...'.format(url))
headers = {'user-agent': userAgent}
response = r.get(url, headers=headers)
with open('new-webpages/search_no_' + str(index) + '.txt', 'w+') as f:
f.write(response.text)
print('wrote file no. ' + str(index))
time.sleep(random.uniform(30, 45))
index += 1
print('done!')
|
python
|
from django import forms
from django.core.exceptions import ValidationError
import re
def validar_exclucion_numeros(value):
if value != 'None':
numeros = re.sub("[^0-9]", "", value)
if numeros:
raise ValidationError("No permite numeros")
else:
return value
return value
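# Example: validar_exclucion_numeros("Bogota2") raises ValidationError because the
# value contains a digit, while "Bogota" is returned unchanged.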
class FormPersonalizado(forms.Form):
nombre = forms.CharField(label='Nombre', max_length=100,required=True)
apellido = forms.CharField(label='Apellido', max_length=100)
usuario = forms.CharField(label='Usuario', max_length=5)
ciudad = forms.CharField(label='Ciudad', max_length=100,validators =[validar_exclucion_numeros])
def __init__(self, *args, **kwargs):
super(FormPersonalizado, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control'
|
python
|
class Solution:
def maxNumber(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[int]
"""
def prep(nums, k):
            dr = len(nums) - k  # number of digits to drop
            stay = []  # digits to keep (monotonic stack)
for num in nums:
                # room left to drop: dr
                # drop the previous kept digit while it is smaller than the
                # current one (stack method: replace on a rise, keep on a fall)
while dr and stay and stay[-1] < num:
stay.pop()
dr -= 1
stay.append(num)
return stay[:k]
def merge(x, y):
return [max(x, y).pop(0) for _ in x + y]
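        # Hand-checked examples of the helpers above:
        #   prep([3, 4, 6, 5], 2)    -> [6, 5]          (drop 3 and 4, keep the larger digits)
        #   merge([6, 5], [9, 8, 3]) -> [9, 8, 6, 5, 3]  (greedy merge, comparing the remaining lists)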
l1 = len(nums1)
l2 = len(nums2)
#dr = l1 + l2 -k
r = [0]
for i in range(k + 1):
            # try every split of k between the two arrays and keep the largest result
if i <= l1 and k-i <= l2:
r = max(merge(prep(nums1, i), prep(nums2, k - i)), r)
return r
if __name__ == "__main__":
n1 = [3, 4, 6, 5]
n2 = [9, 1, 2, 5, 8, 3]
k = 5
so = Solution()
res = so.maxNumber(n1, n2, k)
print(res)
|
python
|
from BeautifulSoup import BeautifulSoup
from urllib2 import urlopen
import re
print("Searching... please wait")
emails=[]
url="http://www.bbe.caltech.edu/people-public/all/all"
data=urlopen(url)
parse=BeautifulSoup(data).findAll('a')
b=[parse[k]['href'][7:] for k in range(len(parse)) if "@" in parse[k]['href']]
for k in b: emails.append(k)
links=[]
url="http://www.eas.caltech.edu/people"
data=urlopen(url)
parse=BeautifulSoup(data).findAll('ul')[:2]
for k in parse:
for j in k.findAll('li'):
links.append("http://www.eas.caltech.edu"+j.a['href'])
for link in links:
data=urlopen(link)
parse=BeautifulSoup(data).findAll('div',attrs={'class':'contact-box'})
try:
val=re.split("Email: |<|>",str(parse[0].p))[3]+"@"+re.split("Email: |<|>",str(parse[0].p))[-3]
if "gmail.com" in val or "caltech.edu" in val :
emails.append(val)
except: pass
url="http://www.pma.caltech.edu/people-public/professorial-faculty/all"
data=urlopen(url)
parse=BeautifulSoup(data).findAll('a')
b=[parse[k]['href'][7:] for k in range(len(parse)) if "@" in parse[k]['href']]
for k in b: emails.append(k)
url="http://www.hss.caltech.edu/people-public/Professorial%20Faculty/all"
data=urlopen(url)
parse=BeautifulSoup(data).findAll('a')
b=[parse[k]['href'][7:] for k in range(len(parse)) if "@" in parse[k]['href']]
for k in b: emails.append(k)
url="http://www.gps.caltech.edu/people-public/Professorial-Faculty/all"
data=urlopen(url)
parse=BeautifulSoup(data).findAll('a')
b=[parse[k]['href'][7:] for k in range(len(parse)) if "@" in parse[k]['href']]
for k in b: emails.append(k)
f=open('output.csv','w')
for k in emails :
if k.count('@')>1:
k='@'.join(k.split('@')[-2:])
if k.startswith('Nora O') :
k=k.split('(')[1].split(',')[0]
f.write(k+'\n')
f.close()
print("Finished. Results saved to caltech_results.csv")
|
python
|
from __future__ import absolute_import
from .base import ViewTest, TemplateViewTest, RedirectViewTest
from .dates import (ArchiveIndexViewTests, YearArchiveViewTests,
MonthArchiveViewTests, WeekArchiveViewTests, DayArchiveViewTests,
DateDetailViewTests)
from .detail import DetailViewTest
from .edit import (ModelFormMixinTests, CreateViewTests, UpdateViewTests,
DeleteViewTests)
from .list import ListViewTests
|
python
|
#!/usr/bin/env python
"""
Insert blank notes at the top of every set of cards.
Useful for creating new cards in bulk with an editor.
This is undone by running the normalization code, `sort.py`.
"""
import glob
from collections import OrderedDict
from library import DynamicInlineTableDict
from library import NoteLibrary
from sort import INDEX_NAME
from sort import write_toml
def insert_blanks(notes, filename):
blank_note = OrderedDict()
keys = [
('kanji', ''),
('kana', ''),
('english', ''),
]
if 'verbs' in filename:
conjugated = OrderedDict([
('base', ''),
('past', ''),
('plural', ''),
('continuous', ''),
])
conjugated = DynamicInlineTableDict(conjugated)
keys.extend([
('english-conjugated', conjugated),
('verb-type', ''),
('transitive', False),
])
keys.extend([
('source', ''),
('level', ''),
('explain', ''),
('tags', []),
])
    blank_note = OrderedDict(keys)
notes[INDEX_NAME].insert(0, blank_note)
def main():
for filename in glob.glob('**/*.toml', recursive=True):
print('Processing file: {0}'.format(filename))
if 'cardgen' in filename or 'temp/' in filename:
continue # XXX: Things here shouldn't be processed for now.
try:
notes = NoteLibrary.read_notes_from_toml_file(filename)
insert_blanks(notes, filename)
write_toml(notes, filename)
except Exception as e:
print('Error processing file: {0}'.format(filename))
print(e)
if __name__ == '__main__':
main()
|
python
|
from math import sqrt
class cone():
def __init__(self,radius,height):
self.radius = radius
self.height = height
def volume(self):
r = 3.14*self.radius**2*(self.height/3)
print("Volume of Cone a is : ",r)
def surfaceArea(self):
c = 3.14*self.radius
d = sqrt(self.radius**2+self.height**2)
e = (self.radius+d)
print("Surface Area of a Cone is : " ,c*e)
a = float(input("Enter Radius : "))
b = float(input("Enter Height : "))
x = cone(a,b)
x.volume()
x.surfaceArea()
|
python
|
# Copyright 2018 by Paolo Inglese, National Phenome Centre, Imperial College
# London
# All rights reserved.
# This file is part of DESI-MSI recalibration, and is released under the
# "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this
# package.
import tempfile
import numpy as np
from joblib import Memory
from joblib import Parallel, delayed
from .msi import MSI
class MSBinner:
def __init__(self, decimals: int):
self.__decimals = decimals
self.__bin_cmz = None
def bin(self, msobj: MSI) -> np.ndarray:
def __thread(msp_: np.ndarray, cmz_, ndec):
bin_yi = np.zeros(len(cmz_))
mz__ = np.round(msp_[:, 0], decimals=ndec)
u, s = np.unique(mz__, return_index=True)
yi_ = np.split(msp_[:, 1], s[1:])
# Sum intensities same M/Z
yi_ = [np.sum(x) for x in yi_]
bin_yi[np.isin(cmz_, u)] = np.asarray(yi_)
return bin_yi
list_msx = msobj.msdata
# Extract the full m/z vector and bin it at the digit level
print("Binning M/Z values with bin size = {} M/Z ...".format(
10 ** self.__decimals))
_idx = msobj.pixels_indices
binned_mz = np.empty(0)
for msp in list_msx:
mz_ = np.round(msp[:, 0], decimals=self.__decimals)
binned_mz = np.append(binned_mz, mz_)
binned_mz = np.unique(binned_mz)
del mz_
# binned_mz = np.round(all_mz, decimals=self.__decimals)
self.__bin_cmz = np.sort(np.unique(binned_mz))
print("Num. M/Z bins: {}".format(len(self.__bin_cmz)))
# Bin the spectra intensities: skip empty objects
print("Binning intensities ...")
tempdir = tempfile.mkdtemp()
bin_yi_list = \
Parallel(n_jobs=-1,
temp_folder=tempdir)(
delayed(__thread)(msp, self.__bin_cmz, self.__decimals)
for msp in list_msx)
# bin_yi_list = [__thread(msp, self.__bin_cmz, self.__decimals)
# for msp in list_msx]
binned_intensities = np.zeros(
(np.prod(msobj.dim_xy), len(self.__bin_cmz)))
for i, idx in enumerate(_idx):
binned_intensities[idx, :] = bin_yi_list[i]
del bin_yi_list
memory = Memory(tempdir, verbose=0)
memory.clear(warn=False)
return binned_intensities
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
vi $HOME/.LoginAccount.txt
[mailClient]
host = smtp.qq.com
port = 25
user = ***
pass = ***
fr = [email protected]
to = [email protected]
debuglevel = True
login = False
starttls = False
'''
__all__ = ['get_smtp_client', 'sendmail']
import os, sys
from ConfigParser import ConfigParser
from ConfigParser import NoOptionError
from smtplib import SMTP
from smtplib import SMTPAuthenticationError
from email import Encoders
from email.base64MIME import encode as encode_base64
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def get_smtp_client(stor):
host = stor['host']
port = stor['port']
user = stor['user']
passwd = stor['pass']
debuglevel = stor['debuglevel']
login = stor['login']
starttls = stor['starttls']
s = SMTP(host, port)
if debuglevel:
s.set_debuglevel(True)
if starttls and login:
s.ehlo()
s.starttls()
s.ehlo()
s.login(user, passwd)
elif login:
try:
s.login(user, passwd)
except SMTPAuthenticationError:
sys.stdout.write('\n------- try Auth Login again ------\n')
s = SMTP(host, port)
if debuglevel:
s.set_debuglevel(True)
s.ehlo()
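            # Manual AUTH LOGIN handshake: the server answers 334 to each
            # base64-encoded prompt (username, then password) and 235 on success.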
(code, resp) = s.docmd('AUTH LOGIN')
if code != 334:
raise SMTPAuthenticationError(code, resp)
(code, resp) = s.docmd(encode_base64(user, eol=""))
if code != 334:
raise SMTPAuthenticationError(code, resp)
(code, resp) = s.docmd(encode_base64(passwd, eol=""))
if code != 235:
raise SMTPAuthenticationError(code, resp)
return s
def sendmail(server, msg):
address = [i for f in ('To', 'Cc', 'Bcc') if msg[f] for i in msg[f].split(',')]
server.sendmail(msg['From'], address, msg.as_string())
def fn(options, args):
cfg = ConfigParser()
cfg.read(os.path.join(os.getenv('HOME'), '.LoginAccount.txt'))
flag = 'mailClient'
keys = ('host', 'port', 'user', 'pass', 'fr', 'to', 'debuglevel', 'login', 'starttls')
stor = {}
for k in keys: stor.setdefault(k, '')
try:
stor['host'] = cfg.get(flag, 'host')
stor['port'] = cfg.getint(flag, 'port')
stor['user'] = cfg.get(flag, 'user')
stor['pass'] = cfg.get(flag, 'pass')
stor['fr'] = cfg.get(flag, 'fr')
stor['to'] = cfg.get(flag, 'to')
stor['debuglevel'] = cfg.getboolean(flag, 'debuglevel')
stor['login'] = cfg.getboolean(flag, 'login')
stor['starttls'] = cfg.getboolean(flag, 'starttls')
except NoOptionError: pass
if options.addr:
stor['to'] = options.addr
s = get_smtp_client(stor)
for arg in args:
sys.stdout.write('sending... ' + arg)
msg = MIMEMultipart()
msg['From'] = stor['fr']
msg['Subject'] = arg
msg['To'] = stor['to']
msg.set_boundary('===== Baby, I love you. https://twitter.com/number23_cn =====')
if options.atta:
data = MIMEBase('application', 'octet-stream')
data.set_payload(open(arg, 'rb').read())
Encoders.encode_base64(data)
data.add_header('Content-Disposition', 'attachment', filename = arg)
msg.attach(data)
else:
b = '''<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body><pre>'''
b += open(arg, 'rb').read()
b += '''</pre></body></html>'''
body = MIMEText(b, _subtype = 'html', _charset = 'utf-8')
msg.attach(body)
sendmail(s, msg)
sys.stdout.write(' done.\n')
s.close()
if __name__ == '__main__':
from optparse import OptionParser
usage = '%prog [-e addr] [-a] args...'
parser = OptionParser(usage=usage)
parser.add_option('-e', '--addr', dest='addr',
help='receive email address', metavar='address')
parser.add_option('-a', '--atta', dest='atta',
action='store_true', default=False,
help='attachment flag')
(options, args) = parser.parse_args()
if not args:
parser.print_usage()
sys.exit(1)
fn(options, args)
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.profil, name="profil"),
path('sport_profil/', views.create_sport_profil, name="createSportProfil"),
]
|
python
|
from __future__ import division, absolute_import
__copyright__ = """
Copyright (C) 2013 Andreas Kloeckner
Copyright (C) 2018 Matt Wala
Copyright (C) 2018 Hao Gao
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import pyopencl as cl
import time
import pytest
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
from pymbolic import evaluate
from boxtree.cost import FMMCostModel, _PythonFMMCostModel
from boxtree.cost import make_pde_aware_translation_cost_model
import sys
import logging
import os
logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING"))
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
SUPPORTS_PROCESS_TIME = (sys.version_info >= (3, 3))
# {{{ test_compare_cl_and_py_cost_model
@pytest.mark.opencl
@pytest.mark.parametrize(
("nsources", "ntargets", "dims", "dtype"), [
(50000, 50000, 3, np.float64)
]
)
def test_compare_cl_and_py_cost_model(ctx_factory, nsources, ntargets, dims, dtype):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
# {{{ Generate sources, targets and target_radii
from boxtree.tools import make_normal_particle_array as p_normal
sources = p_normal(queue, nsources, dims, dtype, seed=15)
targets = p_normal(queue, ntargets, dims, dtype, seed=18)
from pyopencl.clrandom import PhiloxGenerator
rng = PhiloxGenerator(queue.context, seed=22)
target_radii = rng.uniform(
queue, ntargets, a=0, b=0.05, dtype=dtype
).get()
# }}}
# {{{ Generate tree and traversal
from boxtree import TreeBuilder
tb = TreeBuilder(ctx)
tree, _ = tb(
queue, sources, targets=targets, target_radii=target_radii,
stick_out_factor=0.15, max_particles_in_box=30, debug=True
)
from boxtree.traversal import FMMTraversalBuilder
tg = FMMTraversalBuilder(ctx, well_sep_is_n_away=2)
trav_dev, _ = tg(queue, tree, debug=True)
trav = trav_dev.get(queue=queue)
# }}}
# {{{ Construct cost models
cl_cost_model = FMMCostModel(None)
python_cost_model = _PythonFMMCostModel(None)
constant_one_params = cl_cost_model.get_unit_calibration_params().copy()
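    # Fix the FMM order to 10 on every level so the OpenCL and Python cost
    # models are evaluated with identical parameters.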
for ilevel in range(trav.tree.nlevels):
constant_one_params["p_fmm_lev%d" % ilevel] = 10
xlat_cost = make_pde_aware_translation_cost_model(dims, trav.tree.nlevels)
# }}}
# {{{ Test process_form_multipoles
nlevels = trav.tree.nlevels
p2m_cost = np.zeros(nlevels, dtype=np.float64)
for ilevel in range(nlevels):
p2m_cost[ilevel] = evaluate(
xlat_cost.p2m(ilevel),
context=constant_one_params
)
p2m_cost_dev = cl.array.to_device(queue, p2m_cost)
queue.finish()
start_time = time.time()
cl_form_multipoles = cl_cost_model.process_form_multipoles(
queue, trav_dev, p2m_cost_dev
)
queue.finish()
logger.info("OpenCL time for process_form_multipoles: {0}".format(
str(time.time() - start_time)
))
start_time = time.time()
python_form_multipoles = python_cost_model.process_form_multipoles(
queue, trav, p2m_cost
)
logger.info("Python time for process_form_multipoles: {0}".format(
str(time.time() - start_time)
))
assert np.array_equal(cl_form_multipoles.get(), python_form_multipoles)
# }}}
# {{{ Test process_coarsen_multipoles
m2m_cost = np.zeros(nlevels - 1, dtype=np.float64)
for target_level in range(nlevels - 1):
m2m_cost[target_level] = evaluate(
xlat_cost.m2m(target_level + 1, target_level),
context=constant_one_params
)
m2m_cost_dev = cl.array.to_device(queue, m2m_cost)
queue.finish()
start_time = time.time()
cl_coarsen_multipoles = cl_cost_model.process_coarsen_multipoles(
queue, trav_dev, m2m_cost_dev
)
queue.finish()
logger.info("OpenCL time for coarsen_multipoles: {0}".format(
str(time.time() - start_time)
))
start_time = time.time()
python_coarsen_multipoles = python_cost_model.process_coarsen_multipoles(
queue, trav, m2m_cost
)
logger.info("Python time for coarsen_multipoles: {0}".format(
str(time.time() - start_time)
))
assert cl_coarsen_multipoles == python_coarsen_multipoles
# }}}
# {{{ Test process_direct
queue.finish()
start_time = time.time()
cl_ndirect_sources_per_target_box = \
cl_cost_model.get_ndirect_sources_per_target_box(queue, trav_dev)
cl_direct = cl_cost_model.process_direct(
queue, trav_dev, cl_ndirect_sources_per_target_box, 5.0
)
queue.finish()
logger.info("OpenCL time for process_direct: {0}".format(
str(time.time() - start_time)
))
start_time = time.time()
python_ndirect_sources_per_target_box = \
python_cost_model.get_ndirect_sources_per_target_box(queue, trav)
python_direct = python_cost_model.process_direct(
queue, trav, python_ndirect_sources_per_target_box, 5.0
)
logger.info("Python time for process_direct: {0}".format(
str(time.time() - start_time)
))
assert np.array_equal(cl_direct.get(), python_direct)
# }}}
# {{{ Test aggregate_over_boxes
start_time = time.time()
cl_direct_aggregate = cl_cost_model.aggregate_over_boxes(cl_direct)
queue.finish()
logger.info("OpenCL time for aggregate_over_boxes: {0}".format(
str(time.time() - start_time)
))
start_time = time.time()
python_direct_aggregate = python_cost_model.aggregate_over_boxes(python_direct)
logger.info("Python time for aggregate_over_boxes: {0}".format(
str(time.time() - start_time)
))
assert cl_direct_aggregate == python_direct_aggregate
# }}}
# {{{ Test process_list2
nlevels = trav.tree.nlevels
m2l_cost = np.zeros(nlevels, dtype=np.float64)
for ilevel in range(nlevels):
m2l_cost[ilevel] = evaluate(
xlat_cost.m2l(ilevel, ilevel),
context=constant_one_params
)
m2l_cost_dev = cl.array.to_device(queue, m2l_cost)
queue.finish()
start_time = time.time()
cl_m2l_cost = cl_cost_model.process_list2(queue, trav_dev, m2l_cost_dev)
queue.finish()
logger.info("OpenCL time for process_list2: {0}".format(
str(time.time() - start_time)
))
start_time = time.time()
python_m2l_cost = python_cost_model.process_list2(queue, trav, m2l_cost)
logger.info("Python time for process_list2: {0}".format(
str(time.time() - start_time)
))
assert np.array_equal(cl_m2l_cost.get(), python_m2l_cost)
# }}}
    # {{{ Test process_list3
m2p_cost = np.zeros(nlevels, dtype=np.float64)
for ilevel in range(nlevels):
m2p_cost[ilevel] = evaluate(
xlat_cost.m2p(ilevel),
context=constant_one_params
)
m2p_cost_dev = cl.array.to_device(queue, m2p_cost)
queue.finish()
start_time = time.time()
cl_m2p_cost = cl_cost_model.process_list3(queue, trav_dev, m2p_cost_dev)
queue.finish()
logger.info("OpenCL time for process_list3: {0}".format(
str(time.time() - start_time)
))
start_time = time.time()
python_m2p_cost = python_cost_model.process_list3(queue, trav, m2p_cost)
logger.info("Python time for process_list3: {0}".format(
str(time.time() - start_time)
))
assert np.array_equal(cl_m2p_cost.get(), python_m2p_cost)
# }}}
# {{{ Test process_list4
p2l_cost = np.zeros(nlevels, dtype=np.float64)
for ilevel in range(nlevels):
p2l_cost[ilevel] = evaluate(
xlat_cost.p2l(ilevel),
context=constant_one_params
)
p2l_cost_dev = cl.array.to_device(queue, p2l_cost)
queue.finish()
start_time = time.time()
cl_p2l_cost = cl_cost_model.process_list4(queue, trav_dev, p2l_cost_dev)
queue.finish()
logger.info("OpenCL time for process_list4: {0}".format(
str(time.time() - start_time)
))
start_time = time.time()
python_p2l_cost = python_cost_model.process_list4(queue, trav, p2l_cost)
logger.info("Python time for process_list4: {0}".format(
str(time.time() - start_time)
))
assert np.array_equal(cl_p2l_cost.get(), python_p2l_cost)
# }}}
# {{{ Test process_refine_locals
l2l_cost = np.zeros(nlevels - 1, dtype=np.float64)
for ilevel in range(nlevels - 1):
l2l_cost[ilevel] = evaluate(
xlat_cost.l2l(ilevel, ilevel + 1),
context=constant_one_params
)
l2l_cost_dev = cl.array.to_device(queue, l2l_cost)
queue.finish()
start_time = time.time()
cl_refine_locals_cost = cl_cost_model.process_refine_locals(
queue, trav_dev, l2l_cost_dev
)
queue.finish()
logger.info("OpenCL time for refine_locals: {0}".format(
str(time.time() - start_time)
))
start_time = time.time()
python_refine_locals_cost = python_cost_model.process_refine_locals(
queue, trav, l2l_cost
)
logger.info("Python time for refine_locals: {0}".format(
str(time.time() - start_time)
))
assert cl_refine_locals_cost == python_refine_locals_cost
# }}}
# {{{ Test process_eval_locals
l2p_cost = np.zeros(nlevels, dtype=np.float64)
for ilevel in range(nlevels):
l2p_cost[ilevel] = evaluate(
xlat_cost.l2p(ilevel),
context=constant_one_params
)
l2p_cost_dev = cl.array.to_device(queue, l2p_cost)
queue.finish()
start_time = time.time()
cl_l2p_cost = cl_cost_model.process_eval_locals(queue, trav_dev, l2p_cost_dev)
queue.finish()
logger.info("OpenCL time for process_eval_locals: {0}".format(
str(time.time() - start_time)
))
start_time = time.time()
python_l2p_cost = python_cost_model.process_eval_locals(queue, trav, l2p_cost)
logger.info("Python time for process_eval_locals: {0}".format(
str(time.time() - start_time)
))
assert np.array_equal(cl_l2p_cost.get(), python_l2p_cost)
# }}}
# }}}
# {{{ test_estimate_calibration_params
@pytest.mark.opencl
def test_estimate_calibration_params(ctx_factory):
from boxtree.pyfmmlib_integration import FMMLibExpansionWrangler
nsources_list = [1000, 2000, 3000, 4000]
ntargets_list = [1000, 2000, 3000, 4000]
dims = 3
dtype = np.float64
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
traversals = []
traversals_dev = []
level_to_orders = []
timing_results = []
def fmm_level_to_nterms(tree, ilevel):
return 10
for nsources, ntargets in zip(nsources_list, ntargets_list):
# {{{ Generate sources, targets and target_radii
from boxtree.tools import make_normal_particle_array as p_normal
sources = p_normal(queue, nsources, dims, dtype, seed=15)
targets = p_normal(queue, ntargets, dims, dtype, seed=18)
from pyopencl.clrandom import PhiloxGenerator
rng = PhiloxGenerator(queue.context, seed=22)
target_radii = rng.uniform(
queue, ntargets, a=0, b=0.05, dtype=dtype
).get()
# }}}
# {{{ Generate tree and traversal
from boxtree import TreeBuilder
tb = TreeBuilder(ctx)
tree, _ = tb(
queue, sources, targets=targets, target_radii=target_radii,
stick_out_factor=0.15, max_particles_in_box=30, debug=True
)
from boxtree.traversal import FMMTraversalBuilder
tg = FMMTraversalBuilder(ctx, well_sep_is_n_away=2)
trav_dev, _ = tg(queue, tree, debug=True)
trav = trav_dev.get(queue=queue)
traversals.append(trav)
traversals_dev.append(trav_dev)
# }}}
wrangler = FMMLibExpansionWrangler(trav.tree, 0, fmm_level_to_nterms)
level_to_orders.append(wrangler.level_nterms)
timing_data = {}
from boxtree.fmm import drive_fmm
src_weights = np.random.rand(tree.nsources).astype(tree.coord_dtype)
drive_fmm(trav, wrangler, (src_weights,), timing_data=timing_data)
timing_results.append(timing_data)
if SUPPORTS_PROCESS_TIME:
time_field_name = "process_elapsed"
else:
time_field_name = "wall_elapsed"
def test_params_sanity(test_params):
param_names = ["c_p2m", "c_m2m", "c_p2p", "c_m2l", "c_m2p", "c_p2l", "c_l2l",
"c_l2p"]
for name in param_names:
assert isinstance(test_params[name], np.float64)
def test_params_equal(test_params1, test_params2):
param_names = ["c_p2m", "c_m2m", "c_p2p", "c_m2l", "c_m2p", "c_p2l", "c_l2l",
"c_l2p"]
for name in param_names:
assert test_params1[name] == test_params2[name]
python_cost_model = _PythonFMMCostModel(make_pde_aware_translation_cost_model)
python_model_results = []
for icase in range(len(traversals)-1):
traversal = traversals[icase]
level_to_order = level_to_orders[icase]
python_model_results.append(python_cost_model.cost_per_stage(
queue, traversal, level_to_order,
_PythonFMMCostModel.get_unit_calibration_params(),
))
python_params = python_cost_model.estimate_calibration_params(
python_model_results, timing_results[:-1], time_field_name=time_field_name
)
test_params_sanity(python_params)
cl_cost_model = FMMCostModel(make_pde_aware_translation_cost_model)
cl_model_results = []
for icase in range(len(traversals_dev)-1):
traversal = traversals_dev[icase]
level_to_order = level_to_orders[icase]
cl_model_results.append(cl_cost_model.cost_per_stage(
queue, traversal, level_to_order,
FMMCostModel.get_unit_calibration_params(),
))
cl_params = cl_cost_model.estimate_calibration_params(
cl_model_results, timing_results[:-1], time_field_name=time_field_name
)
test_params_sanity(cl_params)
if SUPPORTS_PROCESS_TIME:
test_params_equal(cl_params, python_params)
# }}}
# {{{ test_cost_model_op_counts_agree_with_constantone_wrangler
class OpCountingTranslationCostModel(object):
"""A translation cost model which assigns at cost of 1 to each operation."""
def __init__(self, dim, nlevels):
pass
@staticmethod
def direct():
return 1
@staticmethod
def p2l(level):
return 1
l2p = p2l
p2m = p2l
m2p = p2l
@staticmethod
def m2m(src_level, tgt_level):
return 1
l2l = m2m
m2l = m2m
@pytest.mark.opencl
@pytest.mark.parametrize(
("nsources", "ntargets", "dims", "dtype"), [
(5000, 5000, 3, np.float64)
]
)
def test_cost_model_op_counts_agree_with_constantone_wrangler(
ctx_factory, nsources, ntargets, dims, dtype):
ctx = ctx_factory()
queue = cl.CommandQueue(ctx)
from boxtree.tools import make_normal_particle_array as p_normal
sources = p_normal(queue, nsources, dims, dtype, seed=16)
targets = p_normal(queue, ntargets, dims, dtype, seed=19)
from pyopencl.clrandom import PhiloxGenerator
rng = PhiloxGenerator(queue.context, seed=20)
target_radii = rng.uniform(queue, ntargets, a=0, b=0.04, dtype=dtype).get()
from boxtree import TreeBuilder
tb = TreeBuilder(ctx)
tree, _ = tb(
queue, sources, targets=targets, target_radii=target_radii,
stick_out_factor=0.15, max_particles_in_box=30, debug=True
)
from boxtree.traversal import FMMTraversalBuilder
tg = FMMTraversalBuilder(ctx, well_sep_is_n_away=2)
trav_dev, _ = tg(queue, tree, debug=True)
trav = trav_dev.get(queue=queue)
from boxtree.tools import ConstantOneExpansionWrangler
wrangler = ConstantOneExpansionWrangler(trav.tree)
timing_data = {}
from boxtree.fmm import drive_fmm
src_weights = np.random.rand(tree.nsources).astype(tree.coord_dtype)
drive_fmm(trav, wrangler, (src_weights,), timing_data=timing_data)
cost_model = FMMCostModel(
translation_cost_model_factory=OpCountingTranslationCostModel
)
level_to_order = np.array([1 for _ in range(tree.nlevels)])
modeled_time = cost_model.cost_per_stage(
queue, trav_dev, level_to_order,
FMMCostModel.get_unit_calibration_params(),
)
mismatches = []
for stage in timing_data:
if timing_data[stage]["ops_elapsed"] != modeled_time[stage]:
mismatches.append(
(stage, timing_data[stage]["ops_elapsed"], modeled_time[stage]))
assert not mismatches, "\n".join(str(s) for s in mismatches)
# {{{ Test per-box cost
total_cost = 0.0
for stage in timing_data:
total_cost += timing_data[stage]["ops_elapsed"]
per_box_cost = cost_model.cost_per_box(
queue, trav_dev, level_to_order,
FMMCostModel.get_unit_calibration_params(),
)
total_aggregate_cost = cost_model.aggregate_over_boxes(per_box_cost)
assert total_cost == (
total_aggregate_cost
+ modeled_time["coarsen_multipoles"]
+ modeled_time["refine_locals"]
)
# }}}
# }}}
# You can test individual routines by typing
# $ python test_cost_model.py 'test_routine(cl.create_some_context)'
if __name__ == "__main__":
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
from pytest import main
main([__file__])
# vim: foldmethod=marker
|
python
|
"""
Render shortest/fastest paths for intervals as HTML.
"""
from typing import Any, List, Optional, Tuple
import attr
import osmnx as ox
from jinja2 import Template
from .routing import RestrictedGraph
from .calculation import IntervalCalculation
@attr.define
class Page:
"""
A full HTML page of interval calculations.
"""
_graph: RestrictedGraph
calculations: List[IntervalCalculation] = attr.ib(init=False, factory=list)
def add(self, calculation: IntervalCalculation):
"""
Add an interval to the page for future rendering.
"""
self.calculations.append(calculation) # pylint: disable=no-member
_template = Template(
"""
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<meta name="viewport" content="width=device-width,
initial-scale=1.0, maximum-scale=1.0, user-scalable=no" />
<style type="text/css">
html {
padding: 2em;
}
td {
padding: 0 3em 1em;
}
td:first-child {
padding-left: 0;
}
.folium-map {
display: block;
height: 50em;
width: 50em;
}
</style>
<script>
L_NO_TOUCH = false;
L_DISABLE_3D = false;
</script>
{% for script in scripts %}<script defer src="{{script}}"></script>{% endfor %}
{% for sheet in stylesheets %}<link rel="stylesheet" href="{{sheet}}"/>{% endfor %}
</head>
<body>
{% for calculation in this.calculations %}
{% if loop.index > 1 %}<hr>{% endif %}
{{ this.render_calculation(calculation) }}
{% endfor %}
</body>
</html>
"""
)
_calculation_template = Template(
"""
<div>
<table>
<thead>
<tr>
<th>From</th>
<th>To</th>
</tr>
</thead>
<tbody>
<tr>
<td>{{ page.render_stop(this.from_stop) }}</td>
<td>{{ page.render_stop(this.to_stop) }}</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Interval Type</th>
<th>Description</th>
<th>Directions</th>
</tr>
</thead>
<tbody>
<tr>
<td>{{ this.interval_type}}</td>
<td>{{ this.description }}</td>
<td>
{% if has_maps %}
<a target="_blank"
href="{{ google_maps_url | e}}">Google Maps</a><br>
<a target="_blank"
href="{{ osm_url | e}}">OpenStreetMap</a><br>
{% endif %}
</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Route</th>
<th>Length (ft)</th>
</tr>
</thead>
<tbody>
{% for item in results %}
<tr>
{% for cell in item %}<td>{{ cell }}</td>{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
{{ folium_map_html }}
<script type="text/javascript">
window.addEventListener('DOMContentLoaded', function() {
{{ folium_map_script }}
});
</script>
</div>
"""
)
_stop_template = Template(
"""
{{ this.description }} ({{ this.id }})<br>
{% if osm_url %}<a href="{{osm_url | e}}">OpenStreetMap</a><br>{% endif %}
<a href="https://www.mbta.com/stops/{{ this.id }}">MBTA.com</a><br>
<a href="https://api-v3.mbta.com/stops/{{ this.id }}">V3 API</a>
"""
)
def render_calculation(self, calculation: IntervalCalculation) -> str:
"""
Render the calculation as HTML.
"""
print(calculation)
results = self._calculate_results(calculation)
has_maps = calculation.is_located()
if has_maps:
google_maps_url = self._google_maps_url(
calculation.from_stop, calculation.to_stop
)
osm_url = self._osm_url(calculation.from_stop, calculation.to_stop)
folium_map = self._graph.folium_map(
calculation.from_stop, calculation.to_stop, calculation.paths()
)
folium_map.render()
map_root = folium_map.get_root()
folium_map_html = map_root.html.render()
folium_map_script = map_root.script.render()
else:
google_maps_url = osm_url = folium_map_html = folium_map_script = None
return self._calculation_template.render(
page=self,
this=calculation,
google_maps_url=google_maps_url,
osm_url=osm_url,
results=results,
folium_map_html=folium_map_html,
folium_map_script=folium_map_script,
)
def _calculate_results(
self, calculation: IntervalCalculation
) -> List[Tuple[str, str, str]]:
results = []
if calculation.interval.distance_between_measured:
results.append(
(
"Measured",
str(calculation.interval.distance_between_measured),
)
)
if calculation.interval.distance_between_map:
results.append(
(
"Map",
str(calculation.interval.distance_between_map),
)
)
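        # Pair labels with calculation.paths(); the first path is expected to be
        # the fastest route (drawn in red) and the second the shortest (yellow).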
named_paths = list(
zip(["Fastest (red)", "Shortest (yellow)"], calculation.paths())
)
for (name, path) in named_paths:
results.append(
(
name,
str(self.meters_to_feet(self._graph.path_length(path))),
)
)
if not named_paths:
results.append(("Empty", "0"))
return results
@staticmethod
def _google_maps_url(from_stop, to_stop):
return (
f"https://www.google.com/maps/dir/?api=1&"
f"travelmode=driving&"
f"origin={ from_stop.y },{ from_stop.x }&"
f"destination={ to_stop.y },{ to_stop.x }"
)
@staticmethod
def _osm_url(from_stop, to_stop) -> str:
return (
f"https://www.openstreetmap.org/directions?engine=fossgis_osrm_car&"
f"route={from_stop.y},{from_stop.x};{to_stop.y},{to_stop.x}"
)
@classmethod
def render_stop(cls, stop) -> str:
"""
Render a stop to HTML.
"""
if hasattr(stop, "x") and hasattr(stop, "y"):
osm_url = (
f"https://www.openstreetmap.org/query?"
f"lat={stop.y}&lon={stop.x}"
f"#map=18/{stop.y}/{stop.x}"
)
else:
osm_url = None
return cls._stop_template.render(this=stop, osm_url=osm_url)
@staticmethod
def meters_to_feet(meters: float) -> int:
"""
Convert the given distance in meters to feet.
"""
return int(meters * 3.281)
def render(self) -> str:
"""
Render to HTML.
"""
# pylint: disable=line-too-long
ox.utils.log("rendering page...")
scripts = [
"https://cdn.jsdelivr.net/npm/[email protected]/dist/leaflet.js",
"https://cdnjs.cloudflare.com/ajax/libs/Leaflet.awesome-markers/2.0.2/leaflet.awesome-markers.js",
]
stylesheets = [
"https://cdn.jsdelivr.net/npm/[email protected]/dist/leaflet.css",
"https://cdnjs.cloudflare.com/ajax/libs/Leaflet.awesome-markers/2.0.2/leaflet.awesome-markers.css",
"https://maxcdn.bootstrapcdn.com/font-awesome/4.6.3/css/font-awesome.min.css",
"https://maxcdn.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap-glyphicons.css",
]
return self._template.render(
this=self, scripts=scripts, stylesheets=stylesheets
)
def null_str(value: Optional[Any]) -> str:
"""
Return NULL if the value is None, otherwise str(value).
"""
if value is None:
return "NULL"
return str(value)
|
python
|
# -*- coding: utf-8 -*-
from .Cardstack import *
from .Exception import *
from copy import deepcopy
playPreds = ("PLAY", "PLAY COIN", "THRONE", "THRONE GENERIC", "THRONE COIN", "KING")
# -- Standard Exceptions -- #
def persistent(exc):
newExc = deepcopy(exc)
newExc.persistent = True
return newExc
def always(move):
return True
def default_action(moves, i, blockLength, state):
moves[i].pred.action(moves, i, blockLength, state)
defaultMove = Exception(always, default_action, -1, [], 0, True)
def check(predList, targetList=[]):
def out_function(move):
if predList:
if move.pred not in predList:
return False
if targetList:
if move.items:
if len([t for t in targetList if (t in move.items[0])]) == 0:
return False
return True
return out_function
def set_phase(action):
def out_function(moves, i, blockLength, state):
if moves[i].indent == 0:
state.phase = 1
action(moves, i, blockLength, state)
return out_function
def moveFunct(src, dest):
def out_function(moves, i, blockLength, state):
state.move(moves[i].player, src, dest, moves[i].items[0])
return out_function
def checkMove(predList, src, dest, targetList=[]):
return Exception(check(predList, targetList), moveFunct(src, dest))
def move_play(source, dest="INPLAYS"):
def out_function(moves, i, blockLength, state):
state.move(moves[i].player, source, dest, moves[i].items[0])
standard_plays(moves, i, blockLength, state)
return out_function
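# Illustrative note (a sketch of the pattern used throughout this module, not new
# behaviour): exceptions built from these helpers are usually copied and scoped to
# the current block before being registered, e.g.
#   newExc = deepcopy(exc_harbinger)
#   newExc.lifespan = blockLength
#   newExc.indents = [moves[i].indent + 1]
#   state.exceptions.add(newExc)
# where moves[i], blockLength and state are the arguments of the enclosing action.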
exc_revealTopdeck = checkMove(["TOPDECK"], "DECKS", "DECKS")
exc_revealDiscard = checkMove(["DISCARD"], "DECKS", "DISCARDS")
exc_harbinger = checkMove(["TOPDECK"], "DISCARDS", "DECKS")
exc_settlers = checkMove(["PUT INHAND"], "DISCARDS", "HANDS")
exc_gainHand = checkMove(["GAIN"], "SUPPLY", "HANDS")
def standard_trash(source):
def out_function(moves, i, blockLength, state):
move = moves[i]
target = move.items[0].primary
if target == "ESTATE":
target = state.inherited[move.player]
if (
i + blockLength < len(moves)
and moves[i + blockLength].pred == "GAIN"
and moves[i + blockLength].items[0].primary in ["MADMAN", "MERCENARY"]
):
state.move(move.player, "INPLAYS", "TRASH", move.items[0])
elif move.indent == 0 and state.phase == 4:
state.move(move.player, "INPLAYS", "TRASH", move.items[0])
else:
state.move(move.player, source, "TRASH", move.items[0])
triggers = {"FORTRESS": [checkMove(["PUT INHAND"], "TRASH", "HANDS")]}
if target in triggers:
for exc in triggers[target]:
newExc = deepcopy(exc)
newExc.lifespan = blockLength
newExc.indents = [moves[i].indent + 1]
state.exceptions.add(newExc)
if target == "ROCKS":
if state.phase == 2:
newExc = gainTo("SUPPLY", "DECKS")
else:
newExc = gainTo("SUPPLY", "HANDS")
newExc.lifespan = blockLength
newExc.indents = [moves[i].indent + 1]
state.exceptions.add(newExc)
return out_function
exc_revealTrash = Exception(check(["TRASH"]), standard_trash("DECKS"))
exc_supplyTrash = Exception(check(["TRASH"]), standard_trash("SUPPLY"))
exc_inplayTrash = Exception(check(["TRASH"]), standard_trash("INPLAYS"))
def gainTo(source, destination):
return Exception(check(["GAIN"]), standard_gains(source, destination))
# -- PREDS -- #
def new_turn_action(moves, i, blockLength, state):
state.activePlayer = moves[i].player
state.phase = 0
state.coins = 0
state.actions = 1
state.buys = 1
newDurations = []
state.orderedPlays = []
for stack, life in state.durations[moves[i].player]:
if life != 0:
if life > 0:
life -= 1
newDurations.append((stack, life))
for card in state["INPLAYS"][moves[i].player]:
for amt in range(state["INPLAYS"][moves[i].player][card]):
state.orderedPlays.append(card)
state.durations[moves[i].player] = newDurations
state.linkedPlays = []
state.amuletSilvers = 0
state.cargoShips = 0
state.bridges = 0
state.lastMove = ["", ""]
Preds["NEW TURN"].action = new_turn_action
def turn_start_action(moves, i, blockLength, state):
def start_gain(moves, i, blockLength, state):
move = moves[i]
for item in move.items[0]:
if item != "SILVER":
state.move(move.player, "SUPPLY", "HANDS", move.items[0])
return
if move.items[0]["SILVER"] <= state.amuletSilvers:
state.move(move.player, "SUPPLY", "DISCARDS", move.items[0])
state.amuletSilvers -= move.items[0]["SILVER"]
else:
state.move(move.player, "SUPPLY", "HANDS", move.items[0])
def start_piazza(moves, i, blockLength, state):
state.exceptions.add(
Exception(check(["TOPDECK"]), empty, lifespan=2, indents=[moves[i].indent])
)
state.exceptions.add(
Exception(
check(["PLAY"]),
move_play("DECKS"),
lifespan=2,
indents=[moves[i].indent],
priority=2,
)
)
def testo(moves, i, blockLength, state):
state.move(moves[i].player, "OTHERS", "HANDS", moves[i].items[0])
def boon_check(move):
return move.pred == "RECEIVE" and "b" in Cards[move.items[0].primary].types
def boon_action(moves, i, blockLength, state):
for life in range(1, len(moves) - i):
secondary = moves[i + life]
if (
secondary.pred == "DISCARD"
and secondary.items[0].primary == moves[i].items[0].primary
or secondary.indent == 0
):
break
state.exceptions.add(
Exception(
check(["GAIN"]), standard_gains("SUPPLY"), lifespan=life, priority=2
)
)
state.phase = 0
exceptions = [
checkMove(["PUT INHAND"], "OTHERS", "HANDS"),
checkMove(["INHAND GENERIC"], "OTHERS", "HANDS"),
Exception(check(["GAIN"]), start_gain),
Exception(check(["PLAY"]), move_play("OTHERS")),
Exception(check(["REVEAL"]), start_piazza),
Exception(boon_check, boon_action, priority=0),
]
for exc in exceptions:
newExc = deepcopy(exc)
newExc.persistent = True
newExc.lifespan = blockLength
newExc.indents = [moves[i].indent + 1]
state.exceptions.add(newExc)
# Cobbler / Amulet stuff
amuletPlays = 0
for stack, life in state.durations[moves[i].player]:
amuletPlays += stack["AMULET"]
index = i + 1
while index < i + blockLength:
secondary = moves[index]
if secondary.pred == "COINS GENERIC" and secondary.items[0].primary == "AMULET":
amuletPlays -= 1
elif secondary.pred == "TRASH" and secondary.indent == 1:
amuletPlays -= 1
elif secondary.pred == "CALL":
index += 1
index += 1
state.amuletSilvers = amuletPlays
# Ghost linkages
for throneIndex in range(i + 1, i + blockLength):
throneMove = moves[throneIndex]
if throneMove.pred == "THRONE":
target = throneMove.items[0].primary
for index in range(throneIndex - 1, i, -1):
secondary = moves[index]
if secondary.pred == "PLAY" and secondary.items[0].primary == target:
plays = [secondary, index]
block = [plays, Cardstack({target: 1, "GHOST": 1}), None]
state.linkedPlays.append(block)
break
elif secondary.indent == throneMove.indent - 1:
break
Preds["TURN START"].action = turn_start_action
def end_buys_action(moves, i, blockLength, state):
exceptions = [checkMove(["DISCARD"], "TAVERN", "DISCARDS", ["WINE MERCHANT"])]
for exc in exceptions:
newExc = deepcopy(exc)
newExc.lifespan = blockLength
newExc.indents = [moves[i].indent + 1]
state.exceptions.add(newExc)
Preds["END BUYPHASE"].action = end_buys_action
def donate_action(moves, i, blockLength, state):
def moveEverything(moves, i, blockLength, state):
move = moves[i]
discards = state["DISCARDS"][move.player]
if discards > move.items[0] and move.items[0] > discards:
state.move(move.player, "DISCARDS", "HANDS", state["DISCARDS"][move.player])
else:
state.move(move.player, "DECKS", "HANDS", state["DECKS"][move.player])
def shuffleBack(moves, i, blockLength, state):
state.move(moves[i].player, "HANDS", "DECKS", state["HANDS"][moves[i].player])
putExc = Exception(check(["PUT INHAND"]), moveEverything, persistent=True)
shuffleExc = Exception(check(["SHUFFLE INTO"]), shuffleBack)
for exc in [putExc, shuffleExc]:
newExc = deepcopy(exc)
newExc.lifespan = blockLength
newExc.indents = [moves[i].indent + 1]
state.exceptions.add(newExc)
Preds["BETWEEN TURNS"].action = donate_action
Preds["STARTS"].action = moveFunct("SUPPLY", "DECKS")
def get_gain_dest(card):
alts = {
"NOMAD CAMP": "DECKS",
"DEN OF SIN": "HANDS",
"GUARDIAN": "HANDS",
"GHOST TOWN": "HANDS",
"NIGHT WATCHMAN": "HANDS",
}
if card in alts:
return alts[card]
else:
return "DISCARDS"
def standard_gains(source, destination="DISCARDS"):
def out_function(moves, i, blockLength, state):
move = moves[i]
target = deepcopy(move.items[0])
if target == "ESTATE":
target = state.inherited[move.player]
if move.indent == 0:
state.phase = 2
blockEnd = i + blockLength
def move_target(endpoint):
def out_function(moves, i, blockLength, state):
source = (
get_gain_dest(target.primary)
if destination == "DISCARDS"
else destination
)
block = Cardstack({target.primary: 1})
state.move(moves[i].player, source, endpoint, block)
return out_function
def cargo_check(move):
return move.pred == "SET ASIDE WITH" and move.arguments[0] == "Cargo Ship"
def cargo_move(moves, i, blockLength, state):
state.move(moves[i].player, destination, "OTHERS", moves[i].items[0])
twinned = sum([block[1]["CARGO SHIP"] for block in state.linkedPlays])
soloShips = state.cargoShips - twinned
block = [Cardstack({"CARGO SHIP": 1}), 1]
if state.cargoCount < soloShips:
state.durations[moves[i].player].append(block)
else:
twinCapacity = sum(
[len(block[0]) for block in state.linkedPlays if block[2]]
)
if twinCapacity + soloShips == state.cargoCount:
for j in range(len(state.linkedPlays)):
plays, cards, ship = state.linkedPlays[j]
if "CARGO SHIP" in cards and not ship:
newDur = [cards, 1]
state.linkedPlays[j][2] = newDur
state.cargoCount += 1
def innovation_check(move):
return (
move.pred == "SET ASIDE"
and "b" not in Cards[move.items[0].primary].types
and move.items[0].primary == target.primary
)
def innovation_action(moves, i, blockLength, state):
target = moves[i].items[0].primary
endpoint = (
get_gain_dest(target) if destination == "DISCARDS" else destination
)
state.move(moves[i].player, endpoint, "OTHERS", moves[i].items[0])
state.exceptions.add(
Exception(
check(["PLAY"]),
move_play("OTHERS"),
lifespan=blockEnd - i,
indents=[moves[i].indent],
)
)
def changeling_return(moves, i, blockLength, state):
target = moves[i].items[0].primary
endpoint = (
get_gain_dest(target) if destination == "DISCARDS" else destination
)
if "s" not in Cards[target].types:
state.move(moves[i].player, endpoint, "SUPPLY", moves[i].items[0])
def fg_react(moves, i, blockLength, state):
newExc = deepcopy(checkMove(["GAIN"], "SUPPLY", "DECKS", ["GOLD"]))
newExc.lifespan = blockLength + 1
newExc.indents = [moves[i].indent]
state.exceptions.add(newExc)
def villa_phase(moves, i, blockLength, state):
state.move(moves[i].player, destination, "HANDS", moves[i].items[0])
state.phase = 1
# Check for the Changeling lose track message
for secondary in range(i + 1, len(moves)):
if (
moves[secondary].indent == moves[i].indent
and moves[secondary].pred != "LOSETRACK GENERIC"
):
blockLength = secondary - i
break
# If default, check for exceptional gain destinations
if destination == "DISCARDS":
for card in target:
block = Cardstack({card: target[card]})
state.move(move.player, source, get_gain_dest(card), block)
else:
state.move(move.player, source, destination, target)
# Topdeck / trash reactions / Innovation
for secondary in moves[i + 1 : i + blockLength]:
if (
check(
["REACT"],
["ROYAL SEAL", "WATCHTOWER", "TRAVELLING FAIR", "TRACKER"],
)(secondary)
and secondary.indent == move.indent + 1
):
for action, endpoint in [("TOPDECK", "DECKS"), ("TRASH", "TRASH")]:
newExc = Exception(
check([action], ["CARD", target.primary]), move_target(endpoint)
)
newExc.lifespan = blockLength
newExc.indents = [move.indent + 1]
state.exceptions.add(newExc)
break
# Cargo Ship
standardExceptions = [
Exception(cargo_check, cargo_move),
Exception(innovation_check, innovation_action),
Exception(check(["RETURN"]), changeling_return),
]
for exc in standardExceptions:
newExc = deepcopy(exc)
newExc.lifespan = blockLength
newExc.indents = [moves[i].indent + 1]
state.exceptions.add(newExc)
triggers = {
"PROVINCE": [Exception(check(["TRASH"], ["FOOL'S GOLD"]), fg_react)],
"INN": [Exception(check(["SHUFFLE"]), empty)],
"MANDARIN": [checkMove(["TOPDECK"], "INPLAYS", "DECKS")],
"VILLA": [Exception(check(["PUT INHAND"]), villa_phase)],
}
if target.primary in triggers:
for exc in triggers[target.primary]:
newExc = deepcopy(exc)
newExc.lifespan = blockLength
newExc.indents = [moves[i].indent + 1]
state.exceptions.add(newExc)
if "ROCKS" in moves[i].items[0]:
if state.phase == 2:
newExc = gainTo("SUPPLY", "DECKS")
else:
newExc = gainTo("SUPPLY", "HANDS")
newExc.lifespan = blockLength
newExc.indents = [moves[i].indent + 1]
state.exceptions.add(newExc)
return out_function
def gain_experiment(moves, i, blockLength, state):
state.move(moves[i].player, "SUPPLY", "DECKS", Cardstack({"EXPERIMENT": 1}))
def get_cost(card, player, state):
reductions = state.bridges
highwayLike = ["HIGHWAY", "BRIDGE TROLL", "PRINCESS"]
for highway in highwayLike:
reductions += state["INPLAYS"][player][highway]
reductions += state["INPLAYS"][player]["PRINCESS"]
if "a" in Cards[card].types:
reductions += state["INPLAYS"][player]["QUARRY"] * 2
if "CANAL" in state.projects[player]:
reductions += 1
if card == "PEDDLER" and state.phase == 2:
for inplay in state["INPLAYS"][player]:
if "a" in Cards[inplay].types:
reductions += state["INPLAYS"][player][inplay] * 2
actualCost = deepcopy(Cards[card].cost)
actualCost[0] = max(0, actualCost[0] - reductions)
return actualCost
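# Worked example (assuming costs are stored as [coins, debt], as used by buy_action
# below): with state.bridges == 1 and two HIGHWAY in play, reductions == 3, so a card
# with base cost [5, 0] comes out as [2, 0]; the max(0, ...) clamp keeps the reduced
# coin cost non-negative.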
def buy_action(moves, i, blockLength, state):
move = moves[i]
target = move.items[0].primary
if move.indent == 0:
state.phase = 2
state.buys -= sum(
[
move.items[0][card] if "p" not in Cards[card].types else 1
for card in move.items[0]
]
)
costs = [
[
(move.items[0][card] if "p" not in Cards[card].types else 1) * i
for i in get_cost(card, move.player, state)
]
for card in move.items[0]
]
cost = [sum([x[i] for x in costs if len(x) > i]) for i in range(2)]
state.coins -= cost[0]
if len(cost) > 1:
state.debt[move.player] += cost[1]
triggers = {
"MINT": [exc_inplayTrash],
"NOBLE BRIGAND": [exc_revealDiscard, exc_revealTrash],
"DOCTOR": [exc_revealTrash, exc_revealDiscard, exc_revealTopdeck],
"HERALD": [exc_harbinger],
"BONFIRE": [exc_inplayTrash],
"SCOUTING PARTY": [exc_revealDiscard, exc_revealTopdeck],
"ANNEX": [Exception(check(["SHUFFLE"]), empty)],
"SALT THE EARTH": [exc_supplyTrash],
"SUMMON": [checkMove(["SET ASIDE"], get_gain_dest(target), "OTHERS")],
}
if target in triggers:
for exc in triggers[target]:
newExc = deepcopy(exc)
newExc.lifespan = blockLength
newExc.indents = [moves[i].indent + 1]
newExc.persistent = True
state.exceptions.add(newExc)
if target == "SAVE":
for life in range(1, len(moves) - i):
if moves[i + life - 1].pred == "NEW TURN":
break
state.exceptions.add(
Exception(
check(["PUT INHAND"]),
moveFunct("OTHERS", "HANDS"),
lifespan=life,
indents=[0],
)
)
elif "p" in Cards[target].types:
state.projects[move.player].add(target)
def buy_and_gain(moves, i, blockLength, state):
buy_action(moves, i, blockLength, state)
standard_gains("SUPPLY")(moves, i, blockLength, state)
Preds["BUY"].action = buy_action
Preds["BUY AND GAIN"].action = buy_and_gain
Preds["GAIN TOPDECK"].action = standard_gains("SUPPLY", "DECKS")
Preds["GAIN TRASH"].action = standard_gains("TRASH")
Preds["GAIN EXPERIMENT"].action = gain_experiment
Preds["GAIN"].action = standard_gains("SUPPLY")
Preds["TRASH"].action = standard_trash("HANDS")
def discard_action(moves, i, blockLength, state):
move = moves[i]
for c in "bs":
if c in Cards[move.items[0].primary].types:
return
state.move(move.player, "HANDS", "DISCARDS", move.items[0])
Preds["DISCARD"].action = discard_action
def get_stayout_duration(moves, i, state):
move = moves[i]
target = move.items[0].primary
if target == "ESTATE":
target = state.inherited[move.player]
# Check for enchanted
for secondary in moves[i + 1 :]:
if secondary.pred == "ENCHANTED":
return 0
if secondary.indent == 0:
break
if target in [
"CARAVAN",
"FISHING VILLAGE",
"LIGHTHOUSE",
"MERCHANT SHIP",
"WHARF",
"AMULET",
"BRIDGE TROLL",
"CARAVAN GUARD",
"DUNGEON",
"HAUNTED WOODS",
"SWAMP HAG",
"ENCHANTRESS",
"COBBLER",
"DEN OF SIN",
"GHOST TOWN" "GUARDIAN",
"RAIDER",
"GHOST",
]:
return 1
elif target in ["CHAMPION", "HIRELING"]:
return -1
elif target in ["HAVEN", "GEAR"]:
j = i + 1
while j < len(moves) and moves[j].indent > moves[i].indent:
secondary = moves[j]
if (
secondary.indent == move.indent + 1
and secondary.pred == "SET ASIDE WITH"
):
return 1
j += 1
elif target == "OUTPOST":
j = i + 1
while j < len(moves) and moves[j].indent > moves[i].indent:
if moves[j].pred in ["OUTPOST FAIL", "OUTPOST FAIL2"]:
return 0
j += 1
return 1
elif target == "RESEARCH":
j = i + 1
while j < len(moves) and moves[j].indent > moves[i].indent:
secondary = moves[j]
if secondary.indent == move.indent + 1 and secondary.pred == "SET ASIDE":
return 1
j += 1
return 0
elif target in ["ARCHIVE", "CRYPT"]:
j = i + 1
while j < len(moves) and moves[j].indent > moves[i].indent:
secondary = moves[j]
if secondary.indent == move.indent + 1 and secondary.pred == "SET ASIDE":
return len(secondary.items[0])
j += 1
elif target == "SECRET CAVE":
j = i + 1
while j < len(moves) and moves[j].indent > moves[i].indent:
secondary = moves[j]
if (
secondary.indent == move.indent + 1
and secondary.pred == "DISCARD"
and len(secondary.items[0]) == 3
):
return 1
j += 1
else:
return 0
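# Rough summary of the return value (as implemented above): -1 means the play stays
# out indefinitely (CHAMPION, HIRELING), 1 means it stays out one extra turn (WHARF,
# CARAVAN, ...), and 0 means it is cleaned up normally, e.g. when a later ENCHANTED
# move is found or an OUTPOST play fails.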
def standard_plays(moves, i, blockLength, state):
def deathcart_play(moves, i, blockLength, state):
move = moves[i]
if (
move.items[0].primary == "DEATH CART"
and "DEATH CART" in state["INPLAYS"][move.player]
):
state.move(move.player, "INPLAYS", "TRASH", move.items[0])
else:
state.move(move.player, "HANDS", "TRASH", move.items[0])
def hermit_trash(moves, i, blockLength, state):
move = moves[i]
if move.items[0].primary in state["DISCARDS"][move.player]:
standard_trash("DISCARDS")(moves, i, blockLength, state)
else:
standard_trash("HANDS")(moves, i, blockLength, state)
def smallcastle_trash(moves, i, blockLength, state):
move = moves[i]
if move.items[0].primary in state["HANDS"][move.player]:
standard_trash("HANDS")(moves, i, blockLength, state)
else:
standard_trash("INPLAYS")(moves, i, blockLength, state)
def monastery_trash(moves, i, blockLength, state):
move = moves[i]
inplayCoppers = state["INPLAYS"][move.player]["COPPER"]
trashCoppers = move.items[0]["COPPER"]
copperStack = Cardstack({"COPPER": min(inplayCoppers, trashCoppers)})
state.move(move.player, "INPLAYS", "TRASH", copperStack)
state.move(move.player, "HANDS", "TRASH", move.items[0] - copperStack)
def knight_selfTrash(knight):
def out_function(move):
return move.pred == "TRASH" and move.items[0].primary == knight
return out_function
def knight_oppTrash(knightPlayer):
def out_function(move):
return move.pred == "TRASH" and move.player != knightPlayer
return out_function
def michael_discard(knightPlayer):
def out_function(move):
return move.pred == "DISCARD" and move.player != knightPlayer
return out_function
def settler_bug_check(move):
return move.indent == 0 and move.pred == "PUT INHAND"
triggers = {
"ARTISAN": [gainTo("SUPPLY", "HANDS")],
"BANDIT": [exc_revealTrash, exc_revealDiscard],
"BUREAUCRAT": [gainTo("SUPPLY", "DECKS")],
"HARBINGER": [exc_harbinger],
"LIBRARY": [
checkMove(["SETS ASIDE WITH"], "HANDS", "OTHERS"),
checkMove(["DISCARD"], "OTHERS", "DISCARDS"),
],
"MINE": [gainTo("SUPPLY", "HANDS")],
"SENTRY": [exc_revealTrash, exc_revealDiscard, exc_revealTopdeck],
"VASSAL": [
exc_revealDiscard,
Exception(check(["PLAY"]), move_play("DISCARDS")),
],
"LURKER": [exc_supplyTrash, gainTo("TRASH", "DISCARDS")],
"MINING VILLAGE": [exc_inplayTrash],
"PATROL": [exc_revealTopdeck],
"SWINDLER": [exc_revealTrash],
"TORTURER": [gainTo("SUPPLY", "HANDS")],
"TRADING POST": [gainTo("SUPPLY", "HANDS")],
"EMBARGO": [exc_inplayTrash],
"EXPLORER": [gainTo("SUPPLY", "HANDS")],
"AMBASSADOR": [checkMove(["RETURN TO"], "HANDS", "SUPPLY")],
"ISLAND": [checkMove(["PUT ONTO"], "INPLAYS", "OTHERS")],
"LOOKOUT": [exc_revealTrash, exc_revealDiscard, exc_revealTopdeck],
"NATIVE VILLAGE": [
checkMove(["SET ASIDE WITH"], "DECKS", "OTHERS"),
checkMove(["PUT INHAND"], "OTHERS", "HANDS"),
],
"NAVIGATOR": [exc_revealTopdeck, exc_revealDiscard],
"PEARL DIVER": [exc_revealTopdeck, checkMove(["BOTTOMDECK"], "DECKS", "DECKS")],
"PIRATE SHIP": [exc_revealTrash, exc_revealDiscard],
"SEA HAG": [exc_revealDiscard, gainTo("SUPPLY", "DECKS")],
"TREASURE MAP": [exc_inplayTrash],
"APOTHECARY": [exc_revealTopdeck],
"GOLEM": [
Exception(check(["REVEAL"]), moveFunct("DECKS", "OTHERS"), persistent=True),
Exception(check(["PLAY"]), move_play("OTHERS"), persistent=True),
checkMove(["DISCARD"], "OTHERS", "DISCARDS"),
],
"SCRYING POOL": [persistent(exc_revealTopdeck), persistent(exc_revealDiscard)],
"COUNTING HOUSE": [exc_settlers],
"LOAN": [exc_revealDiscard, exc_revealTrash],
"RABBLE": [exc_revealDiscard, exc_revealTopdeck],
"VENTURE": [exc_revealDiscard, Exception(check(playPreds), move_play("DECKS"))],
"BAG OF GOLD": [gainTo("SUPPLY", "DECKS")],
"FARMING VILLAGE": [exc_revealDiscard],
"FORTUNE TELLER": [exc_revealDiscard, exc_revealTopdeck],
"HARVEST": [exc_revealDiscard],
"HORN OF PLENTY": [exc_inplayTrash],
"HUNTING PARTY": [exc_revealDiscard],
"JESTER": [exc_revealDiscard],
"TOURNAMENT": [gainTo("SUPPLY", "DECKS")],
"CARTOGRAPHER": [exc_revealTopdeck, exc_revealDiscard],
"DEVELOP": [persistent(gainTo("SUPPLY", "DECKS"))],
"DUCHESS": [persistent(exc_revealTopdeck), persistent(exc_revealDiscard)],
"ILL-GOTTEN GAINS": [gainTo("SUPPLY", "HANDS")],
"JACK OF ALL TRADES": [exc_revealTopdeck, exc_revealDiscard],
"NOBLE BRIGAND": [exc_revealTrash, exc_revealDiscard],
"ORACLE": [persistent(exc_revealTopdeck), persistent(exc_revealDiscard)],
"ARMORY": [gainTo("SUPPLY", "DECKS")],
"BAND OF MISFITS": [Exception(check(["PLAY"]), standard_plays)],
"BEGGAR": [gainTo("SUPPLY", "HANDS")],
"CATACOMBS": [exc_revealDiscard],
"COUNTERFEIT": [exc_inplayTrash],
"DEATH CART": [Exception(check(["TRASH"]), deathcart_play)],
"GRAVEROBBER": [
Exception(check(["GAIN TOPDECK"]), standard_gains("TRASH", "DECKS"))
],
"HERMIT": [Exception(check(["TRASH"]), hermit_trash)],
"IRONMONGER": [exc_revealDiscard, exc_revealTopdeck],
"PILLAGE": [exc_inplayTrash],
"REBUILD": [exc_revealTrash, exc_revealDiscard],
"PROCESSION": [exc_inplayTrash],
"ROGUE": [exc_revealTrash, exc_revealDiscard, gainTo("TRASH", "DISCARDS")],
"SAGE": [exc_revealDiscard],
"SCAVENGER": [exc_harbinger],
"SURVIVORS": [exc_revealDiscard, exc_revealTopdeck],
"VAGRANT": [exc_revealTopdeck],
"WANDERING MINSTREL": [exc_revealDiscard, exc_revealTopdeck],
"ADVISOR": [exc_revealDiscard],
"BUTCHER": [Exception(check(["USE COFFER", "USE COFFERS"]), empty)],
"DOCTOR": [exc_revealTrash, exc_revealDiscard, exc_revealTopdeck],
"HERALD": [Exception(check(["PLAY"]), move_play("DECKS"))],
"JOURNEYMAN": [exc_revealDiscard],
"TAXMAN": [gainTo("SUPPLY", "DECKS")],
"ARTIFICER": [gainTo("SUPPLY", "DECKS")],
"COIN OF THE REALM": [checkMove(["PUT ONTO"], "INPLAYS", "TAVERN")],
"DISTANT LANDS": [checkMove(["PUT ONTO"], "INPLAYS", "TAVERN")],
"DUPLICATE": [checkMove(["PUT ONTO"], "INPLAYS", "TAVERN")],
"GIANT": [exc_revealDiscard, exc_revealTrash],
"GUIDE": [checkMove(["PUT ONTO"], "INPLAYS", "TAVERN")],
"MAGPIE": [exc_revealTopdeck],
"MISER": [checkMove(["PUT ONTO"], "HANDS", "TAVERN")],
"RATCATCHER": [checkMove(["PUT ONTO"], "INPLAYS", "TAVERN")],
"RAZE": [checkMove(["TRASH"], "INPLAYS", "TRASH", ["RAZE"]), exc_revealDiscard],
"ROYAL CARRIAGE": [checkMove(["PUT ONTO"], "INPLAYS", "TAVERN")],
"TEACHER": [checkMove(["PUT ONTO"], "INPLAYS", "TAVERN")],
"TRANSMOGRIFY": [checkMove(["PUT ONTO"], "INPLAYS", "TAVERN")],
"WARRIOR": [
persistent(exc_revealDiscard),
persistent(checkMove(["TRASH"], "DISCARDS", "TRASH")),
],
"WINE MERCHANT": [checkMove(["PUT ONTO"], "INPLAYS", "TAVERN")],
"SETTLERS": [exc_settlers],
"BUSTLING VILLAGE": [exc_settlers],
"GLADIATOR": [Exception(check(["TRASH"]), standard_trash("SUPPLY"))],
"SMALL CASTLE": [Exception(check(["TRASH"]), smallcastle_trash)],
"ARCHIVE": [checkMove(["SET ASIDE"], "DECKS", "OTHERS")],
"FARMERS' MARKET": [exc_inplayTrash],
"OVERLORD": [Exception(check(["PLAY"]), standard_plays)],
"CHANGELING": [exc_inplayTrash],
"SACRED GROVE": [
Exception(check(["RECEIVE"]), standard_boonhex(True)),
Exception(check(["DISCARD"], ["THE SUN'S GIFT"]), empty, priority=2),
],
"CRYPT": [checkMove(["SET ASIDE"], "INPLAYS", "OTHERS")],
"MONASTERY": [persistent(Exception(check(["TRASH"]), monastery_trash))],
"NECROMANCER": [Exception(check(["PLAY"]), standard_plays)],
"NIGHT WATCHMAN": [exc_revealDiscard, exc_revealTopdeck],
"TRAGIC HERO": [exc_inplayTrash],
"MAGIC LAMP": [exc_inplayTrash],
"GHOST": [exc_revealDiscard, checkMove(["SET ASIDE"], "DECKS", "OTHERS")],
"WILL-O'-WISP": [exc_revealTopdeck],
"WISH": [gainTo("SUPPLY", "HANDS")],
"ZOMBIE MASON": [exc_revealTrash],
"ZOMBIE SPY": [exc_revealDiscard, exc_revealTopdeck],
"BLACK MARKET": [checkMove(["BOTTOMDECK"], "SUPPLY", "SUPPLY")],
"ENVOY": [exc_revealDiscard],
"PRINCE": [checkMove(["SET ASIDE"], "HANDS", "OTHERS")],
"ACTING TROUPE": [exc_inplayTrash],
"BORDER GUARD": [exc_revealDiscard],
"MOUNTAIN VILLAGE": [exc_settlers],
"SCEPTER": [Exception(check(["PLAY"]), standard_plays)],
"SCULPTOR": [gainTo("SUPPLY", "HANDS")],
"SEER": [exc_revealTopdeck],
"TREASURER": [gainTo("TRASH", "HANDS")],
"RESEARCH": [checkMove(["SET ASIDE"], "DECKS", "OTHERS")],
"CAPTAIN": [Exception(check(["PLAY"]), standard_plays)],
}
move = moves[i]
for target in move.items[0]:
if target == "ESTATE":
target = state.inherited[move.player]
if target in triggers:
for exc in triggers[target]:
newExc = deepcopy(exc)
newExc.lifespan = blockLength
newExc.indents = [moves[i].indent + 1]
state.exceptions.add(newExc)
if target == "REPLACE":
for secondary in moves[i + 1 : i + blockLength]:
if secondary.pred == "GAIN":
subject = secondary.items[0]
def replace_topdeck(moves, i, blockLength, state):
block = Cardstack({subject.primary: 1})
state.move(
moves[i].player,
get_gain_dest(subject.primary),
"DECKS",
block,
)
newExc = Exception(
check(["TOPDECK"], ["CARD", subject.primary]), replace_topdeck
)
newExc.lifespan = blockLength
newExc.indents = [secondary.indent]
state.exceptions.add(newExc)
break
elif target in ["BRIDGE", "INVENTOR"]:
state.bridges += 1
elif target in ["THRONE ROOM", "KING'S COURT", "DISCIPLE", "CROWN"]:
plays = []
for j in range(i + 1, i + blockLength):
if (
moves[j].indent == moves[i].indent + 1
and moves[j].pred in playPreds
):
subject = moves[j].items[0].primary
plays.append(j)
if plays:
block = [plays, Cardstack({target: 1, subject: 1}), None]
state.linkedPlays.append(block)
elif target == "SCEPTER":
if i + 1 < len(moves) and moves[i + 1].pred == "PLAY":
stayout = get_stayout_duration(moves, i + 1, state)
subject = moves[i + 1].items[0].primary
if stayout:
# Look for something already going
for j in range(len(state.linkedPlays)):
plays, cards, current = state.linkedPlays[j]
if current:
state.durations[moves[i].player].remove(current)
newDur = [cards, 1]
plays.append(i + 1)
cards["SCEPTER"] += 1
state.linkedPlays[j][2] = newDur
state.durations[moves[i].player].append(newDur)
return
# Look for something minimal (not in linkedPlays)
for j in range(0, i):
secondary = moves[j]
if check(["PLAY"], [subject])(secondary):
if len([x for x in state.linkedPlays if j in x[0]]) == 0:
block = Cardstack({secondary: 1, "SCEPTER": 1})
newDur = [block, 1]
state.linkedPlays.append([[j, i + 1], block, newDur])
state.durations[moves[i].player].append(newDur)
return
# Look for minimal in linkedPlays
state.linkedPlays.sort(key=lambda x: len(x[1]))
plays, cards, current = state.linkedPlays[0]
state.durations[moves[i].player].remove(current)
newDur = [cards, 1]
plays.append(i + 1)
cards["SCEPTER"] += 1
state.linkedPlays[0][2] = newDur
state.durations[moves[i].player].append(newDur)
return
else:
# Look for something not already going
for j in range(len(state.linkedPlays)):
plays, cards, current = state.linkedPlays[j]
if not current:
plays.append(i + 1)
cards["SCEPTER"] += 1
return
# Look for something minimal (not in linkedPlays)
for j in range(0, i):
secondary = moves[j]
if check(["PLAY"], [subject])(secondary):
if len([x for x in state.linkedPlays if j in x[0]]) == 0:
block = Cardstack({secondary: 1, "SCEPTER": 1})
state.linkedPlays.append([[j, i + 1], block, None])
return
# Look for minimal in linkedPlays
state.linkedPlays.sort(key=lambda x: len(x[1]))
plays, cards, current = state.linkedPlays[0]
state.durations[moves[i].player].remove(current)
newDur = [cards, 1]
plays.append(i + 1)
cards["SCEPTER"] += 1
state.linkedPlays[0][2] = newDur
state.durations[moves[i].player].append(newDur)
return
elif target == "STORYTELLER":
state.coins = 0
elif target == "ENGINEER":
if move.indent == 0:
def engineer_trash(moves, i, blockLength, state):
state.move(moves[i].player, "INPLAYS", "TRASH", moves[i].items[0])
exceptions = [
Exception(
check(["TRASH"]),
set_phase(engineer_trash),
indents=[0, 1],
lifespan=blockLength + 1,
),
Exception(
check(["GAIN"]),
set_phase(standard_gains("SUPPLY")),
indents=[0, 1],
lifespan=blockLength + 2,
),
]
else:
exceptions = [deepcopy(exc_inplayTrash)]
exceptions[0].indents = [move.indent + 1]
exceptions[0].lifespan = blockLength
for exc in exceptions:
state.exceptions.add(exc)
elif target == "PIXIE":
exceptions = [
Exception(
check(["TRASH"], ["PIXIE"]),
moveFunct("INPLAYS", "TRASH"),
priority=2,
),
Exception(check(["TAKES"]), standard_boonhex()),
]
for exc in exceptions:
exc.indents = [move.indent + 1]
exc.lifespan = blockLength
state.exceptions.add(exc)
elif target == "CARGO SHIP":
state.cargoShips += 1
elif target == "SETTLERS":
bugExc = Exception(
settler_bug_check,
moveFunct("DISCARDS", "HANDS"),
indents=[0],
lifespan=blockLength + 1,
)
state.exceptions.add(bugExc)
elif target == "VASSAL":
hasPlayed = 0
for secondary in moves[i + 1 : i + blockLength]:
if secondary.pred == "DISCARD":
hasPlayed = 1
target = secondary.items[0].primary
elif secondary.pred == "PLAY":
hasPlayed = 2
break
if hasPlayed == 1:
bugExc = Exception(
check(["PLAY"], [target]),
set_phase(move_play("DISCARDS")),
indents=[move.indent],
lifespan=blockLength + 1,
)
state.exceptions.add(bugExc)
if "k" in Cards[target].types:
for newExc in [
Exception(knight_selfTrash(target), moveFunct("INPLAYS", "TRASH")),
exc_revealDiscard,
Exception(knight_oppTrash(move.player), moveFunct("DECKS", "TRASH")),
]:
newExc.lifespan = blockLength
newExc.indents = [moves[i].indent + 1]
state.exceptions.add(newExc)
if target == "SIR MICHAEL":
life = 1
for j in range(1, blockLength):
if moves[i + j].pred == "REVEAL":
life = j
break
newExc = Exception(
michael_discard(move.player),
moveFunct("HANDS", "DISCARDS"),
life,
[moves[i].indent + 1],
2,
)
state.exceptions.add(newExc)
stayout = get_stayout_duration(moves, i, state)
inside = False
if stayout:
for index, data in enumerate(state.linkedPlays):
plays, cards, current = data
if i in plays:
if current is None or stayout > current[1]:
if current:
state.durations[move.player].remove(current)
newDur = [cards, stayout]
state.durations[move.player].append(newDur)
state.linkedPlays[index][2] = newDur
inside = True
break
if not inside:
newDur = [Cardstack({target: 1}), stayout]
state.linkedPlays.append([[i], Cardstack({target: 1}), newDur])
state.durations[move.player].append(newDur)
def play_action(moves, i, blockLength, state):
target = Cards[moves[i].items[0].primary]
if moves[i].indent == 0:
# Set phase
if "t" in target.types and "a" not in target.types:
state.phase = 2
elif "n" in target.types and "a" not in target.types:
state.phase = 3
elif target == "WEREWOLF":
state.phase = 1
if i + 1 < len(moves):
if moves[i + 1].pred == "TAKES BOONHEX":
state.phase = 3
else:
state.phase = 1
if state.phase == 1:
state.actions -= 1
standard_plays(moves, i, blockLength, state)
state.move(moves[i].player, "HANDS", "INPLAYS", moves[i].items[0])
def play_coin_action(moves, i, blockLength, state):
move = moves[i]
state.coins += int(move.arguments[0])
play_action(moves, i, blockLength, state)
def throne_generic(moves, i, blockLength, state):
if moves[i].items[1].primary == "CITADEL":
if moves[i].indent == 0:
state.phase = 1
target = moves[i].items[0].primary
for index in range(i - 1, 0, -1):
secondary = moves[index]
if secondary.pred == "PLAY" and secondary.items[0].primary == target:
existingBlock = False
for block in state.linkedPlays:
if index in block[0]:
block[0].append(i)
existingBlock = True
break
if not existingBlock:
block = [[i, index], Cardstack({target: 1}), None]
state.linkedPlays.append(block)
break
elif secondary.indent == moves[i].indent - 1:
break
standard_plays(moves, i, blockLength, state)
Preds["PLAY"].action = play_action
Preds["PLAY COIN"].action = play_coin_action
Preds["THRONE"].action = standard_plays
Preds["KING"].action = standard_plays
Preds["THRONE COIN"].action = standard_plays
Preds["THRONE GENERIC"].action = throne_generic
def topdeck_action(moves, i, blockLength, state):
move = moves[i]
if state.phase == 4:
# Probably Scheme (or walled village / alch / treasury)
state.move(move.player, "INPLAYS", "DECKS", move.items[0])
else:
state.move(move.player, "HANDS", "DECKS", move.items[0])
Preds["TOPDECK"].action = topdeck_action
Preds["INSERT INTO"].action = moveFunct("HANDS", "DECKS")
Preds["BOTTOMDECK"].action = moveFunct("HANDS", "DECKS")
def draw_action(moves, i, blockLength, state):
move = moves[i]
player = move.player
# Cleanup
if state.phase == 4:
if state.activePlayer == player:
cleanable = state["INPLAYS"][player]
for stack, life in state.durations[player]:
if life != 0:
cleanable -= stack
state.move(player, "INPLAYS", "DISCARDS", cleanable)
state.move(player, "HANDS", "DISCARDS", state["HANDS"][player])
state.move(move.player, "DECKS", "HANDS", move.items[0])
Preds["DRAW"].action = draw_action
for p in ["DRAW GENERIC", "TACTICIAN DRAW", "DRAW FROM"]:
Preds[p].action = moveFunct("DECKS", "HANDS")
def wish_action(moves, i, blockLength, state):
move = moves[i]
block = Cardstack({move.items[0].primary: 1})
state.move(move.player, "DECKS", "HANDS", block)
Preds["WISH SUCCESS"].action = wish_action
def inhand_action(moves, i, blockLength, state):
move = moves[i]
if (
state.phase == 4
and move.indent == 0
and move.items[0].primary == "FAITHFUL HOUND"
):
state.move(move.player, "OTHERS", "HANDS", move.items[0])
else:
state.move(move.player, "DECKS", "HANDS", move.items[0])
Preds["PUT INHAND"].action = inhand_action
def inhand_generic_action(moves, i, blockLength, state):
move = moves[i]
if move.items[1].primary in ["HAVEN", "GEAR", "ARCHIVE", "CRYPT"]:
state.move(move.player, "OTHERS", "HANDS", move.items[0])
else:
state.move(move.player, "DECKS", "HANDS", move.items[0])
Preds["INHAND GENERIC"].action = inhand_generic_action
def set_aside_action(moves, i, blockLength, state):
move = moves[i]
if "b" not in Cards[move.items[0].primary].types:
state.move(move.player, "INPLAYS", "OTHERS", move.items[0])
Preds["SET ASIDE"].action = set_aside_action
Preds["PUT ONTO"].action = moveFunct("HANDS", "OTHERS")
def call_action(moves, i, blockLength, state):
move = moves[i]
target = move.items[0].primary
if target == "ESTATE":
target = state.inherited[move.player]
state.move(move.player, "TAVERN", "INPLAYS", move.items[0])
# Barring some weird stuff like carriaging a werewolf/crown
if move.indent == 0:
state.phase = 1
triggers = {
"COIN OF THE REALM": [Exception(check(["ACTIONS GENERIC"]), empty)],
"DUPLICATE": [gainTo("SUPPLY", "DISCARDS")],
"TRANSMOGRIFY": [
Exception(check(["GAIN"]), standard_gains("SUPPLY", "HANDS"), priority=2)
],
"ROYAL CARRIAGE": [Exception(check(["THRONE"]), standard_plays)],
}
if target in triggers:
for exc in triggers[target]:
newExc = deepcopy(exc)
newExc.action = set_phase(newExc.action)
newExc.lifespan = blockLength + 1
newExc.indents = [moves[i].indent]
state.exceptions.add(newExc)
def find_associated(moves, i):
# Find carriage plays associated with the original play on
# decision i
turns = [i]
for j in range(i + 1, len(moves)):
if moves[j].indent <= moves[i].indent:
break
elif (
moves[j].pred == "CALL"
and moves[j].items[0].primary == "ROYAL CARRIAGE"
and moves[j].indent == moves[i].indent + 1
):
turns += find_associated(moves, j + 1)
if moves[i].indent == 0:
while j < len(moves):
if (
moves[j].pred == "CALL"
and moves[j].items[0].primary == "ROYAL CARRIAGE"
and moves[j].indent == 0
):
j += 1
turns += find_associated(moves, j)
elif moves[j].indent == 0:
break
j += 1
return turns
if target == "ROYAL CARRIAGE":
if move.indent == 0:
for base in range(i - 1, 0, -1):
if moves[base].pred in playPreds and moves[base].indent == 0:
break
else:
for base in range(i - 1, 0, -1):
if moves[base].indent == move.indent - 1:
break
inside = False
for index in range(len(state.linkedPlays)):
plays, cards, current = state.linkedPlays[index]
if base in plays:
plays.append(i + 1)
cards["ROYAL CARRIAGE"] += 1
if current:
newDur = [cards, current[1]]
state.durations[move.player].remove(current)
state.durations[move.player].append(newDur)
state.linkedPlays[index][2] = newDur
inside = True
break
if not inside:
subject = moves[base].items[0].primary
stack = Cardstack({subject: 1, "ROYAL CARRIAGE": 1})
state.linkedPlays.append([[base, i + 1], stack, None])
Preds["CALL"].action = call_action
def deck_discard_action(moves, i, blockLength, state):
state.move(moves[i].player, "DECKS", "DISCARDS", state["DECKS"][moves[i].player])
Preds["DISCARD DECK"].action = deck_discard_action
Preds["SHUFFLE INTO"].action = moveFunct("DISCARDS", "DECKS")
def shuffle_action(moves, i, blockLength, state):
player = moves[i].player
# Cleanup
if state.phase == 4 and player == state.activePlayer:
cleanable = state["INPLAYS"][player]
for stack, life in state.durations[player]:
if life != 0:
cleanable -= stack
state.move(player, "INPLAYS", "DISCARDS", cleanable)
state.move(player, "HANDS", "DISCARDS", state["HANDS"][player])
state.move(player, "DISCARDS", "DECKS", state["DISCARDS"][player])
Preds["SHUFFLE"].action = shuffle_action
def return_to_action(moves, i, blockLength, state):
move = moves[i]
if move.items[0].primary == "ENCAMPMENT" or (
state.inherited[move.player] == "ENCAMPMENT"
and move.items[0].primary == "ESTATE"
):
state.move(moves[i].player, "OTHERS", "SUPPLY", move.items[0])
else:
state.move(moves[i].player, "INPLAYS", "SUPPLY", move.items[0])
Preds["RETURN TO"].action = return_to_action
def return_action(moves, i, blockLength, state):
move = moves[i]
target = move.items[0].primary
if target in ["DELUDED", "ENVIOUS"]:
state.phase = 2
if "s" in Cards[target].types:
state.projects[moves[i].player].discard(target)
else:
state.move(moves[i].player, "INPLAYS", "SUPPLY", move.items[0])
Preds["RETURN"].action = return_action
def famine_action(moves, i, blockLength, state):
life = 1
for j in range(1, len(moves) - i):
if moves[j + i].pred == "DISCARD" and moves[j + i].items[0].primary == "FAMINE":
life = j
break
newExc = Exception(
check(["SHUFFLE"]), empty, indents=[moves[i].indent], lifespan=life
)
state.exceptions.add(newExc)
def standard_boonhex(grove=False):
def out_function(moves, i, blockLength, state):
move = moves[i]
target = move.items[0].primary
triggers = {
"BAD OMENS": [checkMove(["TOPDECK"], "DISCARDS", "DECKS")],
"FAMINE": [
exc_revealDiscard,
checkMove(["SHUFFLE INTO"], "DECKS", "DECKS"),
Exception(check(["REVEAL"]), famine_action),
],
"GREED": [gainTo("SUPPLY", "DECKS")],
"LOCUSTS": [exc_revealTrash],
"PLAGUE": [gainTo("SUPPLY", "HANDS")],
"WAR": [exc_revealDiscard, exc_revealTrash],
"THE MOON'S GIFT": [checkMove(["TOPDECK"], "DISCARDS", "DECKS")],
"THE SUN'S GIFT": [exc_revealDiscard, exc_revealTopdeck],
}
if target in triggers:
for exc in triggers[target]:
newExc = deepcopy(exc)
life = 1
for j in range(1, len(moves) - i):
secondary = moves[i + j - 1]
if secondary.indent < move.indent:
life = j
break
elif not grove and (
secondary.pred == "DISCARD"
and secondary.items[0].primary == target
):
life = j
break
newExc.lifespan = life - 1
newExc.indents = [moves[i].indent]
newExc.persistent = True
state.exceptions.add(newExc)
return out_function
def receive_action(moves, i, blockLength, state):
move = moves[i]
target = move.items[0].primary
if target in [
"TREASURE HUNTER",
"WARRIOR",
"HERO",
"CHAMPION",
"SOLDIER",
"FUGITIVE",
"DISCIPLE",
"TEACHER",
"CHANGELING",
"BAT",
"VAMPIRE",
]:
state.move(moves[i].player, "SUPPLY", "DISCARDS", move.items[0])
elif "b" in Cards[target].types:
standard_boonhex()(moves, i, blockLength, state)
Preds["RETURN"].action = return_action
Preds["RECEIVE"].action = receive_action
def pass_action(moves, i, blockLength, state):
move = moves[i]
state["HANDS"][move.player] -= move.items[0]
state["HANDS"][1 - move.player] += move.items[0]
Preds["PASS"].action = pass_action
def react_action(moves, i, blockLength, state):
move = moves[i]
target = move.items[0].primary
exc = None
if target == "HORSE TRADERS":
exc = Exception(
check(["SET ASIDE"]), moveFunct("HANDS", "OTHERS"), 2, [move.indent]
)
elif target == "MARKET SQURE":
exc = Exception(
check(["DISCARD"]), moveFunct("HANDS", "DISCARDS"), 2, [move.indent]
)
elif target == "FAITHFUL HOUND":
exc = Exception(
check(["SET ASIDE"]), moveFunct("DISCARDS", "OTHERS"), 2, [move.indent]
)
if exc:
state.exceptions.add(exc)
Preds["REACT"].action = react_action
def genericVP(moves, i, blockLength, state):
move = moves[i]
state.vps[move.player] += int(move.arguments[0])
for p in ["SHIELD GAIN", "SHIELD GET", "SHIELD GENERIC"]:
Preds[p].action = genericVP
Preds["SET ASIDE WITH"].action = moveFunct("HANDS", "OTHERS")
def take_coffers(moves, i, blockLength, state):
move = moves[i]
state.coffers[move.player] += int(move.arguments[0])
def single_coffers(moves, i, blockLength, state):
move = moves[i]
state.coffers[move.player] += 1
Preds["COFFERS GENERIC"].action = take_coffers
Preds["COFFER GENERIC"].action = take_coffers
Preds["GAIN COFFERS"].action = take_coffers
Preds["COFFERS FROM"].action = single_coffers
def use_coffers(moves, i, blockLength, state):
move = moves[i]
state.coffers[move.player] -= int(move.arguments[0])
state.coins += int(move.arguments[0])
Preds["USE COFFERS"].action = use_coffers
Preds["USE COFFER"].action = use_coffers
def take_debt(moves, i, blockLength, state):
move = moves[i]
state.debt[move.player] += int(move.arguments[0])
Preds["TAKE DEBT"].action = take_debt
def take_action(moves, i, blockLength, state):
move = moves[i]
target = move.items[0]
if target.primary in ["MISERABLE", "TWICE MISERABLE"]:
state.vps[move.player] -= 2
if "f" in Cards[target.primary].types:
state.projects[move.player].add(target.primary)
state.projects[1 - move.player].discard(target.primary)
elif "s" in Cards[target.primary].types:
state.projects[move.player].add(target.primary)
Preds["TAKES"].action = take_action
def repay_debt(moves, i, blockLength, state):
move = moves[i]
if move.indent == 0:
state.phase = 2
state.debt[move.player] -= int(move.arguments[0])
state.coins -= int(move.arguments[0])
Preds["REPAY DEBT"].action = repay_debt
Preds["REPAY DEBT PARTIAL"].action = repay_debt
def gain_coin(moves, i, blockLength, state):
move = moves[i]
state.coins += int(move.arguments[0]) if move.arguments else 1
Preds["COINS GENERIC"].action = gain_coin
Preds["GAIN COINS"].action = gain_coin
Preds["COIN TOKEN"].action = gain_coin
def lose_coin(moves, i, blockLength, state):
move = moves[i]
state.coins -= int(move.arguments[0])
Preds["LOSE COINS"].action = lose_coin
Preds["LOSE COIN"].action = lose_coin
def get_buy(moves, i, blockLength, state):
move = moves[i]
state.buys += int(move.arguments[0]) if move.arguments else 1
Preds["BUYS GENERIC"].action = get_buy
Preds["BUY GENERIC"].action = get_buy
Preds["BUY TOKEN"].action = get_buy
Preds["GET BUYS"].action = get_buy
Preds["GET BUY"].action = get_buy
def get_action(moves, i, blockLength, state):
move = moves[i]
state.actions += int(move.arguments[0]) if move.arguments else 1
Preds["ACTIONS GENERIC"].action = get_action
Preds["ACTION GENERIC"].action = get_action
Preds["ACTION TOKEN"].action = get_action
Preds["ACTIONS"].action = get_action
Preds["ACTION"].action = get_action
def get_villager(moves, i, blockLength, state):
move = moves[i]
state.villagers[move.player] += int(move.arguments[0])
Preds["VILLAGERS GENERIC"].action = get_villager
Preds["VILLAGER GENERIC"].action = get_villager
Preds["GAIN VILLAGERS"].action = get_villager
Preds["GAIN VILLAGER"].action = get_villager
def use_villager(moves, i, blockLength, state):
move = moves[i]
if move.indent == 0:
state.phase = 1
state.villagers[move.player] -= int(move.arguments[0])
state.actions += int(move.arguments[0])
Preds["USE VILLAGERS"].action = use_villager
Preds["USE VILLAGER"].action = use_villager
def obelisk_choice(moves, i, blocklength, state):
move = moves[i]
target = move.items[0].primary
state.obelisk = [target]
sets = [
["ENCAMPMENT", "PLUNDER"],
["PATRICIAN", "EMPORIUM"],
["SETTLERS", "BUSTLING VILLAGE"],
["CATAPULT", "ROCKS"],
["GLADIATOR", "FORTUNE"],
[
"KNIGHTS",
"DAME ANNA",
"DAME JOSEPHINE",
"DAME MOLLY",
"DAME NATALIE",
"DAME SYLVIA",
"SIR BAILEY",
"SIR DESTRY",
"SIR MARTIN",
"SIR MICHAEL",
"SIR VANDER",
],
[
"RUINS",
"RUINED LIBRARY",
"RUINED VILLAGE",
"ABANDONED MINE",
"RUINED MARKET",
"SURVIVORS",
],
["SAUNA", "AVANTO"],
]
for group in sets:
if target == group[0]:
state.obelisk = group
def inherit_action(moves, i, blocklength, state):
move = moves[i]
state.move(move.player, "SUPPLY", "OTHERS", move.items[0])
state.inherited[move.player] = move.items[0].primary
Preds["OBELISK CHOICE"].action = obelisk_choice
Preds["INHERIT"].action = inherit_action
def enchant_action(moves, i, blocklength, state):
for end in range(i + 1, len(moves)):
if moves[end].indent < moves[i].indent:
break
enchantedExc = Exception(
always, default_action, end - i + 1, [moves[i].indent], 2, True
)
state.exceptions.add(enchantedExc)
Preds["ENCHANTED"].action = enchant_action
INTRINSIC_EXCEPTIONS = [defaultMove]
|
python
|
import unittest
import numpy as np
import mesostat.utils.iterators.sweep as sweep
class TestUtilIterSweep(unittest.TestCase):
pass
# TODO: Implement me
if __name__ == "__main__":
unittest.main()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#######################################################
# Script that runs all regression tests.
#
#
# [email protected] 2011-02-23
#######################################################
#
from collections import defaultdict
from contextlib import contextmanager
import difflib
import fnmatch
import functools
import glob
import io
import json
import multiprocessing
import numbers
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import webbrowser
# Third-party module or package imports.
import matplotlib.pyplot as plt
import numpy as np
import simplejson
# Code repository sub-package imports.
import pyfunnel
from buildingspy.development import error_dictionary_jmodelica
from buildingspy.development import error_dictionary_optimica
from buildingspy.development import error_dictionary_dymola
from buildingspy.io.outputfile import Reader
from buildingspy.io.postprocess import Plotter
import buildingspy.io.outputfile as of
import buildingspy.io.reporter as rep
def runSimulation(worDir, cmd):
""" Run the simulation.
:param worDir: The working directory.
:param cmd: An array which is passed to the `args` argument of
:mod:`subprocess.Popen`
.. note:: This method is outside the class definition to
allow parallel computing.
"""
# JModelica requires the working directory to be part of MODELICAPATH
env = os.environ.copy() # will be passed to the subprocess.Popen call
if 'MODELICAPATH' in os.environ:
env['MODELICAPATH'] = "{}:{}".format(worDir, os.environ['MODELICAPATH'])
else:
env['MODELICAPATH'] = worDir
logFilNam = os.path.join(worDir, 'stdout.log')
#
with open(logFilNam, mode="w", encoding="utf-8") as logFil:
# Here we add worDir to cmd[1], see https://github.com/lbl-srg/BuildingsPy/issues/303
pro = subprocess.Popen(args=[cmd[0], worDir + "/" + cmd[1]] + cmd[2:],
stdout=logFil,
stderr=logFil,
shell=False,
env=env,
cwd=worDir)
try:
retcode = pro.wait()
if retcode != 0:
print("*** Execution of command '{}' failed".format(cmd))
print("*** Working directory is {}".format(worDir))
print("*** Files in directory {} are\n".format(worDir))
for fil in os.listdir(worDir):
print(" {}".format(fil))
print("*** The command returned the following output: \n")
if os.path.isfile(logFilNam):
with open(logFilNam, 'r') as f:
print(f.read())
else:
print("The file {} does not exist.\n".format(logFilNam))
print("*** end of command output\n")
print("Child was terminated by signal {}".format(retcode))
return retcode
else:
return 0
except OSError as e:
sys.stderr.write("Execution of '" + " ".join(map(str, cmd)) + " failed.\n"
+ "Working directory is '" + worDir + "'.")
raise(e)
except KeyboardInterrupt as e:
pro.kill()
sys.stderr.write("Users stopped simulation in %s.\n" % worDir)
@contextmanager
def _stdout_redirector(stream):
""" Redirects sys.stdout to stream."""
old_stdout = sys.stdout
sys.stdout = stream
try:
yield
finally:
sys.stdout = old_stdout
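# Minimal usage sketch (illustrative only, not called by the test runner itself):
#   buf = io.StringIO()
#   with _stdout_redirector(buf):
#       print("captured")
#   assert buf.getvalue() == "captured\n"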
class Tester(object):
""" Class that runs all regression tests using Dymola.
Initiate with the following optional arguments:
:param check_html: Boolean (default ``True``). Specify whether to load tidylib and
perform validation of html documentation.
:param tool: string {``'dymola'``, ``'omc'``, ``'optimica'``, ``'jmodelica'``}.
Default is ``'dymola'``, specifies the
tool to use for running the regression test with :func:`~buildingspy.development.Tester.run`.
:param cleanup: Boolean (default ``True``). Specify whether to delete temporary directories.
:param tol: float or dict (default=1E-3). Comparison tolerance
If a float is provided, it is assigned to the absolute tolerance along x axis and to the
absolute and relative tolerance along y axis.
(If ``comp_tool='legacy'``, only the absolute tolerance in y is used.)
If a dict is provided, keys must conform with ``pyfunnel.compareAndReport`` arguments.
:param skip_verification: Boolean (default ``False``).
If ``True``, unit test results are not verified against reference points.
This class can be used to run all regression tests.
*Regression testing using Dymola*
For Dymola, this module searches the directory
``CURRENT_DIRECTORY/Resources/Scripts/Dymola`` for
all ``*.mos`` files that contain the string ``simulate``,
where ``CURRENT_DIRECTORY`` is the name of the directory in which the Python
script is started, as returned by the function :func:`getLibraryName`.
All these files will be executed as part of the regression tests.
Any variables or parameters that are plotted by these ``*.mos`` files
will be compared to previous results that are stored in
``CURRENT_DIRECTORY/Resources/ReferenceResults/Dymola``.
If no reference results exist, then they will be created.
Otherwise, the accuracy of the new results is compared to the
reference results. If they differ by more than a prescribed
tolerance, a plot such as the one below is shown.
.. figure:: img/unitTestPlot.png
:width: 560 px
Plot that compares the new results (solid line) of the regression test with the old results (dotted line).
The blue line indicates the time where the largest error occurs.
In this plot, the vertical line indicates the time where the biggest error
occurs.
The user is then asked to accept or reject the new results.
For Dymola, the regression tests also store and compare the following statistics
for the initialization problem and the time domain simulation:
#. The number and the size of the linear system of equations,
#. the number and the size of the nonlinear system of equations, and
#. the number of the numerical Jacobians.
To run the regression tests, type
>>> import os
>>> import buildingspy.development.regressiontest as r
>>> rt = r.Tester(tool="dymola")
>>> myMoLib = os.path.join("buildingspy", "tests", "MyModelicaLibrary")
>>> rt.setLibraryRoot(myMoLib)
>>> rt.run() # doctest: +ELLIPSIS
Using ... of ... processors to run unit tests for dymola.
Number of models : ...
blocks : 2
functions: 0
Generated ... regression tests.
<BLANKLINE>
Comparison files output by funnel are stored in the directory 'funnel_comp' of size ... MB.
Run 'report' method of class 'Tester' to access a summary of the comparison results.
<BLANKLINE>
Script that runs unit tests had 0 warnings and 0 errors.
<BLANKLINE>
See 'simulator-....log' for details.
Unit tests completed successfully.
<BLANKLINE>
Execution time = ...
To run regression tests only for a single package, call :func:`setSinglePackage`
prior to :func:`run`.
*Regression testing using OPTIMICA or JModelica*
For OPTIMICA and JModelica, the selection of test cases is done the same
way as for Dymola. However, the solver tolerance is obtained
from the `.mo` file by reading the annotation
`Tolerance="value"`.
For OPTIMICA and JModelica, a JSON file stored as
``Resources/Scripts/BuildingsPy/conf.json`` can be used
to further configure tests. The file has the syntax below,
where ``optimica`` or ``jmodelica`` specifies the tool.
.. code-block:: javascript
[
{
"optimica": {
"ncp": 500,
"rtol": 1E-6,
"solver": "CVode",
"simulate": True,
"translate": True,
"time_out": 600
},
"model_name": "Buildings.Fluid.Examples.FlowSystem.Simplified2"
}
]
Any JSON elements are optional, and the entries shown above
are the default values, except for the relative tolerance `rtol`
which is read from the `.mo` file. However, with `rtol`, this
value can be overwritten.
Note that this syntax is still experimental and may be changed.
"""
def __init__(
self,
check_html=True,
tool="dymola",
cleanup=True,
comp_tool='funnel',
tol=1E-3,
skip_verification=False,
):
""" Constructor."""
if tool == 'optimica':
e = error_dictionary_optimica
elif tool == 'jmodelica':
e = error_dictionary_jmodelica
else:
e = error_dictionary_dymola
# --------------------------
# Class variables
self._checkHtml = check_html
# Set the default directory for the library.
# We are not calling setLibraryRoot because the
# function checks for the argument to be a valid
# library directory. This is also checked in run(),
# hence for the default value in this constructor,
# we do not verify whether the directory contains
# a valid library.
self._libHome = os.path.abspath(".")
self._rootPackage = os.path.join(self._libHome, 'Resources', 'Scripts', 'Dymola')
# Set the tool
if tool in ['dymola', 'omc', 'optimica', 'jmodelica']:
self._modelica_tool = tool
else:
raise ValueError(
"Value of 'tool' of constructor 'Tester' must be 'dymola', 'omc', 'optimica' or 'jmodelica'. Received '{}'.".format(tool))
# File to which the console output of the simulator is written
self._simulator_log_file = "simulator-{}.log".format(tool)
# File to which the console output of the simulator of failed simulations is written
self._failed_simulator_log_file = "failed-simulator-{}.log".format(tool)
# File to which statistics are written
self._statistics_log = "statistics.json"
self._nPro = multiprocessing.cpu_count()
self._batch = False
self._pedanticModelica = False
# List of scripts that should be excluded from the regression tests
# self._exclude_tests=['Resources/Scripts/Dymola/Airflow/Multizone/Examples/OneOpenDoor.mos']
self._exclude_tests = []
# Number of data points that are used
self._nPoi = 101
# List of temporary directories that are used to run the simulations.
self._temDir = []
# Flag to delete temporary directories.
self._deleteTemporaryDirectories = cleanup
# Flag to use existing results instead of running a simulation.
self._useExistingResults = False
# Flag to compare results against reference points for OPTIMICA and JModelica.
self._skip_verification = skip_verification
#self._skip_verification = True
# Comparison tool.
self._comp_tool = comp_tool
# Absolute (a) or relative (r) tolerance in x and y.
self._tol = {} # Keys: 'ax', 'ay', 'lx', 'ly', 'rx', 'ry'. Values: defaulting to 0.
if isinstance(tol, numbers.Real):
self._tol['ax'] = tol
self._tol['ay'] = tol
self._tol['ly'] = tol
elif isinstance(tol, dict):
self._tol = tol
else:
raise TypeError('Parameter `tol` must be a number or a dict.')
for k in ['ax', 'ay', 'lx', 'ly', 'rx', 'ry']:
try:
self._tol[k]
except KeyError:
self._tol[k] = 0
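# For example, constructing Tester(tol=1E-3) leaves self._tol as
# {'ax': 1e-3, 'ay': 1e-3, 'ly': 1e-3, 'lx': 0, 'rx': 0, 'ry': 0} after this loop.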
# Data structures for storing comparison data.
self._comp_info = []
self._comp_log_file = "comparison-{}.log".format(tool)
self._comp_dir = "funnel_comp"
# (Delete and) Create directory for storing funnel data.
# Done by the run method to allow running the report method without having to rerun simulations.
# Path of templates for HTML report and plot.
self._REPORT_TEMPLATE = os.path.join(
os.path.dirname(__file__), os.path.pardir, 'templates', 'datatable.html')
self._PLOT_TEMPLATE = os.path.join(
os.path.dirname(__file__), os.path.pardir, 'templates', 'plot.html')
# Write result dictionary that is used by OpenModelica's regression testing
# self.writeOpenModelicaResultDictionary()
'''
List of dicts, each dict with all meta-information about a single model to be tested.
keys equal to the ``*.mos`` file name, and values
containing a dictionary with keys ``matFil`` and ``y``.
The values of ``y`` are a list of the
form `[[a.x, a.y], [b.x, b.y1, b.y2]]` if the
mos file plots `a.x` versus `a.y` and `b.x` versus `(b.y1, b.y2)`.
'''
self._data = []
self._reporter = rep.Reporter(os.path.join(os.getcwd(), "unitTests-{}.log".format(tool)))
# By default, include export of FMUs.
self._include_fmu_test = True
# Variable that contains the figure size in inches.
# This variable is set after the first plot has been rendered.
# If a user resizes the plot, then the next plot will be displayed with
# the same size.
self._figSize = None
# Dictionary with error messages, error counter and messages written to the user
self._error_dict = e.ErrorDictionary()
# By default, do not show the GUI of the simulator
self._showGUI = False
def report(self, timeout=600, browser=None, autoraise=True, comp_file=None):
"""Builds and displays HTML report.
Serves until timeout (s) or KeyboardInterrupt.
"""
if self._comp_tool != 'funnel':
raise ValueError('Report is only available with comp_tool="funnel".')
report_file = 'report.html'
plot_file = os.path.join(self._comp_dir, 'plot.html')
with open(self._REPORT_TEMPLATE, 'r') as f:
template = f.read()
content = re.sub(r'\$SIMULATOR_LOG', self._comp_log_file, template)
content = re.sub(r'\$COMP_DIR', self._comp_dir, content)
server = pyfunnel.MyHTTPServer(
('',
0),
pyfunnel.CORSRequestHandler,
str_html=content,
url_html='funnel',
browse_dir=os.getcwd())
# Pre-build HTML plot file.
with open(self._PLOT_TEMPLATE, 'r') as f:
template = f.read()
content = re.sub(r'\$SERVER_PORT', str(server.server_port), template)
with open(plot_file, 'w') as f:
f.write(content)
server.browse(browser=browser, timeout=timeout)
def get_unit_test_log_file(self):
""" Return the name of the log file of the unit tests,
such as ``unitTests-optimica.log``, ``unitTests-jmodelica.log`` or ``unitTests-dymola.log``.
"""
return "unitTests-{}.log".format(self._modelica_tool)
def _initialize_error_dict(self):
""" Initialize the error dictionary.
"""
if self._modelica_tool == 'optimica':
import buildingspy.development.error_dictionary_optimica as e
elif self._modelica_tool == 'jmodelica':
import buildingspy.development.error_dictionary_jmodelica as e
else:
import buildingspy.development.error_dictionary_dymola as e
self._error_dict = e.ErrorDictionary()
def setLibraryRoot(self, rootDir):
""" Set the root directory of the library.
:param rootDir: The top-most directory of the library.
The root directory is the directory that contains the ``Resources`` folder
and the top-level ``package.mo`` file.
Usage: Type
>>> import os
>>> import buildingspy.development.regressiontest as r
>>> rt = r.Tester()
>>> myMoLib = os.path.join("buildingspy", "tests", "MyModelicaLibrary")
>>> rt.setLibraryRoot(myMoLib)
"""
self._libHome = os.path.abspath(rootDir)
self._rootPackage = os.path.join(self._libHome, 'Resources', 'Scripts', 'Dymola')
self.isValidLibrary(self._libHome)
def useExistingResults(self, dirs):
""" This function allows to use existing results, as opposed to running a simulation.
:param dirs: A non-empty list of directories that contain existing results.
This method can be used for testing and debugging. If called, then no simulation is
run.
If the directories
``['/tmp/tmp-Buildings-0-zABC44', '/tmp/tmp-Buildings-0-zQNS41']``
contain previous results, then this method can be used as
>>> import buildingspy.development.regressiontest as r
>>> l=['/tmp/tmp-Buildings-0-zABC44', '/tmp/tmp-Buildings-0-zQNS41']
>>> rt = r.Tester()
>>> rt.useExistingResults(l)
>>> rt.run() # doctest: +SKIP
"""
if len(dirs) == 0:
raise ValueError(
"Argument 'dirs' of function 'useExistingResults(dirs)' must have at least one element.")
self.setNumberOfThreads(len(dirs))
self._temDir = dirs
self.deleteTemporaryDirectories(False)
self._useExistingResults = True
def setNumberOfThreads(self, number):
""" Set the number of parallel threads that are used to run the regression tests.
:param number: The number of parallel threads that are used to run the regression tests.
By default, the number of parallel threads is set to be equal to the number of
processors of the computer.
"""
self._nPro = number
def showGUI(self, show=True):
""" Call this function to show the GUI of the simulator.
By default, the simulator runs without a GUI.
"""
self._showGUI = show
return
def batchMode(self, batchMode):
""" Set the batch mode flag.
:param batchMode: Set to ``True`` to run without interactive prompts
and without plot windows.
By default, the regression tests require the user to respond if results differ from previous simulations.
This method can be used to run the script in batch mode, suppressing all prompts that require
the user to enter a response. If run in batch mode, no new results will be stored.
To run the regression tests in batch mode, enter
>>> import os
>>> import buildingspy.development.regressiontest as r
>>> r = r.Tester()
>>> r.batchMode(True)
>>> r.run() # doctest: +SKIP
"""
self._batch = batchMode
def pedanticModelica(self, pedanticModelica):
""" Set the pedantic Modelica mode flag.
:param pedanticModelica: Set to ``True`` to run the unit tests in the pedantic Modelica mode.
By default, regression tests are run in non-pedantic Modelica mode.
This however will be changed in the near future.
>>> import os
>>> import buildingspy.development.regressiontest as r
>>> r = r.Tester()
>>> r.pedanticModelica(True)
>>> r.run() # doctest: +SKIP
"""
self._pedanticModelica = pedanticModelica
def include_fmu_tests(self, fmu_export):
""" Sets a flag that, if ``False``, does not test the export of FMUs.
:param fmu_export: Set to ``True`` to test the export of FMUs (default), or ``False``
to not test the FMU export.
To run the unit tests but do not test the export of FMUs, type
>>> import os
>>> import buildingspy.development.regressiontest as r
>>> r = r.Tester()
>>> r.include_fmu_tests(False)
>>> r.run() # doctest: +SKIP
"""
self._include_fmu_test = fmu_export
def getModelicaCommand(self):
""" Return the name of the modelica executable.
:return: The name of the modelica executable.
"""
if self._modelica_tool == 'optimica' or self._modelica_tool == 'jmodelica':
return 'jm_ipython.sh'
else:
return self._modelica_tool
def isExecutable(self, program):
""" Return ``True`` if the ``program`` is an executable
"""
import platform
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
# Add .exe, which is needed on Windows 7 to test existence
# of the program
if platform.system() == "Windows":
program = program + ".exe"
if is_exe(program):
return True
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return True
return False
@staticmethod
def isValidLibrary(library_home):
""" Returns true if the regression tester points to a valid library
that implements the scripts for the regression tests.
:param library_home: top-level directory of the library, such as ``Buildings``.
:return: ``True`` if the library implements regression tests, ``False`` otherwise.
"""
topPackage = os.path.abspath(os.path.join(library_home, "package.mo"))
if not os.path.isfile(topPackage):
raise ValueError("Directory %s is not a Modelica library.\n Expected file '%s'."
% (library_home, topPackage))
srcDir = os.path.join(library_home, "Resources", "Scripts")
if not os.path.exists(srcDir):
raise ValueError(
"Directory %s is not a Modelica library.\n Expected directories '%s'." %
(library_home, srcDir))
return os.path.exists(os.path.join(library_home, "Resources", "Scripts"))
def getLibraryName(self):
""" Return the name of the library that will be run by this regression test.
:return: The name of the library that will be run by this regression test.
"""
return os.path.basename(self._libHome)
def checkPythonModuleAvailability(self):
""" Check whether all required python modules are installed.
If some modules are missing, then an `ImportError` is raised.
"""
requiredModules = ['buildingspy', 'matplotlib.pyplot', 'numpy', 'scipy.io']
if self._checkHtml:
requiredModules.append('tidylib')
missingModules = []
for module in requiredModules:
try:
__import__(module)
except ImportError:
missingModules.append(module)
if len(missingModules) > 0:
msg = "The following python module(s) are required but failed to load:\n"
for mod in missingModules:
msg += " " + mod + "\n"
msg += "You need to install these python modules to use this script.\n"
raise ImportError(msg)
def _checkKey(self, key, fileName, counter):
""" Checks whether ``key`` is contained in the header of the file ``fileName``
If the first line starts with ``within``
and the second line starts with ``key``
then the counter is increased by one.
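For example (illustrative), with ``key='model'``, a file whose first line is
``within MyLibrary.Examples;`` and whose second line is ``model MyExample``
causes the counter to be increased by one.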
"""
with open(fileName, mode="rt", encoding="utf-8-sig") as filObj:
# filObj is an iterable object, so we can use next(filObj)
line0 = next(filObj).strip()
if line0.startswith("within"):
line1 = next(filObj).strip()
if line1.startswith(key):
counter += 1
return counter
def setExcludeTest(self, excludeFile):
""" Exclude from the regression tests all tests specified in ``excludeFile``.
:param excludeFile: The text file with files that shall be excluded from regression tests
"""
self._reporter.writeWarning(
"The function setExcludeTest will be removed in future releases.")
if os.path.isfile(excludeFile):
with open(excludeFile, mode="r", encoding="utf-8-sig") as f:
for line in f:
if line.rstrip().endswith('.mos') and not line.startswith('#'):
filNamTup = line.rpartition(self.getLibraryName())
filNam = filNamTup[2].rstrip().replace('\\', '/').lstrip('/')
self._exclude_tests.append(filNam)
else:
self._reporter.writeError("Could not find file {!s}".format(excludeFile))
def _includeFile(self, fileName):
""" Returns true if the file need to be included in the list of scripts to run
:param fileName: The name of the ``*.mos`` file.
The parameter ``fileName`` needs to be of the form
``Resources/Scripts/Dymola/Fluid/Actuators/Examples/Damper.mos``
or ``Resources/Scripts/someOtherFile.ext``.
This function checks if ``fileName`` exists in the global list
``self._exclude_tests``. For checking, ``fileName`` will be normalized (strip
whitespace, convert backslash to slash, strip path).
"""
if fileName.rstrip().endswith('.mos'):
# This is a mos file, normalize the name
filNamTup = fileName.rpartition(self.getLibraryName())
filNam = filNamTup[2].rstrip().replace('\\', '/').lstrip('/')
# Check whether the file is in the exclude list
if filNam in self._exclude_tests:
self._reporter.writeWarning(
"Excluded file {} from the regression tests.".format(filNam))
return False
else:
return True
else:
# This is not a mos file, do not include it
return False
@staticmethod
def expand_packages(packages):
"""
Expand the ``packages`` from the form
``A.{B,C}`` and return ``A.B,A.C``
:param packages: A string with one or more package names, which may use the curly-brace notation shown above.
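Example (illustrative doctest for the expansion described above):
>>> import buildingspy.development.regressiontest as r
>>> r.Tester.expand_packages('A.{B,C}')
'A.B,A.C'
>>> r.Tester.expand_packages('A.B')
'A.B'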
"""
ids = packages.find('{')
if ids < 0:
# This has no curly bracket notation
return packages
ide = packages.find('}')
# Make some simple test for checking the string format
if ide - 1 <= ids:
raise ValueError("String '{}' is wrong formatted".format(packages))
# Get text before the curly brackets
pre = packages[0:ids]
# Get text inside the curly brackets
in_bra = packages[ids + 1:ide]
entries = in_bra.split(',')
# Add the start to the entries
pac = []
for ele in entries:
pac.append("{}{}".format(pre, ele))
ret = ",".join(pac)
return ret.replace(' ', '')
def _remove_duplicate_packages(self, packages):
""" Remove duplicate packages in the list of packages.
For example, if packages = [A.B.C, A.B, A.F], or packages = [A.B, A.B.C, A.F],
then this function returns [A.B, A.F] because A.B.C is already contained in A.B
"""
sor = sorted(packages) # This sets sor = [A.B, A.B.C, A.F]
ret = list()
for i in range(len(sor)):
add = True
for j in range(len(ret)):
if sor[i].startswith(ret[j]):
# The parent package is already in the list
add = False
self._reporter.writeWarning(
"Found package that is contained in other package in test configuration '{}' and '{}'".format(
sor[i], ret[j]))
if add:
ret.append(sor[i])
return ret
def setSinglePackage(self, packageName):
"""
Set the name of one or multiple Modelica package(s) to be tested.
:param packageName: The name of the package(s) to be tested.
Calling this method will cause the regression tests to run
only for the examples in the package ``packageName``, and in
all its sub-packages.
For example:
* If ``packageName = Annex60.Controls.Continous.Examples``,
then a test of the ``Annex60`` library will run all examples in
``Annex60.Controls.Continous.Examples``.
* If ``packageName = Annex60.Controls.Continous.Examples,Annex60.Controls.Continous.Validation``,
then a test of the ``Annex60`` library will run all examples in
``Annex60.Controls.Continous.Examples`` and in ``Annex60.Controls.Continous.Validation``.
"""
# Create a list of packages, unless packageName is already a list
packages = list()
if ',' in packageName:
# First, split packages in case they are of the form Building.{Examples, Fluid}
expanded_packages = self.expand_packages(packageName)
packages = expanded_packages.split(',')
else:
packages.append(packageName)
packages = self._remove_duplicate_packages(packages)
# Inform the user that not all tests are run, but don't add to warnings
# as this would flag the test to have failed
self._reporter.writeOutput(
"""Regression tests are only run for the following package{}:""".format(
'' if len(packages) == 1 else 's'))
for pac in packages:
self._reporter.writeOutput(""" {}""".format(pac))
# Remove the top-level package name as the unit test directory does not
# contain the name of the library.
# Reset the data dictionary as it may have been generated earlier for the whole library.
self._data = []
for pac in packages:
pacSep = pac.find('.')
pacPat = pac[pacSep + 1:]
pacPat = pacPat.replace('.', os.sep)
rooPat = os.path.join(self._libHome, 'Resources', 'Scripts', 'Dymola', pacPat)
# Verify that the directory indeed exists
if not os.path.isdir(rooPat):
msg = """Requested to test only package '%s', but directory
'%s' does not exist.""" % (pac, rooPat)
raise ValueError(msg)
self.setDataDictionary(rooPat)
def writeOpenModelicaResultDictionary(self):
""" Write in ``Resources/Scripts/OpenModelica/compareVars`` files whose
name are the name of the example model, and whose content is::
compareVars :=
{
"controler.y",
"sensor.T",
"heater.Q_flow"
};
These files are then used in the regression testing that is done by the
OpenModelica development team.
"""
# Create the data dictionary.
if len(self._data) == 0:
self.setDataDictionary(self._rootPackage)
# Directory where files will be stored
desDir = os.path.join(self._libHome, "Resources", "Scripts", "OpenModelica", "compareVars")
if not os.path.exists(desDir):
os.makedirs(desDir)
# Loop over all experiments and write the files.
for experiment in self._data:
if 'model_name' in experiment and experiment['mustSimulate']:
if 'ResultVariables' in experiment:
# For OpenModelica, don't group variables into those
# who should be plotted together, as all are plotted in
# the same plot.
res = []
for pair in experiment['ResultVariables']:
for var in pair:
res.append(var)
# Content of the file.
filCon = "compareVars :=\n {\n \"%s\"\n };\n" % ("\",\n \"".join(res))
# File name.
filNam = os.path.join(desDir, experiment['model_name'] + ".mos")
# Write the file
with open(filNam, mode="w", encoding="utf-8") as fil:
fil.write(filCon)
@staticmethod
def get_plot_variables(line):
""" For a string of the form `*y={aa,bb,cc}*`, optionally with whitespace characters,
return the list `[aa, bb, cc]`.
If the string does not contain `y = ...`, return `None`.
A usage may be as follows. Note that the second call returns `None` as
it has a different format.
>>> import buildingspy.development.regressiontest as r
>>> r.Tester.get_plot_variables('y = {"a", "b", "c"}')
['a', 'b', 'c']
>>> r.Tester.get_plot_variables('... x}, y = {"a", "b", "c"}, z = {...')
['a', 'b', 'c']
>>> r.Tester.get_plot_variables("y=abc") is None
True
"""
import re
import shlex
# Make sure line has no "y = {..." that is not closed, e.g., it spans multiple lines
incomplete = re.search(r"y\s*=\s*{.*\n", line)
# This evaluates for example
# re.search("y.*=.*{.*}", "aay = {aa, bb, cc}aa").group()
# 'y = {aa, bb, cc}'
var = re.search(r"y\s*=\s*{.*}", line)
if var is None and incomplete is None:
return None
if var is None and incomplete is not None:
msg = "Malformed line '{}'".format(line)
raise ValueError(msg)
s = var.group()
s = re.search('{.*?}', s).group()
s = s.strip('{}')
# Use the lexer module as simply splitting by "," won't work because arrays have
# commas in the form "a[1, 1]", "a[1, 2]"
lexer = shlex.shlex(s)
lexer.quotes = '"'
lexer.whitespace = ", \t" # Skip commas, otherwise they are also returned as a token
y = list(lexer)
for i in range(len(y)):
# Remove quotes as we deal with a string already
y[i] = y[i].replace('"', '')
# Strip whitespace characters
y[i] = y[i].strip()
# Replace a[1,1] by a[1, 1], which is required for the
# Reader to be able to read the result.
# Also, replace multiple white spaces with a single white space as
# reading .mat is picky. For example, it refused to read a[1,1] or a[1, 1]
y[i] = re.sub(r',\W*', ', ', y[i])
return y
@staticmethod
def get_tolerance(library_home, model_name):
""" Return the tolerance as read from the `.mo` file.
:param library_home: Home directory of the library.
:param model_name: Name of the model.
"""
import os
import re
import io
file_name = os.path.join(library_home, '..', model_name.replace('.', os.path.sep) + ".mo")
if not os.path.exists(file_name):
raise IOError("Failed to find file '{}' for model '{}'".format(file_name, model_name))
p_number = re.compile(r'Tolerance\s*=\s*(-?\ *[0-9]+\.?[0-9]*(?:[Ee]\ *-?\ *[0-9]+)?)')
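# Illustrative example (hypothetical annotation): a line such as
#   annotation(experiment(Tolerance=1e-6, StopTime=3600));
# yields the single match '1e-6' for the pattern above.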
tols = list()
with open(file_name, 'r') as fil:
for lin in fil:
tol = re.findall(p_number, lin)
if len(tol) > 0:
tols.append(tol)
# Make sure we found exactly one entry
if len(tols) == 0:
raise RuntimeError("Failed to find Tolerance in '{}'.".format(file_name))
if len(tols) > 1:
raise RuntimeError(
"Found multiple entries for Tolerance in '{}', but require exactly one entry.".format(file_name))
return tols[0][0]
def setDataDictionary(self, root_package=None):
""" Build the data structures that are needed to parse the output files.
:param root_package: The name of the top-level package for which the files need to be parsed.
Separate package names with a period.
"""
def _get_attribute_value(line, keyword, dat):
""" Get the value of an attribute in the `.mos` file.
This function will remove leading and ending quotes.
:param line: The line that contains the keyword and the value.
:param keyword: The keyword
:param dat: The data dictionary to which dat[keyword] = value will be written.
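For example (illustrative, hypothetical line), calling this function with
``line='simulateModel("MyLib.Examples.MyModel", stopTime=86400, tolerance=1e-6);'``
and ``keyword='stopTime'`` sets ``dat['stopTime'] = '86400'``.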
"""
line = re.sub(' ', '', line)
pos = line.find(keyword)
if pos > -1:
posEq = line.find('=', pos)
posComma = line.find(',', pos)
posBracket = line.find(')', pos)
posEnd = min(posComma, posBracket)
if posEnd < 0:
posEnd = max(posComma, posBracket)
# Ensure that keyword is directly located before the next = sign
if posEq == pos + len(keyword):
entry = line[posEq + 1:posEnd]
dat[keyword] = re.sub(r'^"|"$', '', entry)
return
old_len = self.get_number_of_tests()
# Check if the data dictionary has already been set, in
# which case we return doing nothing.
# This is needed because methods append to the dictionary, which
# can lead to double entries.
roo_pac = root_package if root_package is not None else os.path.join(
self._libHome, 'Resources', 'Scripts', 'Dymola')
for root, _, files in os.walk(roo_pac):
for mosFil in files:
# Exclude the conversion scripts and also backup copies
# which have the extensions .mos~ if they are generated from emacs
if mosFil.endswith('.mos') and (
not mosFil.startswith(
"Convert" + self.getLibraryName())):
matFil = ""
dat = {
'ScriptFile': os.path.join(root[len(os.path.join(self._libHome, 'Resources', 'Scripts', 'Dymola')) + 1:],
mosFil),
'mustSimulate': False,
'mustExportFMU': False}
# ScriptFile is something like Controls/Continuous/Examples/LimPIDWithReset.mos
# JModelica CI testing needs files below 140 characters, which includes Buildings.
# Hence, write a warning if a file name is equal to or longer than 140-9=131 characters.
if len(dat['ScriptFile']) >= 131:
self._reporter.writeError(
"""File {} is {}-character long. Reduce it to maximum of 130 characters.""".format(
dat['ScriptFile'], len(
dat['ScriptFile'])))
# _check_reference_result_file_name(dat['ScriptFile'])
# open the mos file and read its content.
# Path and name of mos file without 'Resources/Scripts/Dymola'
with open(os.path.join(root, mosFil), mode="r", encoding="utf-8-sig") as fMOS:
Lines = fMOS.readlines()
# Remove white spaces
for i in range(len(Lines)):
Lines[i] = Lines[i].replace(' ', '')
# Set some attributes in the Data object
if self._includeFile(os.path.join(root, mosFil)):
for lin in Lines:
# Add the model name to the dictionary.
# This is needed to export the model as an FMU.
# Also, set the flag mustSimulate to True.
simCom = re.search(r'simulateModel\(\s*".*"', lin)
if simCom is not None:
modNam = re.sub(r'simulateModel\(\s*"', '', simCom.string)
modNam = modNam[0:modNam.index('"')]
dat['mustSimulate'] = True
dat['model_name'] = modNam
dat['TranslationLogFile'] = modNam + ".translation.log"
# parse startTime and stopTime, if any
if dat['mustSimulate']:
for attr in ["startTime", "stopTime"]:
_get_attribute_value(lin, attr, dat)
# Check if this model needs to be translated as an FMU.
if (self._include_fmu_test and "translateModelFMU" in lin):
dat['mustExportFMU'] = True
if dat['mustExportFMU']:
for attr in ["modelToOpen", "modelName"]:
_get_attribute_value(lin, attr, dat)
# Dymola uses in translateModelFMU the syntax
# modelName=... but our dictionary uses model_name
if attr == "modelName" and "modelName" in dat:
dat["model_name"] = dat["modelName"]
del dat["modelName"]
# The .mos script allows modelName="", hence
# we set the model name to be the entry of modelToOpen
if "model_name" in dat and dat["model_name"] == "":
if "modelToOpen" in dat:
dat["model_name"] = dat["modelToOpen"]
# Get tolerance from mo file. This is used to set the tolerance
# for OPTIMICA and JModelica.
# Only get the tolerance for the models that need to be simulated,
# because those that are only exported as FMU don't need this setting.
if dat['mustSimulate']:
try:
dat['tolerance'] = self.get_tolerance(
self._libHome, dat['model_name'])
except Exception as e:
self._reporter.writeError(str(e))
dat['tolerance'] = None
# We are finished iterating over all lines of the .mos
# For FMU export, if model_name="", then Dymola uses the
# Modelica class name, with "." replaced by "_".
# If the Modelica class name consists of "_", then they
# are replaced by "_0".
# Hence, we update dat['model_name'] if needed.
if dat['mustExportFMU']:
# Strip quotes from model_name and modelToOpen
dat['FMUName'] = dat['model_name'].strip('"')
dat['modelToOpen'] = dat['modelToOpen'].strip('"')
# Update the name of the FMU if model_name is "" in .mos file.
if len(dat["FMUName"]) == 0:
dat['FMUName'] = dat['modelToOpen']
# Update the FMU name, for example to change
# Buildings.Fluid.FMI.Examples.FMUs.IdealSource_m_flow to
# Buildings_Fluid_FMI_Examples_FMUs_IdealSource_0m_0flow
dat['FMUName'] = dat['FMUName'].replace("_", "_0").replace(".", "_")
dat['FMUName'] = dat['FMUName'] + ".fmu"
# Plot variables are only used for those models that need to be simulated.
# For JModelica, if dat['jmodelica']['simulate'] == False:
# dat['ResultVariables'] is reset to [] in _add_experiment_specifications
if dat['mustSimulate']:
plotVars = []
iLin = 0
for lin in Lines:
iLin = iLin + 1
try:
y = self.get_plot_variables(lin)
if y is not None:
plotVars.append(y)
except (AttributeError, ValueError) as e:
s = "%s, line %s, could not be parsed.\n" % (mosFil, iLin)
s += "The problem occurred at the line below:\n"
s += "%s\n" % lin
s += "Make sure that each assignment of the plot command is on one line.\n"
self._reporter.writeError(s)
# Store the error, but keep going to check other lines and files
pass
if len(plotVars) == 0:
s = "%s does not contain any plot command.\n" % mosFil
s += "You need to add a plot command to include its\n"
s += "results in the regression tests.\n"
self._reporter.writeError(s)
# Store grouped plot variables without duplicates.
# (Duplicates happen when the same y variables are plotted against
# different x variables.)
dat['ResultVariables'] = []
for v_i in plotVars:
if v_i not in dat['ResultVariables']:
dat['ResultVariables'].append(v_i)
# search for the result file
for lin in Lines:
if 'resultFile=\"' in lin:
matFil = re.search(
r'(?<=resultFile=\")[a-zA-Z0-9_\.]+', lin).group()
# Add the .mat extension as this is not included in the
# resultFile entry.
matFil = matFil + '.mat'
break
if self._modelica_tool == 'optimica' or self._modelica_tool == 'jmodelica':
matFil = '{}_result.mat'.format(
re.sub(r'\.', '_', dat['model_name']))
# Some *.mos files only contain plot commands, but no simulation.
# Hence, if 'resultFile=' could not be found, try to get the file that
# is used for plotting.
# cf. BUG
if len(matFil) == 0:
for lin in Lines:
if 'filename=\"' in lin:
# Note that the filename entry already has the .mat
# extension.
matFil = re.search(
r'(?<=filename=\")[a-zA-Z0-9_\.]+', lin).group()
break
if len(matFil) == 0:
raise ValueError('Did not find *.mat file in ' + mosFil)
dat['ResultFile'] = matFil
# Some files like plotFan.mos have neither a simulateModel
# nor a translateModelFMU command.
# These must not be added to the data array.
if dat['mustSimulate'] or dat['mustExportFMU']:
self._data.append(dat)
# Make sure we found at least one unit test.
if self.get_number_of_tests() == old_len:
msg = """Did not find any regression tests in '%s'.""" % root_package
self._reporter.writeError(msg)
self._checkDataDictionary()
# Raise an error if there was any error reported.
if self._reporter.getNumberOfErrors() > 0:
raise ValueError("Error when setting up unit tests.")
# Add the experiment specifications to the data.
self._add_experiment_specifications()
return
def _add_experiment_specifications(self):
""" Add the experiment specification to the data structure.
This method reads the `Resources/Scripts/BuildingsPy/conf.json` file
and adds it to the data structure.
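An entry in ``conf.json`` may look like this (illustrative sketch; the model name
and comment are hypothetical)::
   [
     {
       "model_name": "MyLibrary.Examples.MyModel",
       "optimica": {"simulate": false, "comment": "Excluded because of tool limitations."}
     }
   ]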
"""
import copy
import json
def_dic = {
self._modelica_tool: {
'solver': 'CVode',
'translate': True,
'simulate': True,
'ncp': 500,
'time_out': 1200
}
}
for all_dat in self._data:
# Add default data
for key in def_dic.keys():
all_dat[key] = copy.deepcopy(def_dic[key])
# Get configuration data from file, if present
conf_dir = os.path.join(self._libHome, 'Resources', 'Scripts', 'BuildingsPy')
conf_file = os.path.join(conf_dir, 'conf.json')
if os.path.exists(conf_file):
with open(conf_file, 'r') as f:
conf_data = json.load(f)
# Add model specific data
for con_dat in conf_data:
for all_dat in self._data:
if con_dat['model_name'] == all_dat['model_name']:
# Add all elements of the configuration data
for key in con_dat.keys():
# Have dictionary in dictionary
if key == self._modelica_tool:
for k in con_dat[key]:
val = con_dat[key][k]
if k == 'translate':
all_dat[key][k] = val
# Write a warning if a model is not translated
if not val:
# Set simulate to false as well as it can't be simulated
# if not translated
all_dat[key]['simulate'] = False
elif k == 'simulate':
all_dat[key][k] = val
# Write a warning if a model is not simulated
if not val:
# Reset plot variables
all_dat['ResultVariables'] = []
else:
all_dat[self._modelica_tool][k] = val
else:
all_dat[key] = con_dat[key]
# Write warning if this model should not be translated or simulated.
msg = None
if all_dat[self._modelica_tool]['translate'] is False:
msg = f"{all_dat['model_name']}: Requested to be excluded from translation."
elif all_dat[self._modelica_tool]['simulate'] is False:
msg = f"{all_dat['model_name']}: Requested to be excluded from simulation."
if msg is not None:
if 'comment' in all_dat[self._modelica_tool]:
msg = f"{msg} {all_dat[self._modelica_tool]['comment']}"
self._reporter.writeOutput(msg)
def _checkDataDictionary(self):
""" Check if the data used to run the regression tests do not have duplicate ``*.fmu`` files
and ``*.mat`` names.
Since Dymola writes all ``*.fmu`` and ``*.mat`` files to the current working directory,
duplicate file names would cause a translation or simulation to overwrite the files
of a previous test. This would make it impossible to check the FMU export
and to compare the results to previously obtained results.
If duplicate ``*.fmu`` or ``*.mat`` file names are used, then this method raises
a ``ValueError`` exception.
"""
s_fmu = set()
s_mat = set()
errMes = ""
for data in self._data:
if 'ResultFile' in data:
resFil = data['ResultFile']
if data['mustSimulate']:
if resFil in s_mat:
errMes += "*** Error: Result file %s_mat is generated by more than one script.\n" \
" You need to make sure that all scripts use unique result file names.\n" % resFil
else:
s_mat.add(resFil)
for data in self._data:
if 'FMUName' in data:
fmuFil = data['FMUName']
if fmuFil in s_fmu:
errMes += "*** Error: FMU file {} is generated by more than one script.\n" \
" You need to make sure that all scripts use unique result file names.\n".format(
fmuFil)
else:
s_fmu.add(fmuFil)
if len(errMes) > 0:
raise ValueError(errMes)
def _getTimeGrid(self, tMin, tMax, nPoi):
"""
Return the time grid for the output result interpolation
:param tMin: Minimum time of the results.
:param tMax: Maximum time of the results.
:param nPoi: Number of result points.
"""
return [tMin + float(i) / (nPoi - 1) * (tMax - tMin) for i in range(nPoi)]
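# Illustrative example: self._getTimeGrid(0.0, 10.0, 5) returns [0.0, 2.5, 5.0, 7.5, 10.0].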
def _getSimulationResults(self, data, warnings, errors):
"""Get the simulation results for a single unit test.
:param data: The class that contains the data structure for the simulation results.
:param warnings: A list to which all warnings will be appended.
:param errors: A list to which all errors will be appended.
Extracts and returns the simulation results from the `*.mat` file as
a list of dictionaries. Each element of the list contains a dictionary
of results that need to be printed together.
"""
def extractData(y, step):
# Replace the last element with the last element in time,
# [::step] may not extract the last time stamp, in which case
# the final time changes when the number of events changes.
r = y[::step]
r[len(r) - 1] = y[len(y) - 1]
return r
# Get the working directory that contains the ".mat" file
fulFilNam = os.path.join(data['ResultDirectory'], self.getLibraryName(), data['ResultFile'])
if self._modelica_tool == 'optimica' or self._modelica_tool == 'jmodelica':
fulFilNam = os.path.join(data['ResultDirectory'], data['ResultFile'])
ret = []
try:
r = Reader(fulFilNam, self._modelica_tool)
except IOError as e:
errors.append("Failed to read %s generated by %s.\n%s\n" %
(fulFilNam, data['ScriptFile'], e))
return ret
except ValueError as e: # BUG #9
errors.append("Error while reading %s generated by %s.\n%s\n" %
(fulFilNam, data['ScriptFile'], e))
return ret
for pai in data['ResultVariables']: # pairs of variables that are plotted together
dat = dict()
for var in pai:
time = []
val = []
try:
var_mat = var
# Matrix variables in OPTIMICA and JModelica are stored in mat file with
# no space e.g. [1,1].
if self._modelica_tool == 'optimica' or self._modelica_tool == 'jmodelica':
var_mat = re.sub(' ', '', var_mat)
(time, val) = r.values(var_mat)
# Make time grid to which simulation results
# will be interpolated.
# This reduces the data that need to be stored.
# It also makes it easier to compare accuracy
# in case that a slight change in the location of
# state events triggered a different output interval grid.
tMin = float(min(time))
tMax = float(max(time))
nPoi = min(self._nPoi, len(val))
ti = self._getTimeGrid(tMin, tMax, nPoi)
except ZeroDivisionError as e:
s = "When processing " + fulFilNam + " generated by " + \
data['ScriptFile'] + ", caught division by zero.\n"
s += " len(val) = " + str(len(val)) + "\n"
s += " tMax-tMin = " + str(tMax - tMin) + "\n"
warnings.append(s)
break
except KeyError:
warnings.append("%s uses %s which does not exist in %s.\n" %
(data['ScriptFile'], var, data['ResultFile']))
else:
# Store time grid.
if ('time' not in dat):
dat['time'] = [tMin, tMax]
if self._isParameter(val):
dat[var] = val
else:
try:
dat[var] = Plotter.interpolate(ti, time, val)
except ValueError as e:
msg = "Failed to process {} generated by {}.\n{}\n".format(
fulFilNam, data['ScriptFile'], e)
errors.append(msg)
return ret
if len(dat) > 0:
ret.append(dat)
return ret
def _getTranslationStatistics(self, data, warnings, errors):
"""
Get the translation statistics for a single unit test.
:param data: The class that contains the data structure for the simulation results.
:param warnings: A list to which all warnings will be appended.
:param errors: A list to which all errors will be appended.
:return: The translation log from the `*.translation.log` file as
a list of dictionaries.
Extracts and returns the translation log from the `*.translation.log` file as
a list of dictionaries.
In case of an error, this method returns `None`.
"""
# Get the working directory that contains the ".log" file
fulFilNam = os.path.join(data['ResultDirectory'],
self.getLibraryName(), data['TranslationLogFile'])
return of.get_model_statistics(fulFilNam, self._modelica_tool)
def _legacy_comp(self, tOld, yOld, tNew, yNew, tGriOld, tGriNew, varNam, filNam, tol):
# Interpolate the new variables to the old time stamps
#
if len(yNew) > 2:
try:
yInt = Plotter.interpolate(tGriOld, tGriNew, yNew)
except (IndexError, ValueError):
em = (
"Data series have different length:\n"
"File=%s\n"
"variable=%s\n"
"len(tGriOld) = %d\n"
"len(tGriNew) = %d\n"
"len(yNew) = %d\n") % (filNam,
varNam,
len(tGriOld),
len(tGriNew),
len(yNew))
self._reporter.writeError(em)
raise ValueError(em)
else:
yInt = [yNew[0], yNew[0]]
# If the variable is heatPort.T or heatPort.Q_flow, with length=2, then
# it has been evaluated as a parameter in the Buildings library. In the Annex60
# library, this may be a variable as the Buildings library uses a more efficient
# implementation of the heatPort. Hence, we test for this special case, and
# store the parameter as if it were a variable so that the reference results are not
# going to be changed.
# (Not needed for funnel: can deal with len(yNew) != len(yOld))
if (varNam.endswith("heatPort.T") or varNam.endswith("heatPort.Q_flow")) and (
len(yInt) == 2) and len(yOld) != len(yInt):
yInt = np.ones(len(yOld)) * yInt[0]
# Compute error for the variable with name varNam
if len(yOld) != len(yInt):
# If yOld has two points, but yInt has more points, then
# extrapolate yOld to nPoi
t = self._getTimeGrid(tOld[0], tOld[-1], self._nPoi)
if len(yOld) == 2 and len(yInt) == self._nPoi:
t = self._getTimeGrid(t[0], t[-1], self._nPoi)
yOld = Plotter.interpolate(t, tOld, yOld)
# If yInt has only two data points, but yOld has more, then interpolate yInt
elif len(yInt) == 2 and len(yOld) == self._nPoi:
yInt = Plotter.interpolate(t, [tOld[0], tOld[-1]], yInt)
else:
raise ValueError((
"Program error, yOld and yInt have different lengths.\n"
"Result file : %s\n"
"Variable : %s\n"
"len(yOld)=%d\n"
"len(yInt)=%d\n"
"Stop processing.\n") % (filNam, varNam, len(yOld), len(yInt))
)
errAbs = np.zeros(len(yInt))
errRel = np.zeros(len(yInt))
errFun = np.zeros(len(yInt))
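# Error measure used below (descriptive summary of the implementation):
#   errAbs[i] = |yOld[i] - yInt[i]|
#   errRel[i] = errAbs[i] / |yOld[i]|  if |yOld[i]| > 10 * tol, else 0
#   errFun[i] = errAbs[i] + errRel[i]
# A warning is issued if max(errFun) > tol.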
for i in range(len(yInt)):
errAbs[i] = abs(yOld[i] - yInt[i])
if np.isnan(errAbs[i]):
raise ValueError('NaN in errAbs ' + varNam + " " + str(yOld[i])
+ " " + str(yInt[i]) + " i, N " + str(i) +
" --:" + str(yInt[i - 1])
+ " ++:", str(yInt[i + 1]))
if (abs(yOld[i]) > 10 * tol):
errRel[i] = errAbs[i] / abs(yOld[i])
else:
errRel[i] = 0
errFun[i] = errAbs[i] + errRel[i]
t_err_max, warning = 0, None
if max(errFun) > tol:
iMax = 0
eMax = 0
for i in range(len(errFun)):
if errFun[i] > eMax:
eMax = errFun[i]
iMax = i
tGri = self._getTimeGrid(tOld[0], tOld[-1], self._nPoi)
t_err_max = tGri[iMax]
warning = filNam + ": " + varNam + " has absolute and relative error = " + \
("%0.3e" % max(errAbs)) + ", " + ("%0.3e" % max(errRel)) + ".\n"
if self._isParameter(yInt):
warning += " %s is a parameter.\n" % varNam
else:
warning += " Maximum error is at t = %s\n" % str(t_err_max)
return (t_err_max, warning)
def _funnel_comp(
self,
tOld,
yOld,
tNew,
yNew,
varNam,
filNam,
model_name,
tol,
data_idx,
keep_dir=True):
"""Method calling funnel comparison tool."""
t_err_max, warning = 0, None
tmp_dir = tempfile.mkdtemp()
log_stdout = io.StringIO()
with _stdout_redirector(log_stdout):
exitcode = pyfunnel.compareAndReport(
xReference=tOld,
yReference=yOld,
xTest=tNew,
yTest=yNew,
outputDirectory=tmp_dir,
atolx=tol['ax'],
atoly=tol['ay'],
ltolx=tol['lx'],
ltoly=tol['ly'],
rtolx=tol['rx'],
rtoly=tol['ry'],
)
log_content = log_stdout.getvalue()
log_content = re.sub(r'(^.*Warning:\s+)|(Error:\s+)', '', log_content)
log_stdout.close()
if exitcode != 0:
warning = "While processing file {} for variable {}: {}".format(
filNam, varNam, log_content)
test_passed = False
funnel_success = False
else:
err_path = os.path.join(tmp_dir, 'errors.csv')
err_arr = np.genfromtxt(err_path, delimiter=',', skip_header=1).transpose()
err_max = np.max(err_arr[1]) # difference between y test value and funnel bounds
idx_err_max = np.where(err_arr[1] == err_max)[0][0]
t_err_max = err_arr[0][idx_err_max]
test_passed = (err_max == 0)
if err_max > 0:
warning = (
"{}: {} exceeds funnel tolerance with absolute error = {:.3e}. "
).format(filNam, varNam, err_max)
if self._isParameter(yOld):
warning += "{} is a parameter.\n".format(varNam)
else:
warning += "Maximum error is at t = {}\n".format(t_err_max)
funnel_success = True
if keep_dir and funnel_success:
target_path = os.path.join(self._comp_dir, '{}_{}'.format(filNam, varNam))
shutil.move(tmp_dir, target_path)
else:
target_path = None
shutil.rmtree(tmp_dir)
idx = self._init_comp_info(model_name, filNam)
self._update_comp_info(idx, varNam, target_path, test_passed, t_err_max, warning, data_idx)
return (t_err_max, warning)
def _init_comp_info(self, model_name, file_name):
"""Update self._comp_info with dict to store comparison results for model_name.
Returns: index of dict storing results for model_name.
"""
try:
idx = next(i for i, el in enumerate(self._comp_info) if el['model'] == model_name)
except StopIteration: # no model_name found in self._comp_info (case dymola): create
self._comp_info.append({
"model": model_name,
})
idx = len(self._comp_info) - 1
try:
self._comp_info[idx]["comparison"]
except KeyError: # no comparison data stored for model_name: create
self._comp_info[idx]["comparison"] = {
"variables": [],
"funnel_dirs": [],
"test_passed": [],
"file_name": file_name,
"success_rate": 0,
"var_groups": [], # index of the group of variables belonging to the same subplot
"warnings": [],
"t_err_max": [],
}
return idx
def _update_comp_info(
self,
idx,
var_name,
funnel_dir,
test_passed,
t_err_max,
warning,
data_idx,
var_group=None):
"""Store comparison info for var_name in self._comp_info."""
# NOTE: data_idx can differ from idx if simulation failed or variable not available.
should_update = True
if var_group is None:
try:
var_group = next(
iv for iv, vl in enumerate(
self._data[data_idx]["ResultVariables"]) if var_name in vl)
except StopIteration:
if warning == 'skip':
should_update = False
else:
warning = ("Variable {} not found in ResultVariables for model {}. "
"However it was found in reference results file.\n").format(
var_name, self._comp_info[idx]['model'])
self._reporter.writeWarning(warning)
if should_update:
self._comp_info[idx]["comparison"]["variables"].append(var_name)
self._comp_info[idx]["comparison"]["funnel_dirs"].append(funnel_dir)
self._comp_info[idx]["comparison"]["test_passed"].append(
int(test_passed)) # Boolean not JSON serializable
self._comp_info[idx]["comparison"]["t_err_max"].append(t_err_max)
self._comp_info[idx]["comparison"]["warnings"].append(warning)
self._comp_info[idx]["comparison"]["var_groups"].append(var_group)
self._comp_info[idx]["comparison"]["success_rate"] = sum(
self._comp_info[idx]["comparison"]["test_passed"]) / len(self._comp_info[idx]["comparison"]["variables"])
return None
def areResultsEqual(self, tOld, yOld, tNew, yNew, varNam, data_idx):
""" Return `True` if the data series are equal within a tolerance.
:param tOld: List of old time values.
:param yOld: Old simulation results.
:param tNew: Time stamps of new results.
:param yNew: New simulation results.
:param varNam: Variable name, used for reporting.
:param data_idx: Index of the entry in ``self._data`` that is being compared; used to
look up the result file and model name for reporting.
:return: A tuple ``(test_passed, t_err_max, error)``, where ``test_passed`` is ``False``
if the results are not equal, ``t_err_max`` is the time of the maximum error,
and ``error`` is an error message or `None`.
In case of errors, the time of the maximum error may be `None`.
"""
try:
filNam = self._data[data_idx]['ResultFile']
model_name = self._data[data_idx]['model_name']
except BaseException:
filNam = 'Undefined file name'
model_name = 'Undefined model name'
def getTimeGrid(t, nPoi=self._nPoi):
if len(t) == 2:
return self._getTimeGrid(t[0], t[-1], nPoi)
elif len(t) == nPoi:
return t
else:
s = ("While processing file {} for variable {}: The new time grid has {} points "
"but it must have 2 or {} points.\n"
"Stop processing.\n").format(
filNam,
varNam,
len(tNew),
nPoi)
raise ValueError(s)
# Check if the first and last time stamp are equal
def test_equal_time(t1, t2, tol=1E-6):
"""Test if time values are equal within a given tolerance.
t1, t2 and tol are floats.
Returns Boolean value equal to test result.
If t1 is close to 0, the tolerance is considered as absolute.
Otherwise, the tolerance is considered as relative to abs(t1).
"""
if abs(t1) <= tol:
res = abs(t1 - t2) <= tol
else:
res = abs(t1 - t2) <= tol * abs(t1)
return res
if not test_equal_time(tOld[0], tNew[0]):
error = (
"While processing file {} for variable {}: Different start time between "
"reference and test data.\n"
"Old reference points are for {} <= t <= {}\n"
"New reference points are for {} <= t <= {}\n").format(
filNam, varNam, tOld[0], tOld[len(tOld) - 1], tNew[0], tNew[len(tNew) - 1])
test_passed = False
t_err_max = min(tOld[0], tNew[0])
else: # Overwrite tOld with tNew to prevent any exception raised by the comparison tool.
tOld[0] = tNew[0]
if not test_equal_time(tOld[-1], tNew[-1]):
error = (
"While processing file {} for variable {}: Different end time between "
"reference and test data.\n"
"tNew = [{}, {}]\n"
"tOld = [{}, {}]\n").format(filNam, varNam, tNew[0], tNew[-1], tOld[0], tOld[-1])
test_passed = False
t_err_max = min(tOld[-1], tNew[-1])
else: # Overwrite tOld with tNew to prevent any exception raised by the comparison tool.
tOld[-1] = tNew[-1]
# The next test may be true if a simulation stopped with an error prior to
# producing sufficient data points
if len(yNew) < len(yOld) and len(yNew) > 2:
error = (
"While processing file {} for variable {}: Fewer data points than reference results.\n"
"len(yOld) = {}\n"
"len(yNew) = {}\n"
"Skipping error checking for this variable.\n").format(
filNam, varNam, len(yOld), len(yNew))
test_passed = False
t_err_max = None
if self._comp_tool == 'legacy':
if len(yNew) > 2:
# Some reference results contain already a time grid,
# whereas others only contain the first and last time stamp.
# Hence, we make sure to have the right time grid before we
# call the interpolation.
tGriOld = getTimeGrid(tOld, len(yNew))
tGriNew = getTimeGrid(tNew, min(len(yNew), self._nPoi))
else:
tGriOld = tOld
tGriNew = tNew
elif self._comp_tool == 'funnel':
# funnel_comp only needs len(t) = len(y) for Old and New time series
if len(yNew) > 2:
tNew = getTimeGrid(tNew, len(yNew))
if len(yOld) > 2:
tOld = getTimeGrid(tOld, len(yOld))
if self._comp_tool == 'legacy':
try: # In case an error has been raised before: no comparison performed.
error
except NameError:
t_err_max, error = self._legacy_comp(
tOld, yOld, tNew, yNew, tGriOld, tGriNew, varNam, filNam, self._tol['ay'])
else:
idx = self._init_comp_info(model_name, filNam)
comp_tmp = self._comp_info[idx]['comparison']
try:
# Check if the variable has already been tested.
# (This might happen if the variable is used in different plots.)
# In this case we do not want to perform the comparison again but we still want the variable to be
# plotted several times as it was originally intended: update _comp_info
# with stored data.
var_idx = comp_tmp['variables'].index(varNam)
fun_dir = comp_tmp['funnel_dirs'][var_idx]
test_passed = comp_tmp['test_passed'][var_idx]
# variable group already stored for this variable
var_group_str = comp_tmp['var_groups'][var_idx]
# Now looking for the new variable group to be stored.
var_group = var_group_str + 1 + next(iv for iv, vl in enumerate(
self._data[data_idx]["ResultVariables"][(var_group_str + 1):]) if varNam in vl)
error = comp_tmp['warnings'][var_idx]
t_err_max = comp_tmp['t_err_max'][var_idx]
self._update_comp_info(
idx,
varNam,
fun_dir,
test_passed,
t_err_max,
error,
data_idx,
var_group)
except (ValueError, StopIteration):
try: # In case an error has been raised before: no comparison performed.
self._update_comp_info(
idx, varNam, None, test_passed, t_err_max, error, data_idx)
except NameError:
t_err_max, error = self._funnel_comp(
tOld, yOld, tNew, yNew, varNam, filNam, model_name, self._tol, data_idx)
test_passed = True
if error is not None:
test_passed = False
return (test_passed, t_err_max, error)
def _isParameter(self, dataSeries):
""" Return `True` if `dataSeries` is from a parameter.
"""
import numpy as np
if not (isinstance(dataSeries, np.ndarray) or isinstance(dataSeries, list)):
raise TypeError("Program error: dataSeries must be a numpy.ndarr or a list. Received type "
+ str(type(dataSeries)) + ".\n")
return (len(dataSeries) == 2)
def format_float(self, value):
""" Return the argument in exponential notation, with
non-significant zeros removed.
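For example (illustrative), ``format_float(1.25)`` returns ``'1.25e+00'`` and
``format_float(1.0)`` returns ``'1e+00'``.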
"""
import re
return re.sub(re.compile(r'\.e'), 'e',
re.sub(re.compile('0*e'), 'e', "{0:.15e}".format(value)))
def _writeReferenceResults(self, refFilNam, y_sim, y_tra):
""" Write the reference results.
:param refFilNam: The name of the reference file.
:param y_sim: The data points to be written to the file.
:param y_tra: The dictionary with the translation log.
This method writes the results in the form ``key=value``, with one line per entry.
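For example (illustrative sketch; variable names and values are hypothetical),
the written file may contain lines such as::
   last-generated=2020-01-01
   statistics-simulation=
   {
     "linear": " ",
     "nonlinear": "1, 2",
     "numerical Jacobians": "0"
   }
   damper.y=[0e+00, 5e-01, 1e+00]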
"""
from datetime import date
import json
with open(refFilNam, mode="w", encoding="utf-8") as f:
f.write('last-generated=' + str(date.today()) + '\n')
for stage in ['initialization', 'simulation', 'fmu-dependencies']:
if stage in y_tra:
# f.write('statistics-%s=\n%s\n' % (stage, _pretty_print(y_tra[stage])))
f.write('statistics-%s=\n%s\n' % (stage, json.dumps(y_tra[stage],
indent=2,
separators=(',', ': '),
sort_keys=True)))
# FMU exports do not have simulation results.
# Hence, we skip them if y_sim is None
if y_sim is not None:
# Set, used to avoid that data series that are plotted in two plots are
# written twice to the reference data file.
s = set()
for pai in y_sim:
for k, v in list(pai.items()):
if k not in s:
s.add(k)
f.write(k + '=')
# Use many digits, otherwise truncation errors occur that can be higher
# than the required accuracy.
formatted = [str(self.format_float(e)) for e in v]
f.write(str(formatted).replace("'", ""))
f.write('\n')
def _readReferenceResults(self, refFilNam):
""" Read the reference results.
:param refFilNam: The name of the reference file.
:return: A dictionary with the reference results.
If the simulation statistics was found in the reference results,
then the return value also has an entry
`statistics-simulation={'numerical Jacobians': '0', 'nonlinear': ' ', 'linear': ' '}`,
where the value is a dictionary. Otherwise, this key is not present.
"""
import numpy
import ast
d = dict()
with open(refFilNam, mode="r", encoding="utf-8-sig") as f:
lines = f.readlines()
# Compute the number of the first line that contains the results
iSta = 0
for iLin in range(min(2, len(lines))):
if "svn-id" in lines[iLin]:
iSta = iSta + 1
if "last-generated" in lines[iLin]:
iSta = iSta + 1
r = dict()
iLin = iSta
while iLin < len(lines):
lin = lines[iLin].strip('\n')
try:
(key, value) = lin.split("=")
# Check if this is a statistics-* entry.
if key.startswith("statistics-"):
# Call ast.literal_eval as value is a string that needs to be
# converted to a dictionary.
# The json string was pretty printed over several lines.
# Add the next line to value, unless it contains "=" or it does not exist.
value = value.strip()
while (iLin < len(lines) - 1 and lines[iLin + 1].find('=') == -1):
value = value + lines[iLin + 1].strip('\n').strip()
iLin += 1
d[key] = ast.literal_eval(value)
else:
s = (value[value.find('[') + 1: value.rfind(']')]).strip()
numAsStr = s.split(',')
val = []
for num in numAsStr:
# We need to use numpy.float64 here for the comparison to work
val.append(numpy.float64(num))
r[key] = val
except ValueError as detail:
s = "%s could not be parsed.\n" % refFilNam
self._reporter.writeError(s)
raise TypeError(detail)
iLin += 1
d['results'] = r
return d
def _askNoReferenceResultsFound(self, yS, refFilNam, ans):
""" Ask user what to do if no reference data were found
:param yS: A list where each element is a dictionary of variable names and simulation
results that are to be plotted together.
:param refFilNam: Name of reference file (used for reporting only).
:param ans: A previously entered answer, either ``y``, ``Y``, ``n`` or ``N``.
:return: A triple ``(updateReferenceData, foundError, ans)`` where ``updateReferenceData``
and ``foundError`` are booleans, and ``ans`` is ``y``, ``Y``, ``n`` or ``N``.
"""
updateReferenceData = False
foundError = False
if len(yS) > 0:
sys.stdout.write(
"*** Warning: The old reference data had no results, but the new simulation produced results\n")
sys.stdout.write(" for %s\n" % refFilNam)
sys.stdout.write(" Accept new results?\n")
while not (ans == "n" or ans == "y" or ans == "Y" or ans == "N"):
ans = input(" Enter: y(yes), n(no), Y(yes for all), N(no for all): ")
if ans == "y" or ans == "Y":
# update the flag
updateReferenceData = True
return (updateReferenceData, foundError, ans)
def _check_statistics(self, old_res, y_tra, stage, foundError, newStatistics, mat_file_name):
""" Checks the simulation or translation statistics and return
`True` if there is a new statistics, or a statistics is no longer present, or if `newStatistics == True`.
"""
r = newStatistics
if 'statistics-%s' % stage in old_res:
# Found old statistics.
# Check whether the new results have also such a statistics.
if stage in y_tra:
# Check whether it changed.
for key in old_res['statistics-%s' % stage]:
if key in y_tra[stage]:
if not self.are_statistics_equal(
old_res['statistics-%s' % stage][key], y_tra[stage][key]):
if foundError:
self._reporter.writeWarning("%s: Translation statistics for %s and results changed for %s.\n Old = %s\n New = %s"
% (mat_file_name, stage, key, old_res['statistics-%s' % stage][key], y_tra[stage][key]))
else:
self._reporter.writeWarning("%s: Translation statistics for %s changed for %s, but results are unchanged.\n Old = %s\n New = %s"
% (mat_file_name, stage, key, old_res['statistics-%s' % stage][key], y_tra[stage][key]))
r = True
else:
self._reporter.writeWarning("%s: Found translation statistics for %s for %s in old but not in new results.\n Old = %s"
% (mat_file_name, stage, key, old_res['statistics-%s' % stage][key]))
r = True
for key in y_tra[stage]:
if key not in old_res['statistics-%s' % stage]:
self._reporter.writeWarning(
"%s: Found translation statistics for key %s in %s in new but not in old results." %
(mat_file_name, key, stage))
r = True
else:
# The new results have no such statistics.
self._reporter.writeWarning(
"%s: Found translation statistics for %s in old but not in new results." %
(mat_file_name, stage))
r = True
else:
# The old results have no such statistics.
if stage in y_tra:
# The new results have such statistics, hence the statistics changed.
self._reporter.writeWarning(
"%s: Found translation statistics for %s in new but not in old results." %
(mat_file_name, stage))
r = True
return r
def _compareResults(self, data_idx, oldRefFulFilNam, y_sim, y_tra, refFilNam, ans):
""" Compares the new and the old results.
:param data_idx: Index of the entry in ``self._data`` that is being compared.
:param oldRefFulFilNam: File name, including path, of the old reference file.
:param y_sim: A list where each element is a dictionary of variable names and simulation
results that are to be plotted together.
:param y_tra: A dictionary with the translation statistics.
:param refFilNam: Name of the file with reference results (used for reporting only).
:param ans: A previously entered answer, either ``y``, ``Y``, ``n`` or ``N``.
:return: A triple ``(updateReferenceData, foundError, ans)`` where ``updateReferenceData``
and ``foundError`` are booleans, and ``ans`` is ``y``, ``Y``, ``n`` or ``N``.
"""
matFilNam = self._data[data_idx]['ResultFile']
model_name = self._data[data_idx]['model_name']
# Reset answer, unless it is set to Y or N
if not (ans == "Y" or ans == "N"):
ans = "-"
updateReferenceData = False
# If previously the user chose to update all reference data, then
# we set updateReferenceData = True
if ans == "Y":
updateReferenceData = True
foundError = False
verifiedTime = False
# Load the old data (in dictionary format)
old_results = self._readReferenceResults(oldRefFulFilNam)
# Numerical results of the simulation
y_ref = old_results['results']
if len(y_ref) == 0:
return self._askNoReferenceResultsFound(y_sim, refFilNam, ans)
# The old data contains results
t_ref = y_ref.get('time')
# Iterate over the pairs of data that are to be plotted together
timOfMaxErr = dict()
noOldResults = [] # List of variables for which no old results have been found
list_var_ref = [el for el in y_ref.keys() if not re.search('time', el, re.I)]
list_var_sim = [el for gr in y_sim for el in gr.keys() if not re.search('time', el, re.I)]
for var in list_var_ref: # reference variables not available in simulation results
if var not in list_var_sim:
idx = self._init_comp_info(model_name, matFilNam)
# We skip warning considering it is only the case for x variables against which y variables
# are plotted.
self._update_comp_info(idx, var, None, False, 0, 'skip', data_idx)
for pai in y_sim:
t_sim = pai['time']
if not verifiedTime:
verifiedTime = True
# The time interval is the same for the stored and the current data.
# Check the accuracy of the simulation.
for varNam in list(pai.keys()):
# Iterate over the variable names that are to be plotted together
if varNam != 'time':
if varNam in y_ref:
# Check results
if self._isParameter(pai[varNam]):
t = [min(t_sim), max(t_sim)]
else:
t = t_sim
# Compare times series.
(res, timMaxErr, error) = self.areResultsEqual(
t_ref, y_ref[varNam], t, pai[varNam], varNam, data_idx
)
if error:
self._reporter.writeError(error)
if not res:
foundError = True
timOfMaxErr[varNam] = timMaxErr
else:
# There is no old data series for this variable name
self._reporter.writeError(
"{}: Did not find variable {} in old results.".format(
refFilNam, varNam))
foundError = True
noOldResults.append(varNam)
# Compare the simulation statistics
# There are these cases:
# 1. The old reference results have no statistics, in which case new results may be written.
# 2. The old reference results have statistics, and they are the same or different.
# Statistics of the simulation model
newStatistics = False
if self._modelica_tool == 'dymola':
for stage in ['initialization', 'simulation']:
# Updated newStatistics if there is a new statistic. The other
# arguments remain unchanged.
newStatistics = self._check_statistics(
old_results, y_tra, stage, foundError, newStatistics, matFilNam)
# If the users selected "Y" or "N" (to not accept or reject any new results) in previous tests,
# or if the script is run in batch mode, then don't plot the results.
# If we found an error, plot the results, and ask the user to accept or
# reject the new values.
if (foundError or newStatistics) and (not self._batch) and (
not ans == "N") and (not ans == "Y"):
print(" For {},".format(refFilNam))
print(" accept new file and update reference files?")
if self._comp_tool == 'legacy':
print("(Close plot window to continue.)")
self._legacy_plot(y_sim, t_ref, y_ref, noOldResults, timOfMaxErr, matFilNam)
else:
self._funnel_plot(model_name)
while not (ans == "n" or ans == "y" or ans == "Y" or ans == "N"):
ans = input(" Enter: y(yes), n(no), Y(yes for all), N(no for all): ")
if ans == "y" or ans == "Y":
# update the flag
updateReferenceData = True
return (updateReferenceData, foundError, ans)
def _funnel_plot(self, model_name, browser=None):
"""Plot comparison results generated by pyfunnel."""
idx = next(i for i, el in enumerate(self._comp_info) if el['model'] == model_name)
comp_data = self._comp_info[idx]['comparison']
dict_var_info = defaultdict(list)
for iv, v in enumerate(comp_data['variables']):
dict_var_info[v].append({'group': comp_data['var_groups'][iv],
'dir': comp_data['funnel_dirs'][iv]})
# Build a list of files to use for testing server request in pyfunnel.
# We check whether these files are available in the file system.
list_files = []
for d in dict_var_info.values():
if d[0]['dir'] is not None:
for el in ['reference.csv', 'test.csv', 'errors.csv']:
file_path = os.path.join(d[0]['dir'], el)
if os.path.isfile(file_path):
list_files.append(file_path)
# If no comparison results available in the file system, no plot.
if len(list_files) == 0:
return
# Customize the plot.
plot_title = comp_data['file_name']
max_plot_per100 = 4
height = 100 * \
(1 + max(0, max(comp_data['var_groups']) - max_plot_per100) / max_plot_per100)
err_plot_height = 0.18 * 100 / height
# Populate the plot template.
with open(self._PLOT_TEMPLATE, 'r') as f:
template = f.read()
content = re.sub(r'\$PAGE_TITLE', plot_title, template)
content = re.sub(r'\$TITLE', plot_title, content)
content = re.sub(r'\$DICT_VAR_INFO', json.dumps(dict_var_info), content)
content = re.sub(r'\$HEIGHT', '{}%'.format(height), content)
content = re.sub(r'\$ERR_PLOT_HEIGHT', str(err_plot_height), content)
# Launch the local server.
server = pyfunnel.MyHTTPServer(('', 0), pyfunnel.CORSRequestHandler,
str_html=content, url_html='funnel')
# Start the browser instance.
server.browse(list_files, browser=browser)
def _legacy_plot(self, y_sim, t_ref, y_ref, noOldResults, timOfMaxErr, matFilNam):
"""Plot comparison results generated by legacy comparison algorithm."""
nPlo = len(y_sim)
iPlo = 0
plt.clf()
for pai in y_sim:
iPlo += 1
plt.subplot(nPlo, 1, iPlo)
# Iterate over the variable names that are to be plotted together
color = ['k', 'r', 'b', 'g', 'c', 'm']
iPai = -1
t_sim = pai['time']
for varNam in list(pai.keys()):
iPai += 1
if iPai > len(color) - 1:
iPai = 0
if varNam != 'time':
if self._isParameter(pai[varNam]):
plt.plot([min(t_sim), max(t_sim)], pai[varNam],
color[iPai] + '-', label='New ' + varNam)
else:
plt.plot(self._getTimeGrid(t_sim[0], t_sim[-1], len(pai[varNam])),
pai[varNam],
color[iPai] + '-', label='New ' + varNam)
# Test to make sure that this variable has been found in the old results
if noOldResults.count(varNam) == 0:
if self._isParameter(y_ref[varNam]):
# For parameters, don't just draw a dot, as dots are hard to see
# since they lie on the plot boundary.
plt.plot([min(t_ref), max(t_ref)], y_ref[varNam],
color[iPai] + 'x', markersize=10, label='Old ' + varNam)
else:
plt.plot(self._getTimeGrid(t_ref[0], t_ref[-1], len(y_ref[varNam])),
y_ref[varNam],
color[iPai] + '.', label='Old ' + varNam)
# Plot the location of the maximum error
if varNam in timOfMaxErr:
plt.axvline(x=timOfMaxErr[varNam])
leg = plt.legend(loc='right', fancybox=True)
leg.get_frame().set_alpha(0.5) # transparent legend
plt.xlabel('time')
plt.grid(True)
if iPlo == 1:
plt.title(matFilNam)
# Store the graphic objects.
# The first plot is shown using the default size.
# Afterwards, the plot is resized to have the same size as
# the previous plot.
gcf = plt.gcf()
if self._figSize is not None:
gcf.set_size_inches(self._figSize, forward=True)
# Display the plot
plt.show()
# Store the size for reuse in the next plot.
self._figSize = gcf.get_size_inches()
def are_statistics_equal(self, s1, s2):
""" Compare the simulation statistics `s1` and `s2` and
return `True` if they are equal, or `False` otherwise.
"""
x = s1.strip()
y = s2.strip()
if x == y:
return True
# If they have a comma, such as from 1, 20, 1, 14, then split it,
# sort it, and compare the entries for equality
def g(s): return s.replace(" ", "").split(",")
# Sort and remove 0, as we are not interested in these equations because
# they are solved explicitly
sp1 = [x for x in sorted(g(x)) if x != '0']
sp2 = [x for x in sorted(g(y)) if x != '0']
# If the lists have different lengths, they are not equal
if len(sp1) != len(sp2):
return False
# They are of equal lengths, compare each element
for i in range(len(sp1)):
if sp1[i] != sp2[i]:
return False
return True
def _compare_and_rewrite_fmu_dependencies(
self,
new_dependencies,
reference_file_path,
reference_file_name,
ans):
""" Compares whether the ``.fmu`` dependencies have been changed.
If they are the same, this function does nothing.
If they do not exist in the reference results, it asks whether to generate them.
If they differ from the reference results, it asks whether to accept the new ones.
:param new_dependencies: A dictionary with the new dependencies.
:param reference_file_path: Path to the file with reference results.
:param reference_file_name: Name of the file with reference results.
:param ans: A previously entered answer, either ``y``, ``Y``, ``n`` or ``N``.
:return: A tuple consisting of a boolean ``updated_reference_data`` and the value of ``ans``.
"""
# Absolute path to the reference file
abs_ref_fil_nam = os.path.join(reference_file_path, reference_file_name)
# Put dependencies in data format needed to write to the reference result file
y_tra = dict()
y_tra['fmu-dependencies'] = new_dependencies
# Check whether the reference results exist.
if not os.path.exists(abs_ref_fil_nam):
print("Warning ***: Reference file {} does not yet exist.".format(reference_file_name))
while not (ans == "n" or ans == "y" or ans == "Y" or ans == "N"):
print(" Create new file?")
ans = input(" Enter: y(yes), n(no), Y(yes for all), N(no for all): ")
if ans == "y" or ans == "Y":
self._writeReferenceResults(abs_ref_fil_nam, None, y_tra)
self._reporter.writeOutput("Wrote new reference file %s." %
reference_file_name)
else:
self._reporter.writeError("Did not write new reference file %s." %
reference_file_name)
return [True, ans]
# The file that may contain the reference results exists.
old_dep = self._readReferenceResults(abs_ref_fil_nam)
# Check whether it contains a key 'statistics-fmu-dependencies'
if 'statistics-fmu-dependencies' in old_dep:
# Compare the statistics for each section
found_differences = False
for typ in ['InitialUnknowns', 'Outputs', 'Derivatives']:
if old_dep['statistics-fmu-dependencies'][typ] != new_dependencies[typ]:
print(
"*** Warning: Reference file {} has different FMU statistics for '{}'.".format(reference_file_name, typ))
found_differences = True
if found_differences:
while not (ans == "n" or ans == "y" or ans == "Y" or ans == "N"):
print(" Rewrite file?")
ans = input(" Enter: y(yes), n(no), Y(yes for all), N(no for all): ")
if ans == "y" or ans == "Y":
self._writeReferenceResults(abs_ref_fil_nam, None, y_tra)
self._reporter.writeWarning(
"*** Warning: Rewrote reference file %s due to new FMU statistics." %
reference_file_name)
return [found_differences, ans]
else:
# The old file has no statistics. Ask to rewrite it.
print("*** Warning: Reference file {} has no FMU statistics.".format(reference_file_name))
while not (ans == "n" or ans == "y" or ans == "Y" or ans == "N"):
print(" Rewrite file?")
ans = input(" Enter: y(yes), n(no), Y(yes for all), N(no for all): ")
if ans == "y" or ans == "Y":
self._writeReferenceResults(abs_ref_fil_nam, None, y_tra)
self._reporter.writeWarning(
"*** Warning: Rewrote reference file %s as the old one had no FMU statistics." %
reference_file_name)
return [True, ans]
def _check_fmu_statistics(self, ans):
""" Check the fmu statistics from each regression test and compare it with the previously
saved statistics stored in the library home folder.
If the statistics differs,
show a warning message containing the file name and path.
If there is no statistics stored in the reference results in the library home folder,
ask the user whether it should be generated.
This function returns 1 if the statistics differ, or if the ``.fmu`` file
is not found. The function returns 0 if there were no problems.
"""
import buildingspy.fmi as fmi
retVal = 0
# Check if the directory
# "self._libHome\\Resources\\ReferenceResults\\Dymola" exists, if not
# create it.
refDir = os.path.join(self._libHome, 'Resources', 'ReferenceResults', 'Dymola')
if not os.path.exists(refDir):
os.makedirs(refDir)
for data in self._data:
# Name of the reference file, which is the same as the MATLAB file name but with another extension.
# Only check data for FMU export.
if self._includeFile(data['ScriptFile']) and data['mustExportFMU']:
# Convert 'aa/bb.mos' to 'aa_bb.txt'
mosFulFilNam = os.path.join(self.getLibraryName(), data['ScriptFile'])
mosFulFilNam = mosFulFilNam.replace(os.sep, '_')
refFilNam = os.path.splitext(mosFulFilNam)[0] + ".txt"
fmu_fil = os.path.join(data['ResultDirectory'],
self.getLibraryName(), data['FMUName'])
try:
# Get the new dependency
dep_new = fmi.get_dependencies(fmu_fil)
# Compare it with the stored results, and update the stored results if
# needed and requested by the user.
[updated_reference_data, ans] = self._compare_and_rewrite_fmu_dependencies(
dep_new, refDir, refFilNam, ans)
# Reset answer, unless it is set to Y or N
if not (ans == "Y" or ans == "N"):
ans = "-"
if updated_reference_data:
retVal = 1
except UnicodeDecodeError as e:
em = "UnicodeDecodeError: {}.\n".format(e)
em += "Output file of " + data['ScriptFile'] + " is excluded from unit tests.\n"
em += "The model appears to contain a non-asci character\n"
em += "in the comment of a variable, parameter or constant.\n"
em += "Check " + data['ScriptFile'] + " and the classes it instanciates.\n"
self._reporter.writeError(em)
except IOError as e:
em = "IOError({0}): {1}.\n".format(e.errno, e)
em += "Output file of " + data['ScriptFile'] + \
" is excluded from unit tests because\n"
em += "the file " + fmu_fil + " does not exist\n."
self._reporter.writeError(em)
return retVal
def _get_jmodelica_warnings(self, error_text, model):
""" Return a list with all JModelica warnings
"""
import re
lis = list()
# Search for all warnings
for k, v in list(self._error_dict.get_dictionary().items()):
# Search in each line of the error file
for lin in error_text:
# JModelica/ThirdParty/MSL/Modelica/Media/package.mo has an erroneous 'each'
# which we skip in our testing
if ("Ignoring erroneous 'each' for the modification ' = reference_X'" in lin) or \
("Ignoring erroneous 'each' for the modification ' = fill(0,0)'" in lin) or \
("""Ignoring erroneous 'each' for the modification ' = {","}'""" in lin):
break
# Ignore warnings of the form Iteration variable "der(xxx)" is missing start value!
# if re.search(r"""Iteration variable "der\(\S|.\)" is missing start value!""", lin):
# break
if v['tool_message'] in lin:
# Found a warning. Report it to the reporter, and add it to the list that will be written to
# the json file.
# self._reporter.writeWarning(v["model_message"].format(model))
msg = lin.strip(' \n')
self._reporter.writeWarning("{}: {}".format(model, msg))
lis.append(msg)
self._error_dict.increment_counter(k)
# Return a list with all warnings
return lis
def _get_simulation_record(self, simulation_text):
""" Return total number of Jacobian evaluations, state events, and elapsed cpu time
when unit tests are run with OPTIMICA or JModelica
"""
jacobianNumber = 0
stateEvents = 0
elapsedTime = 0
for lin in simulation_text:
if ("Number of Jacobian evaluations" in lin):
temp = lin.split(":")
jacobianNumber = int(temp[1].strip())
if ("Number of state events" in lin):
temp = lin.split(":")
stateEvents = int(temp[1].strip())
if ("Elapsed simulation time" in lin):
temp = lin.split(":")
temp1 = temp[1].split()
elapsedTime = float(temp1[0])
res = {'jacobians': jacobianNumber,
'state_events': stateEvents,
'elapsed_time': elapsedTime}
return res
def _verify_jmodelica_runs(self):
""" Check the results of the OPTIMICA and JModelica tests.
This function returns 0 if no errors occurred,
or a positive non-zero number otherwise.
"""
iTra = 0
iSim = 0
iOmiSim = 0
# Iterate over directories
all_res = []
for d in self._temDir:
# Iterate over json files
# The python files have names such as class_class_class.py
for fil in glob.glob("{}{}*_*.py".format(d, os.path.sep)):
# Check if there is a corresponding json file
json_name = fil.replace(".py", "_buildingspy.json")
if not os.path.exists(json_name):
em = "Did not find {}. Is the program properly installed?".format(json_name)
stdOutFil = os.path.abspath('stdout')
if os.path.exists(stdOutFil):
with open(stdOutFil, 'r', encoding="utf-8-sig") as tem:
for lin in tem:
em = em + "**** stdout file: {}\n".format(lin)
em = em + "**** end of stdout file\n"
self._reporter.writeError(em)
iTra = iTra + 1
else:
with open(json_name, 'r', encoding="utf-8-sig") as json_file:
res = json.load(json_file)
# Get warnings from stdout that was captured from the compilation
if 'stdout' in res['translation']:
warnings = self._get_jmodelica_warnings(
error_text=res['translation']['stdout'],
model=res['model'])
res['translation']['warnings'] = warnings
# We don't need the stdout anymore, which can be long.
del res['translation']['stdout']
# Get number of Jacobian evaluations from stdout that was captured from
# the simulation
if 'stdout' in res['simulation']:
jmRecord = self._get_simulation_record(
simulation_text=res['simulation']['stdout'])
res['simulation']['jacobians'] = jmRecord['jacobians']
res['simulation']['state_events'] = jmRecord['state_events']
res['simulation']['elapsed_time'] = jmRecord['elapsed_time']
# We don't need the stdout anymore, which can be long.
del res['simulation']['stdout']
all_res.append(res)
if not res['translation']['success']:
em = "Translation of {} failed.".format(res['model'])
self._reporter.writeError(em)
iTra = iTra + 1
elif not res['simulation']['success']:
# Check if simulation was omitted based on the configuration.
if 'message' in res['simulation'] and \
res['simulation']['message'] == 'No simulation requested.':
# Write a message, except if this model is for FMU export only
# Get the info from the data structure that has the experiment
# specification.
mustExportFMU = False
model_name = res['model']
for ele in self._data:
if ele['model_name'] == model_name:
if ele['mustExportFMU']:
mustExportFMU = True
break
if not mustExportFMU:
# This is a model that usually should be simulated,
# and not only a model that needs to be exported as an FMU
print("*** Did not simulate {}".format(res['model']))
iOmiSim = iOmiSim + 1
else:
em = "Simulation of {} failed with {}.".format(
res['model'], res["simulation"]["exception"])
self._reporter.writeError(em)
iSim = iSim + 1
if iTra > 0:
print("\nNumber of models that failed translation : {}".format(iTra))
if iSim > 0:
print("\nNumber of models that translated but failed simulation : {}".format(iSim))
if iOmiSim > 0:
print("\nNumber of models that configuration excluded from simulation : {}".format(iOmiSim))
# Write all results to simulator log file
with open(self._simulator_log_file, 'w', encoding="utf-8-sig") as sim_log:
sim_log.write("{}\n".format(json.dumps(all_res, indent=2, sort_keys=True)))
return self._writeSummaryMessages()
def _get_size_dir(self, start_path):
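""" Return the total size, in bytes, of all files below the directory ``start_path``. """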
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def _checkReferencePoints(self, ans):
""" Check reference points from each regression test and compare it with the previously
saved reference points of the same test stored in the library home folder.
If all the reference points are not within a certain tolerance with the previous results,
show a warning message containing the file name and path.
If there is no ``.mat`` file of the reference points in the library home folder,
ask the user whether it should be generated.
This function returns ``1`` if reading reference results or reading the translation
statistics failed. In this case, the calling method should not attempt to do
further processing. The function returns ``0`` if there were no problems. In
case of wrong simulation results, this function also returns ``0``, as this is
not considered an error in executing this function.
"""
# Check if the directory
# "self._libHome\\Resources\\ReferenceResults\\Dymola" exists, if not
# create it.
refDir = os.path.join(self._libHome, 'Resources', 'ReferenceResults', 'Dymola')
if not os.path.exists(refDir):
os.makedirs(refDir)
ret_val = 0
for data_idx, data in enumerate(self._data):
# Only check data that need to be simulated. This excludes the FMU export
# from this test.
# Note for OPTIMICA and JModelica: data['jmodelica']['simulate']=True is
# an additional condition.
check_condition = self._includeFile(data['ScriptFile']) and data['mustSimulate']
if self._modelica_tool == 'optimica' or self._modelica_tool == 'jmodelica':
check_condition = check_condition and data[self._modelica_tool]['simulate']
if check_condition:
get_user_prompt = True
# Convert 'aa/bb.mos' to 'aa_bb.txt'
mosFulFilNam = os.path.join(self.getLibraryName(), data['ScriptFile'])
mosFulFilNam = mosFulFilNam.replace(os.sep, '_')
refFilNam = os.path.splitext(mosFulFilNam)[0] + ".txt"
try:
# extract simulation results from the ".mat" file corresponding to "filNam"
warnings = []
errors = []
# Get the simulation results
y_sim = self._getSimulationResults(data, warnings, errors)
# Get the translation statistics
if self._modelica_tool == 'dymola':
y_tra = self._getTranslationStatistics(data, warnings, errors)
else:
y_tra = None
for entry in warnings:
self._reporter.writeWarning(entry)
for entry in errors:
self._reporter.writeError(entry)
if len(errors) > 0:
# If there were errors when getting the results or translation statistics
# update self._comp_info to log errors and turn flags to return
matFilNam = data['ResultFile']
model_name = data['model_name']
idx = self._init_comp_info(model_name, matFilNam)
list_var_ref = [el for gr in data['ResultVariables'] for el in gr]
for iv, var_ref in enumerate(list_var_ref):
if iv == 0:
self._update_comp_info(
idx,
var_ref,
None,
False,
0,
'Translation, simulation or extracting simulation results failed. {}'.format(
'\n'.join(errors)),
data_idx)
else:
self._update_comp_info(idx, var_ref, None, False, 0, '', data_idx)
# flags to return
ret_val = 1
get_user_prompt = False
except UnicodeDecodeError as e:
em = "UnicodeDecodeError: {0}".format(e)
em += "Output file of " + data['ScriptFile'] + " is excluded from unit tests.\n"
em += "The model appears to contain a non-asci character\n"
em += "in the comment of a variable, parameter or constant.\n"
em += "Check " + data['ScriptFile'] + " and the classes it instanciates.\n"
self._reporter.writeError(em)
else:
# if there was no error for this test case, check user feedback for result
if get_user_prompt:
# Reset answer, unless it is set to Y or N
if not (ans == "Y" or ans == "N"):
ans = "-"
updateReferenceData = False
# check if reference results already exist in library
oldRefFulFilNam = os.path.join(refDir, refFilNam)
# If the reference file exists, and if the reference file contains
# results, compare the results.
if os.path.exists(oldRefFulFilNam):
# print('Found results for ' + oldRefFulFilNam)
[updateReferenceData, _, ans] = self._compareResults(
data_idx, oldRefFulFilNam, y_sim, y_tra, refFilNam, ans,
)
else:
noOldResults = []
# add all names since we do not have any reference results yet
for pai in y_sim:
t_ref = pai["time"]
noOldResults = noOldResults + list(pai.keys())
self._legacy_plot(y_sim, t_ref, {}, noOldResults, dict(),
"New results: " + data['ScriptFile'])
# Reference file does not exist
print(
"*** Warning: Reference file {} does not yet exist.".format(refFilNam))
while not (ans == "n" or ans == "y" or ans == "Y" or ans == "N"):
print(" Create new file?")
ans = input(
" Enter: y(yes), n(no), Y(yes for all), N(no for all): ")
if ans == "y" or ans == "Y":
updateReferenceData = True
else:
self._reporter.writeError("Did not write new reference file %s." %
oldRefFulFilNam)
if updateReferenceData: # If the reference data of any variable was updated
# Make dictionary to save the results and the svn information
self._writeReferenceResults(oldRefFulFilNam, y_sim, y_tra)
self._reporter.writeOutput("Wrote new reference file %s." %
oldRefFulFilNam)
else:
# Tests that export FMUs do not have an output file. Hence, we do not warn
# about these cases.
if not data['mustExportFMU']:
self._reporter.writeWarning(
"Output file of " + data['ScriptFile'] + " is excluded from result test.")
# Write all results to comparison log file and inform user.
with open(self._comp_log_file, 'w', encoding="utf-8-sig") as comp_log:
comp_log.write("{}\n".format(json.dumps(self._comp_info, indent=2, sort_keys=True)))
if self._comp_tool == 'funnel':
s = (
"Comparison files output by funnel are stored in the directory "
"'{}' of size {:.1f} MB.\nRun 'report' method of class 'Tester' "
"to access a summary of the comparison results.\n").format(
self._comp_dir,
self._get_size_dir(self._comp_dir) * 1e-6)
self._reporter.writeOutput(s)
return ret_val
def _performTranslationErrorChecks(self, logFil, stat):
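""" Scan the translation log file ``logFil`` and, for each entry of the error
dictionary, count its occurrences in the log; return the updated ``stat``.
"""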
with open(logFil, mode="rt", encoding="utf-8-sig") as fil:
lines = fil.readlines()
for k, v in list(self._error_dict.get_dictionary().items()):
stat[k] = 0
for line in lines:
# use regex to extract first group and sum them in stat
if 'is_regex' in v and v['is_regex']:
import re
m = re.search(v["tool_message"], line)
if m is not None:
stat[k] = stat[k] + int(m.group(1))
# otherwise, default: count the number of line occurrences
else:
if v["tool_message"] in line:
stat[k] = stat[k] + 1
return stat
def _checkSimulationError(self, errorFile):
""" Check whether the simulation had any errors, and
write the error messages to ``self._reporter``.
"""
import json
# Read the json file with the statistics
if not os.path.isfile(self._statistics_log):
raise IOError("Statistics file {} does not exist.".format(self._statistics_log))
with open(self._statistics_log, mode="rt", encoding="utf-8-sig") as fil:
try:
stat = json.load(fil)['testCase']
except ValueError as e:
raise ValueError("Failed to parse {}.\n{}".format(self._statistics_log, str(e)))
# Error counters
iChe = 0
iCom = 0
iSim = 0
iFMU = 0
# Header for dump file
with open(self._failed_simulator_log_file, "w") as f:
f.write("Automatically generated BuildingsPy dump file for failed translations.\n\n")
# Check for errors
hasTranslationErrors = False
for ele in stat:
hasTranslationError = False
if 'check' in ele and ele['check']['result'] is False:
hasTranslationError = True
iChe = iChe + 1
self._reporter.writeError("Model check failed for '%s'." % ele["model"])
if 'simulate' in ele and ele['simulate']['result'] is False:
hasTranslationError = True
iSim = iSim + 1
self._reporter.writeError("Simulation failed for '%s'." %
ele["simulate"]["command"])
elif 'FMUExport' in ele and ele['FMUExport']['result'] is False:
iFMU = iFMU + 1
self._reporter.writeError("FMU export failed for '%s'." %
ele["FMUExport"]["command"])
# Check for problems.
# First, determine whether we had a simulation or an FMU export
if 'simulate' in ele:
key = 'simulate'
else:
key = 'FMUExport'
if key in ele:
logFil = ele[key]["translationLog"]
ele[key] = self._performTranslationErrorChecks(logFil, ele[key])
for k, v in list(self._error_dict.get_dictionary().items()):
# For OPTIMICA and JModelica, we neither have simulate nor FMUExport
if ele[key][k] > 0:
self._reporter.writeWarning(v["model_message"].format(ele[key]["command"]))
self._error_dict.increment_counter(k)
if hasTranslationError:
hasTranslationErrors = True
with open(self._failed_simulator_log_file, "a") as f:
f.write("===============================\n")
f.write("=====START OF NEW LOG FILE=====\n")
f.write("===============================\n")
with open(logFil, "r") as f2:
f.write(f2.read())
f.write("\n\n\n")
if iChe > 0:
print("Number of models that failed check : {}".format(iChe))
if iSim > 0:
print("Number of models that failed to simulate : {}".format(iSim))
if iFMU > 0:
print("Number of models that failed to export as an FMU : {}".format(iFMU))
if hasTranslationErrors:
print(
"Check or simulation failed, see {} for more details about the failed models.".format(
self._failed_simulator_log_file))
return self._writeSummaryMessages()
def _writeSummaryMessages(self, silent=True):
"""Write summary messages"""
for _, v in list(self._error_dict.get_dictionary().items()):
counter = v['counter']
if counter > 0 and not silent:
print(v['summary_message'].format(counter))
if not silent:
self._reporter.writeOutput(
"Script that runs unit tests had {} warnings and {} errors.\n".format(
self._reporter.getNumberOfWarnings(),
self._reporter.getNumberOfErrors(),
)
)
sys.stdout.write("See '{}' for details.\n".format(self._simulator_log_file))
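# Exit code: 1 if any error was reported, 2 if only warnings were reported, 0 otherwise.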
if self._reporter.getNumberOfErrors() > 0:
retval = 1
elif self._reporter.getNumberOfWarnings() > 0:
retval = 2
else:
retval = 0
if not silent:
self._reporter.writeOutput("Unit tests completed successfully.\n")
sys.stdout.flush()
return retval
def get_number_of_tests(self):
""" Returns the number of regression tests that will be run for the current library and configuration.
Note: Needs to be run within the run method (where elements of self._data requiring no simulation
are first removed).
"""
return len(self._data)
def printNumberOfClasses(self):
""" Print the number of models, blocks and functions to the
standard output stream
"""
iMod = 0
iBlo = 0
iFun = 0
for root, _, files in os.walk(self._libHome):
# Skip .svn and .git folders
if root.find('.svn') == -1 and root.find('.git') == -1:
for filNam in files:
# find .mo files
pos = filNam.find('.mo')
if pos > -1 and (root.find('Examples') == -1 or root.find('Validation') == -1):
# find classes that are not partial
filFulNam = os.path.join(root, filNam)
iMod = self._checkKey("model", filFulNam, iMod)
iBlo = self._checkKey("block", filFulNam, iBlo)
iFun = self._checkKey("function", filFulNam, iFun)
print("Number of models : {!s}".format(iMod))
print(" blocks : {!s}".format(iBlo))
print(" functions: {!s}".format(iFun))
def _getModelCheckCommand(self, mosFilNam):
""" Return lines that conduct a model check in pedantic mode.
:param mosFilNam: The name of the ``*.mos`` file
This function returns a command of the form
``checkModel("Buildings.Controls.Continuous.Examples.LimPID")``
"""
def get_model_name(mosFil, line):
try:
iSta = line.index('\"') + 1
iEnd = line.index('\"', iSta)
return line[iSta:iEnd]
except ValueError as e:
em = str(e) + "\n"
em += "Did not find model name in '%s'\n" % mosFil
self._reporter.writeError(em)
raise ValueError(em)
retVal = None
with open(mosFilNam, mode="r+", encoding="utf-8-sig") as fil:
for lin in fil:
if "simulateModel" in lin or "modelToOpen" in lin:
if self._modelica_tool == 'dymola':
retVal = 'checkModel("{}")'.format(get_model_name(mosFilNam, lin))
elif self._modelica_tool == 'omc':
retVal = "checkModel({})".format(get_model_name(mosFilNam, lin))
break
return retVal
def _removePlotCommands(self, mosFilNam):
"""Remove all plot commands from the mos file.
:param mosFilNam: The name of the ``*.mos`` file
This function removes all plot commands from the file ``mosFilNam``.
This works around a bug in Dymola 2012 that can cause an exception
from the Windows operating system, or cause Dymola to hang on Linux.
"""
with open(mosFilNam, mode="r+", encoding="utf-8-sig") as fil:
lines = fil.readlines()
linWri = []
goToPlotEnd = False
for i in range(len(lines)):
if not goToPlotEnd:
if (lines[i].count("removePlots(") == 0) and (lines[i].count("createPlot(") == 0):
linWri.append(i)
elif (lines[i].count("createPlot(")) > 0:
goToPlotEnd = True
else:
if (lines[i].count(";") > 0):
goToPlotEnd = False
# Write file
with open(mosFilNam, mode="w", encoding="utf-8") as filWri:
for i in range(len(linWri)):
filWri.write(lines[linWri[i]])
def _write_runscripts(self):
"""Create the runAll.mos scripts, one per processor (self._nPro).
The commands in the script depend on the tool: 'dymola', 'optimica', 'jmodelica' or 'omc'
"""
import platform
def _write_translation_stats(runFil, values):
# Close the bracket for the JSON object
runFil.write("""Modelica.Utilities.Streams.print(" }", """
+ '"' + values['statisticsLog'] + '"' + ");\n")
def _print_end_of_json(isLastItem, fileHandle, logFileName):
if isLastItem:
fileHandle.write(
"Modelica.Utilities.Streams.print(\" }\", \"%s\")\n" % logFileName)
fileHandle.write(
"Modelica.Utilities.Streams.print(\" ]\", \"%s\")\n" % logFileName)
fileHandle.write("Modelica.Utilities.Streams.print(\"}\", \"%s\")\n" % logFileName)
else:
fileHandle.write(
"Modelica.Utilities.Streams.print(\" },\", \"%s\")\n" % logFileName)
nUniTes = 0
# Count how many tests need to be simulated.
nTes = self.get_number_of_tests()
# Reduce the number of processors if there are fewer examples than processors
if nTes < self._nPro:
self.setNumberOfThreads(nTes)
# For files that do not require a simulation, we need to set the path of the result files.
# Not useful anymore since _write_runscripts is called only after the files that do not require
# a simulation have already been removed from self_data (see run method).
# for dat in self._data:
# if not dat['mustSimulate'] and not dat['mustExportFMU']:
# matFil = dat['ResultFile']
# for allDat in self._data:
# if allDat['mustSimulate']:
# resFil = allDat['ResultFile']
# if resFil == matFil:
# dat['ResultDirectory'] = allDat['ResultDirectory']
# break
for iPro in range(self._nPro):
###################################################################################
# Case for dymola and omc
###################################################################################
if self._modelica_tool in ['dymola', 'omc']:
runFil = open(os.path.join(self._temDir[iPro], self.getLibraryName(
), "runAll.mos"), mode="w", encoding="utf-8")
runFil.write(
"// File autogenerated for process {!s} of {!s}\n".format(iPro + 1, self._nPro))
runFil.write(
"// File created for execution by {}. Do not edit.\n".format(self._modelica_tool))
if self._modelica_tool == 'dymola':
# Disable parallel computing as this can give slightly different results.
runFil.write('Advanced.ParallelizeCode = false;\n')
# Default values for options that can give slightly different results.
runFil.write('Evaluate=false;\n')
runFil.write('Advanced.CompileWith64=2;\n')
runFil.write('Advanced.EfficientMinorEvents=false;\n')
# Set the pedantic Modelica mode
if self._pedanticModelica:
runFil.write('Advanced.PedanticModelica = true;\n')
else:
runFil.write('Advanced.PedanticModelica = false;\n')
# Flag that can change the simulation statistics
runFil.write(
'orig_Advanced_GenerateVariableDependencies = Advanced.GenerateVariableDependencies;\n')
runFil.write('Advanced.GenerateVariableDependencies = false;\n')
# Deactivate DDE
if platform.system() == "Windows":
runFil.write('// Deactivate DDE\n')
runFil.write(' (comp, sett) = GetDymolaCompiler();\n')
posDDE = "9" # At position 9 DDE settings should be stored.
runFil.write(' DDE_orig = sett[{}];\n'.format(posDDE))
runFil.write(' sett[{}] = \"DDE=0\"; // Disable DDE.\n'.format(posDDE))
runFil.write(' SetDymolaCompiler(comp, sett);\n')
runFil.write(
('cd(\"{}/{}\");\n'.format(self._temDir[iPro], self.getLibraryName())).replace("\\", "/"))
runFil.write('openModel("package.mo");\n')
elif self._modelica_tool == 'omc':
runFil.write('loadModel(Modelica, {"3.2"});\n')
runFil.write('getErrorString();\n')
runFil.write('loadFile("package.mo");\n')
# Add a flag so that translation info appears in console output.
# This allows checking for numerical derivatives.
# Dymola will write this output to a file when savelog(filename) is called.
# However, the runtime log will be in dslog.txt.
if self._modelica_tool == 'dymola':
runFil.write("Advanced.TranslationInCommandLog := true;\n")
# Set flag to support string parameters, which is required for the weather
# data file.
runFil.write("Modelica.Utilities.Files.remove(\"%s\");\n" %
self._simulator_log_file)
runFil.write("Modelica.Utilities.Files.remove(\"%s\");\n" % self._statistics_log)
runFil.write(r"""
Modelica.Utilities.Streams.print("{\"testCase\" : [", "%s");
""" % self._statistics_log)
# Count the number of experiments that need to be simulated or exported as an FMU.
# This is needed to properly close the json brackets.
nItem = 0
for i in range(iPro, nTes, self._nPro):
if self._data[i]['mustSimulate'] or self._data[i]['mustExportFMU']:
nItem = nItem + 1
iItem = 0
# Write unit tests for this process
for i in range(iPro, nTes, self._nPro):
# Check if this mos file should be simulated
if self._data[i]['mustSimulate'] or self._data[i]['mustExportFMU']:
isLastItem = (iItem == nItem - 1)
self._data[i]['ResultDirectory'] = self._temDir[iPro]
mosFilNam = os.path.join(self.getLibraryName(),
"Resources", "Scripts", "Dymola",
self._data[i]['ScriptFile'])
absMosFilNam = os.path.join(self._temDir[iPro], mosFilNam)
values = {
"mosWithPath": mosFilNam.replace(
"\\",
"/"),
"checkCommand": self._getModelCheckCommand(absMosFilNam).replace(
"\\",
"/"),
"checkCommandString": self._getModelCheckCommand(absMosFilNam).replace(
'\"',
r'\\\"'),
"scriptFile": self._data[i]['ScriptFile'].replace(
"\\",
"/"),
"model_name": self._data[i]['model_name'].replace(
"\\",
"/"),
"model_name_underscore": self._data[i]['model_name'].replace(
".",
"_"),
"start_time": self._data[i]['startTime'] if 'startTime' in self._data[i] else 0,
"final_time": self._data[i]['stopTime'] if 'stopTime' in self._data[i] else 0,
"statisticsLog": self._statistics_log.replace(
"\\",
"/"),
"translationLog": os.path.join(
self._temDir[iPro],
self.getLibraryName(),
self._data[i]['model_name'] +
".translation.log").replace(
"\\",
"/"),
"simulatorLog": self._simulator_log_file.replace(
"\\",
"/")}
if 'FMUName' in self._data[i]:
values["FMUName"] = self._data[i]['FMUName']
if self._modelica_tool == 'dymola':
# Delete command log, model_name.simulation.log and dslog.txt
runFil.write(
"Modelica.Utilities.Files.remove(\"%s.translation.log\");\n" %
values["model_name"])
runFil.write("Modelica.Utilities.Files.remove(\"dslog.txt\");\n")
runFil.write("clearlog();\n")
if self._modelica_tool == 'omc':
runFil.write('getErrorString();\n')
########################################################################
# Write line for model check
if self._modelica_tool == 'dymola':
model_name = values["model_name"]
if model_name.startswith(
"Obsolete.", model_name.find(".") + 1):
# This model is in IBPSA.Obsolete, or Buildings.Obsolete etc.
values["set_non_pedantic"] = "Advanced.PedanticModelica = false;\n"
values["set_pedantic"] = "Advanced.PedanticModelica = true;\n"
else: # Set to empty string as for non-obsolete models, we don't switch to non-pedantic mode
values["set_non_pedantic"] = ""
values["set_pedantic"] = ""
template = r"""
{set_non_pedantic}
rCheck = {checkCommand};
{set_pedantic}
Modelica.Utilities.Streams.print(" {{ \"file\" : \"{mosWithPath}\",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"model\" : \"{model_name}\",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"check\" : {{", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"command\" : \"{checkCommandString};\",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"result\" : " + String(rCheck), "{statisticsLog}");
Modelica.Utilities.Streams.print(" }},", "{statisticsLog}");
"""
runFil.write(template.format(**values))
##########################################################################
# Write commands for checking translation and simulation results.
if self._modelica_tool == 'dymola' and self._data[i]["mustSimulate"]:
# Remove dslog.txt, run a simulation, rename dslog.txt, and
# scan this log file for errors.
# This is needed as RunScript returns true even if the simulation failed.
# We read to dslog file line by line as very long files can lead to
# Out of memory for strings
# It could due to too large matrices, infinite recursion, or uninitialized variables.
# You can increase the size of 'Stringbuffer' in dymola/source/matrixop.h.
# The stack of functions is:
# Modelica.Utilities.Streams.readFile
template = r"""
{set_non_pedantic}
rScript=RunScript("Resources/Scripts/Dymola/{scriptFile}");
{set_pedantic}
savelog("{model_name}.translation.log");
if Modelica.Utilities.Files.exist("dslog.txt") then
Modelica.Utilities.Files.move("dslog.txt", "{model_name}.dslog.log");
end if;
iSuc=0;
intTimRec="temp";
timRecCol=0;
timRecSpa=0;
intTim="0";
jacRec="temp";
jacRecCol=0;
jacRecLen=0;
numJac="0";
staRec="temp";
staRecCol=0;
staRecLen=0;
numSta="0";
if Modelica.Utilities.Files.exist("{model_name}.dslog.log") then
iLin=1;
endOfFile=false;
while (not endOfFile) loop
(_line, endOfFile)=Modelica.Utilities.Streams.readLine("{model_name}.dslog.log", iLin);
iLin=iLin+1;
iSuc=iSuc+Modelica.Utilities.Strings.count(_line, "Integration terminated successfully");
if (Modelica.Utilities.Strings.find(_line, "CPU-time for integration") > 0) then
intTimRec = _line;
end if;
if (Modelica.Utilities.Strings.find(_line, "Number of Jacobian-evaluations") > 0) then
jacRec = _line;
end if;
if (Modelica.Utilities.Strings.find(_line, "Number of state events") > 0) then
staRec = _line;
break;
end if;
end while;
if iSuc > 0 then
if not Modelica.Utilities.Strings.isEqual(intTimRec,"temp") then
timRecCol = Modelica.Utilities.Strings.find(intTimRec, ":");
timRecSpa = Modelica.Utilities.Strings.findLast(intTimRec, " ");
intTim = Modelica.Utilities.Strings.substring(intTimRec, timRecCol+1, timRecSpa-1);
end if;
if not Modelica.Utilities.Strings.isEqual(jacRec,"temp") then
jacRecCol = Modelica.Utilities.Strings.find(jacRec, ":");
jacRecLen = Modelica.Utilities.Strings.length(jacRec);
numJac = Modelica.Utilities.Strings.substring(jacRec, jacRecCol+1, jacRecLen);
end if;
if not Modelica.Utilities.Strings.isEqual(staRec,"temp") then
staRecCol = Modelica.Utilities.Strings.find(staRec, ":");
staRecLen = Modelica.Utilities.Strings.length(staRec);
numSta = Modelica.Utilities.Strings.substring(staRec, staRecCol+1, staRecLen);
end if;
end if;
Modelica.Utilities.Streams.close("{model_name}.dslog.log");
else
Modelica.Utilities.Streams.print("{model_name}.dslog.log was not generated.", "{model_name}.log");
end if;
"""
runFil.write(template.format(**values))
template = r"""
Modelica.Utilities.Streams.print(" \"simulate\" : {{", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"command\" : \"RunScript(\\\"Resources/Scripts/Dymola/{scriptFile}\\\");\",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"translationLog\" : \"{translationLog}\",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"elapsed_time\" :" + intTim + ",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"jacobians\" :" + numJac + ",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"state_events\" :" + numSta + ",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"start_time\" :" + String({start_time}) + ",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"final_time\" :" + String({final_time}) + ",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"result\" : " + String(iSuc > 0), "{statisticsLog}");
"""
runFil.write(template.format(**values))
_write_translation_stats(runFil, values)
_print_end_of_json(isLastItem,
runFil,
self._statistics_log)
##########################################################################
# FMU export
if self._modelica_tool == 'dymola' and self._data[i]["mustExportFMU"]:
template = r"""
Modelica.Utilities.Files.removeFile("{FMUName}");
RunScript("Resources/Scripts/Dymola/{scriptFile}");
savelog("{model_name}.translation.log");
if Modelica.Utilities.Files.exist("dslog.txt") then
Modelica.Utilities.Files.move("dslog.txt", "{model_name}.dslog.log");
end if;
iSuc=0;
if Modelica.Utilities.Files.exist("{model_name}.dslog.log") then
iLin=1;
endOfFile=false;
while (not endOfFile) loop
(_line, endOfFile)=Modelica.Utilities.Streams.readLine("{model_name}.dslog.log", iLin);
iLin=iLin+1;
iSuc=iSuc+Modelica.Utilities.Strings.count(_line, "Created {FMUName}");
end while;
Modelica.Utilities.Streams.close("{model_name}.dslog.log");
else
Modelica.Utilities.Streams.print("{model_name}.dslog.log was not generated.", "{model_name}.log");
end if;
"""
runFil.write(template.format(**values))
template = r"""
Modelica.Utilities.Streams.print(" \"FMUExport\" : {{", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"command\" :\"RunScript(\\\"Resources/Scripts/Dymola/{scriptFile}\\\");\",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"translationLog\" : \"{translationLog}\",", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"result\" : " + String(iSuc > 0), "{statisticsLog}");
"""
runFil.write(template.format(**values))
_write_translation_stats(runFil, values)
_print_end_of_json(isLastItem,
runFil,
self._statistics_log)
elif self._modelica_tool == 'omc':
template = r"""
runScript("Resources/Scripts/Dymola/{scriptFile}");
getErrorString();
"""
runFil.write(template.format(**values))
if self._modelica_tool == 'dymola' and not (
self._data[i]["mustExportFMU"] or self._data[i]["mustSimulate"]):
print(
"****** {} neither requires a simulation nor an FMU export.".format(self._data[i]['ScriptFile']))
self._removePlotCommands(absMosFilNam)
nUniTes = nUniTes + 1
iItem = iItem + 1
if self._modelica_tool == 'dymola' and platform.system() == 'Windows':
# Reset DDE to original settings
runFil.write('// Reset DDE settings like before\n')
runFil.write(' sett[{}] = DDE_orig;\n'.format(posDDE))
runFil.write(' SetDymolaCompiler(comp, sett);\n')
# Reset Advanced flag
runFil.write(
'Advanced.GenerateVariableDependencies = orig_Advanced_GenerateVariableDependencies;\n')
runFil.write("exit();\n")
runFil.close()
###################################################################################
# Case for OPTIMICA and JModelica
###################################################################################
elif self._modelica_tool == 'optimica' or self._modelica_tool == 'jmodelica':
data = []
for i in range(iPro, nTes, self._nPro):
# Store ResultDirectory into data dict.
self._data[i]['ResultDirectory'] = self._temDir[iPro]
# Copy data used for this process only.
data.append(self._data[i])
nUniTes = nUniTes + 1
self._write_jmodelica_runfile(self._temDir[iPro], data)
print("Generated {} regression tests.\n".format(nUniTes))
@staticmethod
def _get_set_of_result_variables(list_of_result_variables):
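# Flatten the groups of result variables into a set of unique names, e.g.
# (illustrative) [['a.y', 'b.y'], ['b.y', 'c.y']] -> {'a.y', 'b.y', 'c.y'}.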
s = set()
for ent in list_of_result_variables:
for ele in ent:
s.add(ele)
return s
def _write_jmodelica_runfile(self, directory, data):
""" Write the OPTIMICA or JModelica runfile for all experiments in data.
:param directory: The name of the directory where the files will be written.
:param data: A list with the data for the experiments.
"""
import inspect
import buildingspy.development.regressiontest as r
import jinja2
# Copy only models that need to be translated
tra_data = []
for dat in data:
if dat[self._modelica_tool]['translate']:
tra_data.append(dat)
path_to_template = os.path.dirname(inspect.getfile(r))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(path_to_template))
with open(os.path.join(directory, "run.py"), mode="w", encoding="utf-8") as fil:
models_underscore = []
for dat in tra_data:
models_underscore.append(dat['model_name'].replace(".", "_"))
template = env.get_template("{}_run_all.template".format(self._modelica_tool))
txt = template.render(models_underscore=sorted(models_underscore))
# for the special case that no models need to be translated (for this process)
# we need to add a python command. Otherwise the python file is not valid.
if (len(tra_data) == 0):
txt += " import os;\n"
fil.write(txt)
tem_mod = env.get_template("{}_run.template".format(self._modelica_tool))
for dat in tra_data:
model = dat['model_name']
# Filter the result variables
if 'ResultVariables' in dat:
result_variables = list(self._get_set_of_result_variables(dat['ResultVariables']))
else:
result_variables = list()
# Set relative tolerance
if 'rtol' not in dat[self._modelica_tool]:
# User did not set tolerance, use the one from the .mo file
if 'tolerance' in dat:
dat[self._modelica_tool]['rtol'] = dat['tolerance']
else:
dat[self._modelica_tool]['rtol'] = 1E-6
# Note that if dat['mustSimulate'] == false, then only the FMU export is tested, but no
# simulation should be done.
# The filter argument must respect glob syntax ('[' is escaped as '[[]'); the JModelica
# .mat file stores matrix variables without spaces, e.g. [1,1].
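# For example (illustrative), 'A[1, 1]' becomes 'A[[]1,1[]]' after the two
# substitutions below, so that the brackets are matched literally by the glob filter.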
txt = tem_mod.render(
model=model,
ncp=dat[self._modelica_tool]['ncp'],
rtol=dat[self._modelica_tool]['rtol'],
solver=dat[self._modelica_tool]['solver'],
start_time='mod.get_default_experiment_start_time()',
final_time='mod.get_default_experiment_stop_time()',
simulate=dat[self._modelica_tool]['simulate'] and dat['mustSimulate'],
time_out=dat[self._modelica_tool]['time_out'],
generate_html_diagnostics=False,
debug_solver=False,
debug_solver_interactive_mode=False,
filter=[re.sub(r'\[|\]',
lambda m: '[{}]'.format(m.group()),
re.sub(' ', '', x)) for x in result_variables]
)
file_name = os.path.join(directory, "{}.py".format(model.replace(".", "_")))
with open(file_name, mode="w", encoding="utf-8") as fil:
fil.write(txt)
shutil.copyfile(
os.path.join(
os.path.dirname(__file__),
"..",
"simulate",
"OutputGrabber.py"),
os.path.join(
directory,
"OutputGrabber.py"))
def deleteTemporaryDirectories(self, delete):
""" Flag, if set to ``False``, then the temporary directories will not be deleted
after the regression tests are run.
:param delete: Flag, set to ``False`` to avoid the temporary directories to be deleted.
Unless this method is called prior to running the regression tests with ``delete=False``,
all temporary directories will be deleted after the regression tests.
"""
self._deleteTemporaryDirectories = delete
# Create the list of temporary directories that will be used to run the unit tests
def _setTemporaryDirectories(self):
self._temDir = []
# Make temporary directory, copy library into the directory and
# write run scripts to directory
for iPro in range(self._nPro):
# print("Calling parallel loop for iPro={}, self._nPro={}".format(iPro, self._nPro))
dirNam = tempfile.mkdtemp(
prefix='tmp-' + self.getLibraryName() + '-' + str(iPro) + "-")
self._temDir.append(dirNam)
# Directory that contains the library as a sub directory
libDir = self._libHome
shutil.copytree(
libDir,
os.path.join(
dirNam,
self.getLibraryName()),
symlinks=True,
ignore=shutil.ignore_patterns(
'.svn',
'.git',
'*.mat',
'*.log',
'request.',
'status.',
'dsmodel.c',
'dymosim',
'tmp-*',
'funnel-comp',
'fmi-library', # Not all of src is excluded as some .mo models link to files from src
'Documentation',
'ReferenceResults',
'help',
'compareVars',
'__pycache__'))
return
def _run_simulation_info(self):
""" Extract simulation data from statistics.json when run unit test with dymola
"""
with open(self._statistics_log, 'r') as f:
staVal = simplejson.loads(f.read())
data = []
for case in staVal['testCase']:
if 'FMUExport' not in case:
temp = {}
temp['model'] = case['model']
temp['simulation'] = {}
temp['simulation']['elapsed_time'] = case['simulate']['elapsed_time']
temp['simulation']['start_time'] = case['simulate']['start_time']
temp['simulation']['final_time'] = case['simulate']['final_time']
temp['simulation']['jacobians'] = case['simulate']['jacobians']
temp['simulation']['state_events'] = case['simulate']['state_events']
temp['simulation']['success'] = case['simulate']['result']
data.append(temp)
dataJson = simplejson.dumps(data)
return dataJson
def run(self):
""" Run all regression tests and checks the results.
:return: 0 if no errors and no warnings occurred during the regression tests,
otherwise a non-zero value.
This method
- creates temporary directories for each processor,
- copies the directory ``CURRENT_DIRECTORY`` into these
temporary directories,
- creates run scripts that run all regression tests,
- runs these regression tests,
- collects the dymola log files from each process,
- writes the combined log file ``unitTests-x.log``
to the current directory, where `x` is the name of the
Modelica tool,
- for Dymola, compares the results of the new simulations with
reference results that are stored in ``Resources/ReferenceResults``,
- writes the message `Regression tests completed successfully.`
if no error occurred,
- returns 0 if no errors and no warnings occurred, or non-zero otherwise.
"""
self.checkPythonModuleAvailability()
if self.get_number_of_tests() == 0:
self.setDataDictionary(self._rootPackage)
# (Delete and) Create directory for storing funnel data.
if self._comp_tool == 'funnel':
shutil.rmtree(self._comp_dir, ignore_errors=True)
os.makedirs(self._comp_dir)
# Reset the number of processors to use no more processors than there are
# examples to be run
self.setNumberOfThreads(min(multiprocessing.cpu_count(),
self.get_number_of_tests(), self._nPro))
retVal = 0
# Start timer
startTime = time.time()
# Process command line arguments
# Check if executable is on the path
if not self._useExistingResults:
exe_com = self.getModelicaCommand()
if not self.isExecutable(exe_com):
print("Error: Did not find executable '{}'".format(exe_com))
return 3
# Check current working directory
if not self.isValidLibrary(self._libHome):
print("*** {} is not a valid Modelica library.".format(self._libHome))
print("*** The current directory is {}".format(os.getcwd()))
print(
"*** Expected directory {} ".format(
os.path.abspath(
os.path.join(
self._libHome,
"Resources",
"Scripts"))))
print("*** Exit with error. Did not do anything.")
return 2
# Initialize data structure to check results
self._initialize_error_dict()
# Inform the user if verification of the simulation results is skipped
if self._skip_verification:
self._reporter.writeOutput(
"Time series of simulation results will not be verified.")
# Print number of processors
print("Using {!s} of {!s} processors to run unit tests for {!s}.".format(
self._nPro,
multiprocessing.cpu_count(),
self._modelica_tool))
# Count number of classes
self.printNumberOfClasses()
# Run simulations
if not self._useExistingResults:
self._setTemporaryDirectories()
tem_dir = []
libNam = self.getLibraryName()
for di in self._temDir:
if self._modelica_tool == 'optimica' or self._modelica_tool == "jmodelica":
tem_dir.append(di)
else:
tem_dir.append(os.path.join(di, libNam))
self._write_runscripts()
if not self._useExistingResults:
if self._modelica_tool == 'dymola':
if self._showGUI:
cmd = [self.getModelicaCommand(), "runAll.mos"]
else:
cmd = [self.getModelicaCommand(), "runAll.mos", "/nowindow"]
elif self._modelica_tool == 'omc':
cmd = [self.getModelicaCommand(), "runAll.mos"]
elif self._modelica_tool == 'optimica' or self._modelica_tool == 'jmodelica':
cmd = [self.getModelicaCommand(), "run.py"]
if self._nPro > 1:
po = multiprocessing.Pool(self._nPro)
po.map(functools.partial(runSimulation,
cmd=cmd),
[x for x in tem_dir])
po.close()
po.join()
else:
if len(self._data) > 0:
runSimulation(tem_dir[0], cmd)
# Concatenate simulator output files into one file
with open(self._simulator_log_file, mode="w", encoding="utf-8") as logFil:
for d in self._temDir:
for temLogFilNam in glob.glob(
os.path.join(
d,
self.getLibraryName(),
'*.translation.log')):
if os.path.exists(temLogFilNam):
with open(temLogFilNam, mode="r", encoding="utf-8-sig") as fil:
data = fil.read()
logFil.write(data)
else:
self._reporter.writeError(
"Log file '" + temLogFilNam + "' does not exist.\n")
retVal = 1
# Concatenate simulator statistics into one file
if self._modelica_tool == 'dymola' or self._modelica_tool == 'omc':
with open(self._statistics_log, mode="w", encoding="utf-8") as logFil:
stat = list()
for d in self._temDir:
temLogFilNam = os.path.join(d, self.getLibraryName(), self._statistics_log)
if os.path.exists(temLogFilNam):
with open(temLogFilNam.replace('Temp\tmp', 'Temp\\tmp'), mode="r", encoding="utf-8-sig") as temSta:
try:
cas = json.load(temSta)["testCase"]
# Iterate over all test cases of this output file
for ele in cas:
stat.append(ele)
except ValueError as e:
self._reporter.writeError(
"Decoding '%s' failed: %s" % (temLogFilNam, e))
raise
else:
self._reporter.writeError(
"Log file '" + temLogFilNam + "' does not exist.\n")
retVal = 1
# Dump an array of testCase objects
# dump to a string first using json.dumps instead of json.dump
json_string = json.dumps({"testCase": stat},
ensure_ascii=False,
indent=4,
separators=(',', ': '),
sort_keys=True)
logFil.write(json_string)
# check logfile if omc
if self._modelica_tool == 'omc':
self._analyseOMStats(filename=self._simulator_log_file,
nModels=self.get_number_of_tests())
# Check reference results
if self._batch:
ans = "N"
else:
ans = "-"
if self._modelica_tool == 'dymola':
retVal = self._check_fmu_statistics(ans)
if retVal != 0:
retVal = 4
if retVal == 0:
retVal = self._checkSimulationError(self._simulator_log_file)
else:
self._checkSimulationError(self._simulator_log_file)
if not self._skip_verification:
# For Dymola: store available simulation info into
# self._comp_info used for reporting.
val = self._run_simulation_info()
self._comp_info = simplejson.loads(val)
r = self._checkReferencePoints(ans)
if r != 0: # In case of comparison error. Comparison warnings are handled
if retVal != 0: # We keep the translation or simulation error code.
pass
else:
retVal = 4
if self._modelica_tool == 'optimica' or self._modelica_tool == 'jmodelica':
if retVal == 0:
retVal = self._verify_jmodelica_runs()
else:
self._verify_jmodelica_runs()
if not self._skip_verification:
# For OPTIMICA and JModelica: store available translation and simulation info
# into self._comp_info used for reporting.
with open(self._simulator_log_file, 'r') as f:
self._comp_info = simplejson.loads(f.read())
r = self._checkReferencePoints(ans='N')
if r != 0:
if retVal != 0: # We keep the translation or simulation error code.
pass
else:
retVal = 4
# Update exit code after comparing with reference points
# and print summary messages.
if retVal == 0:
retVal = self._writeSummaryMessages(silent=False)
else: # We keep the translation or simulation error code.
self._writeSummaryMessages(silent=False)
# Delete temporary directories, or write message that they are not deleted
for d in self._temDir:
if self._deleteTemporaryDirectories:
shutil.rmtree(d)
else:
print("Did not delete temporary directory {}".format(d))
# Print list of files that may be excluded from unit tests
if len(self._exclude_tests) > 0:
print("*** Warning: The following files may be excluded from the regression tests:\n")
for fil in self._exclude_tests:
print(" {}".format(fil))
# Print time
elapsedTime = time.time() - startTime
print("Execution time = {:.3f} s".format(elapsedTime))
# Delete statistics file
if self._modelica_tool == 'dymola':
os.remove(self._statistics_log)
return retVal
def _get_test_models(self, folder=None, packages=None):
"""
Return a list with the full path of test models that were found in ``packages``.
:param folder: The path to the library to be searched.
:param packages: The names of packages containing test models, such as ``Examples`` and ``Tests``
:return: A list with the full paths to the ``.mo`` files of the found models.
"""
if folder is None:
folder = self._temDir[0]
res = []
for root, __, paths in os.walk(folder):
# check if this root has to be analysed
if packages is None:
checkroot = True
elif os.path.split(root)[-1] in packages:
checkroot = True
else:
checkroot = False
if checkroot:
# take the path if it's a model
for path in paths:
if path.endswith('.mo') and not path.endswith('package.mo'):
res.append(os.path.join(root, path))
return res
def _model_from_mo(self, mo_file):
"""Return the model name from a .mo file"""
# split the path of the mo_file
splt = mo_file.split(os.sep)
# find the root of the library name
root = splt.index(self.getLibraryName())
# recompose but with '.' instead of path separators
model = '.'.join(splt[root:])
# remove the '.mo' at the end
return model[:-3]
def _writeOMRunScript(self, worDir, models, cmpl, simulate):
"""
Write an OpenModelica run script to test model compliance
:param: wordir: path to working directory
:param: models is a list of model names, typically obtained from
:func:`~buildingspy.regressiontest.Tester._get_test_models`
:param: cmpl, simulate: booleans specifying if the models have to be
compiled and simulated respectively.
"""
mosfilename = os.path.join(worDir, 'OMTests.mos')
with open(mosfilename, mode="w", encoding="utf-8") as mosfile:
# preamble
mosfile.write(
"//Automatically generated script for testing model compliance with OpenModelica.\n")
mosfile.write("loadModel(Modelica, {\"3.2\"});\n")
mosfile.write("getErrorString();\n")
mosfile.write("loadModel({});\n\n".format(self.getLibraryName()))
# one line per model
comp = ['checkModel(' + m + '); getErrorString();\n' for m in models]
sim = ['simulate(' + m + '); getErrorString();\n' for m in models]
for c, s in zip(comp, sim):
if cmpl:
mosfile.write(c)
if simulate:
mosfile.write(s)
self._reporter.writeOutput('OpenModelica script {} created'.format(mosfilename))
return mosfilename
def test_OpenModelica(self, cmpl=True, simulate=False,
packages=['Examples'], number=-1):
"""
Test the library compliance with OpenModelica.
This is the high-level method to test a complete library, even if there
are no specific ``.mos`` files in the library for regression testing.
This method sets ``self._nPro`` to 1 as it only works on a single core. It also
executes ``self._setTemporaryDirectories()``.
:param cmpl: Set to ``True`` for the models to be compiled.
:param simulate: Set to ``True`` to cause the model to be simulated (from 0 to 1s).
:param packages: Set to a list whose elements are the packages that contain the test models of the
library
:param number: Number of models to test. Set to ``-1`` to test all models.
Usage:
1. In a python console or script, cd to the root folder of the library
>>> t = Tester()
>>> t.test_OpenModelica() # doctest: +SKIP
OpenModelica script ...OMTests.mos created
Logfile created: ...OMTests.log
Starting analysis of logfile
<BLANKLINE>
<BLANKLINE>
######################################################################
Tested 5 models:
* 0 compiled successfully (=0.0%)
<BLANKLINE>
Successfully checked models:
Failed model checks:
* BuildingsPy.buildingspy.tests.MyModelicaLibrary.Examples.BooleanParameters
* BuildingsPy.buildingspy.tests.MyModelicaLibrary.Examples.Constants
* BuildingsPy.buildingspy.tests.MyModelicaLibrary.Examples.MyStep
* BuildingsPy.buildingspy.tests.MyModelicaLibrary.Examples.ParameterEvaluation
* BuildingsPy.buildingspy.tests.MyModelicaLibrary.Obsolete.Examples.Constant
<BLANKLINE>
More detailed information is stored in self._omstats
######################################################################
"""
import shutil
import subprocess
# fixme: Why is there a number as an argument?
# Isn't it sufficient to select the package to be tested?
if number < 0:
number = int(1e15)
self.setNumberOfThreads(1)
self._setTemporaryDirectories()
worDir = self._temDir[0]
# return a list with pathnames of the .mo files to be tested
tests = self._get_test_models(packages=packages)
if len(tests) == 0:
raise RuntimeError("Did not find any examples to test.")
self._ommodels = sorted([self._model_from_mo(mo_file) for mo_file in tests[:number]])
mosfile = self._writeOMRunScript(worDir=worDir, models=self._ommodels,
cmpl=cmpl, simulate=simulate)
env = os.environ.copy() # will be passed to the subprocess.Popen call
# Check whether OPENMODELICALIBRARY is set.
# If it is not set, try to use /usr/lib/omlibrary if it exists.
# if it does not exist, stop with an error.
if 'OPENMODELICALIBRARY' in env:
# append worDir
env['OPENMODELICALIBRARY'] += os.pathsep + worDir
else:
if os.path.exists('/usr/lib/omlibrary'):
env['OPENMODELICALIBRARY'] = worDir + ':/usr/lib/omlibrary'
else:
raise OSError(
"Environment flag 'OPENMODELICALIBRARY' must be set, or '/usr/lib/omlibrary' must be present.")
# get the executable for omc, depending on platform
if sys.platform == 'win32':
try:
omc = os.path.join(env['OPENMODELICAHOME'], 'bin', 'omc')
except KeyError:
raise OSError("Environment flag 'OPENMODELICAHOME' must be set")
else:
# we suppose the omc executable is known
omc = 'omc'
try:
logFilNam = mosfile.replace('.mos', '.log')
with open(logFilNam, mode="w", encoding="utf-8") as logFil:
retcode = subprocess.Popen(args=[omc, '+d=initialization', mosfile],
stdout=logFil,
stderr=logFil,
shell=False,
env=env,
cwd=worDir).wait()
                if retcode != 0:
                    print("omc exited with non-zero return code {}".format(retcode))
                    return retcode
        except OSError as e:
            raise OSError("Execution of omc +d=initialization " + mosfile + " failed.\n"
                          + "Working directory is '" + worDir + "'.") from e
else:
# process the log file
print("Logfile created: {}".format(logFilNam))
print("Starting analysis of logfile")
with open(logFilNam, mode="r", encoding="utf-8-sig") as f:
self._omstats = f.readlines()
self._analyseOMStats(lines=self._omstats, models=self._ommodels, simulate=simulate)
# Delete temporary directories
if self._deleteTemporaryDirectories:
for d in self._temDir:
shutil.rmtree(d)
def _analyseOMStats(self, lines=None, models=None, simulate=False):
"""
Analyse the log file of the OM compatibility test.
        :param lines: lines of the log file.
        :param models: list of the models that were tested.
        :param simulate: ``True`` if simulation was tested.
A list of models is passed to this function because it is easier to
get an overview of the FAILED models based on a list of all tested
models.
"""
if lines is None:
lines = self._omstats
if models is None:
models = self._ommodels
check_ok, sim_ok = 0, 0
check_nok, sim_nok = 0, 0
models_check_ok, models_check_nok, models_sim_ok, models_sim_nok = [], [], [], []
for line in lines:
if line.find('resultFile = "') > 0:
if line.find('""') > 0:
sim_nok += 1
else:
sim_ok += 1
# Seems like OpenModelica always uses '/' as file separator
models_sim_ok.append(line.split('/')[-1].split('_res.mat')[0])
elif line.find('Check of ') > 0:
if line.find(' completed successfully.') > 0:
check_ok += 1
models_check_ok.append(line.split('Check of')
[-1].split('completed successfully')[0].strip())
else:
# we never get in this clause
pass
# get the total number of tested models
check_nok = len(models) - check_ok
sim_nok = len(models) - sim_ok
# get failed models
models_check_nok = models[:]
for m in models_check_ok:
models_check_nok.remove(m)
if simulate:
models_sim_nok = models[:]
for m in models_sim_ok:
models_sim_nok.remove(m)
print('\n')
print(70 * '#')
print("Tested {} models:\n * {} compiled successfully (={:.1%})"
.format(check_ok + check_nok,
check_ok, float(check_ok) / float(check_ok + check_nok)))
if simulate:
print(" * {} simulated successfully (={:.1%})".format(sim_ok,
float(sim_ok) / float(sim_ok + sim_nok)))
print("\nSuccessfully checked models:")
for m in models_check_ok:
print(" * {}".format(m))
print("Failed model checks:")
for m in models_check_nok:
print(" * {}".format(m))
if simulate:
print("\nSuccessfully simulated models:")
for m in models_sim_ok:
print(" * {}".format(m))
print("Failed model simulations:")
for m in models_sim_nok:
print(" * {}".format(m))
print("\nMore detailed information is stored in self._omstats")
print(70 * '#')
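# Sketch of the log lines that _analyseOMStats() looks for (contents are illustrative,
# not taken from a real OpenModelica log):
#
#   Check of MyLib.Examples.Foo completed successfully.   -> counted as a successful check
#   resultFile = "/tmp/om/MyLib.Examples.Foo_res.mat"     -> counted as a successful simulation
#   resultFile = ""                                       -> counted as a failed simulation
#
# Models that never produce a 'completed successfully' line are reported as failed checks,
# so the failure count is inferred from the list of tested models rather than from the log.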
|
python
|
#! /usr/bin/env python3
# coding: utf-8
from __future__ import annotations
import logging
from logging import StreamHandler
import os
log = logging.getLogger()
formatter = logging.Formatter("%(filename)s %(levelname)s - %(message)s")
handler = StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
# The "loglevel" environment variable overrides the default level, e.g. loglevel=DEBUG.
log.setLevel(os.environ.get("loglevel", "INFO").upper())
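# Minimal usage sketch (not part of the original module): run the file directly to see
# which messages the configured handler emits, e.g. `loglevel=DEBUG python thisfile.py`,
# where the file name is hypothetical.
if __name__ == "__main__":
    log.debug("debug message, only emitted when loglevel=DEBUG")
    log.info("info message, emitted at the default INFO level")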
|
python
|