max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
guild/tests/samples/projects/remote-status/sleep.py | msarahan/guildai | 694 | 51623 |
<reponame>msarahan/guildai
import time
seconds = 1
time.sleep(seconds)
|
python_toolbox/wx_tools/drawing_tools/pens.py | hboshnak/python_toolbox | 119 | 51624 |
<gh_stars>100-1000
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
import wx
from python_toolbox import caching
is_mac = (wx.Platform == '__WXMAC__')
is_gtk = (wx.Platform == '__WXGTK__')
is_win = (wx.Platform == '__WXMSW__')
@caching.cache(max_size=100)
def get_focus_pen(color='black', width=1, dashes=[1, 4]):
'''Return a cached dashed pen for drawing focus indicators.'''
if isinstance(color, basestring):
color = wx.NamedColour(color)
# todo: do `if is_mac`, also gtk maybe
pen = wx.Pen(color, width, wx.USER_DASH)
pen.SetDashes(dashes)
return pen
|
tests/components/sensibo/test_update.py | liangleslie/core | 30,023 | 51626 |
"""The test for the sensibo update platform."""
from __future__ import annotations
from datetime import timedelta
from unittest.mock import patch
from pysensibo.model import SensiboData
from pytest import MonkeyPatch
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.util import dt
from tests.common import async_fire_time_changed
async def test_select(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo update."""
state1 = hass.states.get("update.hallway_update_available")
state2 = hass.states.get("update.kitchen_update_available")
assert state1.state == STATE_ON
assert state1.attributes["installed_version"] == "SKY30046"
assert state1.attributes["latest_version"] == "SKY30048"
assert state1.attributes["title"] == "skyv2"
assert state2.state == STATE_OFF
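# Patch the device firmware to the latest version and advance time so the coordinator refreshes.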
monkeypatch.setattr(get_data.parsed["ABC999111"], "fw_ver", "SKY30048")
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("update.hallway_update_available")
assert state1.state == STATE_OFF
|
src/python/controller/controller_persist.py | AlekLT/seedsync | 255 | 51695 |
# Copyright 2017, <NAME>, All rights reserved.
import json
from common import overrides, Constants, Persist, PersistError
class ControllerPersist(Persist):
"""
Persisting state for controller
"""
# Keys
__KEY_DOWNLOADED_FILE_NAMES = "downloaded"
__KEY_EXTRACTED_FILE_NAMES = "extracted"
def __init__(self):
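# File names the controller has already downloaded or extracted.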
self.downloaded_file_names = set()
self.extracted_file_names = set()
@classmethod
@overrides(Persist)
def from_str(cls: "ControllerPersist", content: str) -> "ControllerPersist":
persist = ControllerPersist()
try:
dct = json.loads(content)
persist.downloaded_file_names = set(dct[ControllerPersist.__KEY_DOWNLOADED_FILE_NAMES])
persist.extracted_file_names = set(dct[ControllerPersist.__KEY_EXTRACTED_FILE_NAMES])
return persist
except (json.decoder.JSONDecodeError, KeyError) as e:
raise PersistError("Error parsing ControllerPersist - {}: {}".format(
type(e).__name__, str(e))
)
@overrides(Persist)
def to_str(self) -> str:
dct = dict()
dct[ControllerPersist.__KEY_DOWNLOADED_FILE_NAMES] = list(self.downloaded_file_names)
dct[ControllerPersist.__KEY_EXTRACTED_FILE_NAMES] = list(self.extracted_file_names)
return json.dumps(dct, indent=Constants.JSON_PRETTY_PRINT_INDENT)
|
tests/web/classes/__init__.py | DavidCain/python-slackclient | 2,486 | 51719 |
STRING_51_CHARS = "SFOTYFUZTMDSOULXMKVFDOBQWNBAVGANMVLXQQZZQZQHBLJRZNY"
STRING_301_CHARS = (
"ZFOMVKXETILJKBZPVKOYAUPNYWWWUICNEVXVPWNAMGCNHDBRMATGPMUHUZHUJKFWWLXBQXVDNCGJHAPKEK"
"DZCXKBXEHWCWBYDIGNYXTOFWWNLPBTVIGTNQKIQDHUAHZPWQDKKCHERBYKLAUOOKJXJJLGOPSCRVEHCOAD"
"BFYKJTXHMPPYWQVXCVGNNSXLNIHVKTVMEOIRXQDPLHIDZBAHUEDWXKXILEBOLILOYGZLNGCNXKWMFJWYYI"
"PIDUKJVGKTUERTPRMMMVZNAAOMZJFXFSEENCAMBOUJMYXTPHJEOPKDB"
)
STRING_3001_CHARS = (
"<KEY>"
"<KEY>BPRALVWQEYTFBK<KEY>RALDRZHKPGTWZAXOUFQJKOGTMYSFEDBEQQXIGKZMXNKDCEN"
"LSVHNGWVCIDMNSIZTBWBBVUMLPHRUCIZLZBFEGNFXZNJEZBUTNHNCYWWYSJSJDNOPPGHUPZLPJWDKEATZO"
"UGKZEGFTFBGZDNRITDFBDJLYDGETUHBDGFEELBJBDMSRBVFPXMRJXWULONCZRZZBNFOPARFNXPQONKEIKG"
"QDPJWCMGYSEIBAOLJNWPJVUSMJGCSQBLGZCWXJOYJHIZMNFMTLUQFGEBOONOZMGBWORFEUGYIUJAKLVAJZ"
"FTNOPOZNMUJPWRMGPKNQSBMZQRJXLRQJPYYUXLFUPICAFTXDTQIUOQRCSLWPHHUZAOPVTBRCXWUIXMFGYT"
"RBKPWJJXNQPLIAZAOKIMDWCDZABPLNOXYOZZBTHSDIPXXBKXKOSYYCITFSMNVIOCNGEMRKRBPCLBOCXBZQ"
"VVWKNJBPWQNJOJWAGAIBOBFRVDWLXVBLMBSXYLOAWMPLKJOVHABNNIFTKTKBIIBOSHYQZRUFPPPRDQPMUV"
"WMSWBLRUHKEMUFHIMZRUNNITKWYIWRXYPGFPXMNOABRWXGQFCWOYMMBYRQQLOIBFENIZBUIWLMDTIXCPXW"
"NNHBSRPSMCQIMYRCFCPLQQGVOHYZOUGFEXDTOETUKQAXOCNGYBYPYWDQHYOKPCCORGRNHXZAA<KEY>"
"CM<KEY>"
"<KEY>"
"OLHPFFSWTZGYPAZJXRRPATWXKRDFQJRAEOBFNIWVZDKLNYXUFBOAWSDSKFYYRTADBBYHEWNZSTDXAAOQCD"
"WARSJZONQXRACMNBXZSEWZYBWADNDVRXBNJPJZQUNDYLBASCLCPFJWAMJUQAHBUZYDTIQPBPNJVVOHISZP"
"VGBDNXFIHYCABTSVNVILZUPPZXMPPZVBRTRHDGHTXXLBIYTMRDOUBYBVHVVKQAXAKISFJNUTRZKOCACJAX"
"ZXRRKMFOKYBHFUDBIXFAQSNUTYFNVQNGYWPJZGTLQUMOWXKKTUZGOUXAOVLQMMNKKECQCCOBNPPPXZYWZU"
"WHLHZQDIETDDPXWTILXGAYJKPHBXPLRFDPDSHFUPOIWRQDWQQNARPHPVKJPXZGGXOUVBYZSLUPVIJKWKNF"
"WMFKWYSYJJCCSCALMVPYIPHDKRXOWTUAYJFTAANCTVYDNSSIHGCWGKLDHFFBFSIFBMGHHFHZQSWOWZXOUW"
"PKNICGXPFMFIESHPDDMGSSWGBIAQVBANHLGDBYENRLSUARJXLQWPMOUSUKIIVXICBJPSWOEZPEUAJSLITV"
"XEQWSRENUJRJHPLBPFMBRPKGQNSYFWVLFLSQGGETKDUGYOLNFSMRVAZLQOAEKCUGNFEXRUDYSKBOQPYJAH"
"QHEIMSAAMTTYVJTHZDGQEITLERRYYQCTEQPTYQPHLMBDPCZZNNJYLGAGNXONCTIBSXEHXPYWBCTEEZLIYI"
"FMPYONXRVLSGZOEDZIMVDDPRXBKCKEPHOVLRBSPKMLZPXNRZVSSSYAOMGSVJODUZAJDYLGUZAFJMCOVGQX"
"ZUWQJENTEWQRFZYQTVEAHFQUWBUCFWHGRTMNQQFSPKKYYUBJVXKFQCCMBNGWNTRFGFKBFWTTPNDTGGWTAK"
"EOTXUPGFXOVWTOERFQSEZWVUYMGHVBQZIKIBJCNMKTZANNNOVMYTFLQYVNKTVZHFUJTPWNQWRYKGMYRYDC"
"WNTCUCYJCWXMMOJXUJSDWJKTTYOBFJFLBUCECGTVWKELCBDIKDUDOBLZLHYJQTVHXSUAFHDFDMETLHHEEJ"
"XJYWEOTXAUOZARSSQTBBXULKBBSTQHMJAAOUDIQCCETFWAINYIJCGXCILMDCAUYDMNZBDKIPVRCKCYKOIG"
"JHBLUHPOLDBWREFAZVEFFSOQQHMCXQYCQGMBHYKHJDBZXRAXLVZNYQXZEQYRSZHKKGCSOOEGNPFZDNGIMJ"
"QCXAEWWDYIGTQMJKBTMGSJAJCKIODCAEXVEGYCUBEEGCMARPJIKNAROJHYHKKTKGKKRVVSVYADCJXGSXAR"
"KGOUSUSZGJGFIKJDKJUIRQVSAHSTBCVOWZJDCCBWNNCBIYTCNOUPEYACCEWZNGETBTDJWQIEWRYIQXOZKP"
"ULDPCINLDFFPNORJHOZBSSYPPYNZTLXBRFZGBECKTTNVIHYNKGBXTTIXIKRBGVAPNWBPFNCGWQMZHBAHBX"
"MFEPSWVBUDLYDIVLZFHXTQJWUNWQHSWSCYFXQQSVORFQGUQIHUAJYFLBNBKJPOEIPYATRMNMGUTTVBOUHE"
"ZKXVAUEXCJYSCZEMGWTPXMQJEUWYHTFJQTBOQBEPQIPDYLBPIKKGPVYPOVLPPHYNGNWFTNQCDAATJVKRHC"
"OZGEBPFZZDPPZOWQCDFQZJAMXLVREYJQQFTQJKHMLRFJCVPVCTSVFVAGDVNXIGINSGHKGTWCKXNRZCZFVX"
"FPKZHPOMJTQOIVDIYKEVIIBAUHEDGOUNPCPMVLTZQLICXKKIYRJASBNDUZAONDDLQNVRXGWNQAOWSJSFWU"
"YWTTLOVXIJYERRZQCJMRZHCXEEAKYCLEICUWOJUXWHAPHQJDTBVRPVWTMCJRAUYCOTFXLLIQLOBASBMPED"
"KLDZDWDYAPXCKLZMEFIAOFYGFLBMURWVBFJDDEFXNIQOORYRMNROGVCOESSHSNIBNFRHPSWVAUQQVDMAHX"
"STDOVZMZEFRRFCKOLDOOFVOBCPRRLGYFJNXVPPUZONOSALUUI"
)
|
deepfence_backend/utils/fim_config_utils.py | deepfence/ThreatMapper | 1,281 | 51722 |
<filename>deepfence_backend/utils/fim_config_utils.py
import yaml
import json
from jsonschema import validate
from jsonschema.exceptions import ValidationError, SchemaError
def validate_fim_config(fim_config):
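# Load the JSON schema shipped with df_sysmon and validate the YAML FIM config against it.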
with open("/etc/df_sysmon/fim_config_schema.json", "r") as schemafile:
fim_schema = schemafile.read()
try:
validate(yaml.safe_load(fim_config), json.loads(fim_schema))
except ValidationError as ex:
print("Fim Config is not valid: \n", ex)
return False
except SchemaError as ex:
print("Fim Schema is not valid: \n", ex)
return False
except Exception as ex:
print("Error: ". ex)
return False
return True
|
numba/tests/test_overlap.py | auderson/numba | 6,620 | 51753 |
import numpy as np
from numba import jit
from numba.core import types
from numba.tests.support import TestCase, tag
import unittest
# Array overlaps involving a displacement
def array_overlap1(src, dest, k=1):
assert src.shape == dest.shape
dest[k:] = src[:-k]
def array_overlap2(src, dest, k=1):
assert src.shape == dest.shape
dest[:-k] = src[k:]
def array_overlap3(src, dest, k=1):
assert src.shape == dest.shape
dest[:,:-k] = src[:,k:]
def array_overlap4(src, dest, k=1):
assert src.shape == dest.shape
dest[:,k:] = src[:,:-k]
def array_overlap5(src, dest, k=1):
assert src.shape == dest.shape
dest[...,:-k] = src[...,k:]
def array_overlap6(src, dest, k=1):
assert src.shape == dest.shape
dest[...,k:] = src[...,:-k]
# Array overlaps involving an in-place reversal
def array_overlap11(src, dest):
assert src.shape == dest.shape
dest[::-1] = src
def array_overlap12(src, dest):
assert src.shape == dest.shape
dest[:] = src[::-1]
def array_overlap13(src, dest):
assert src.shape == dest.shape
dest[:,::-1] = src
def array_overlap14(src, dest):
assert src.shape == dest.shape
dest[:] = src[:,::-1]
def array_overlap15(src, dest):
assert src.shape == dest.shape
dest[...,::-1] = src
def array_overlap16(src, dest):
assert src.shape == dest.shape
dest[:] = src[...,::-1]
class TestArrayOverlap(TestCase):
def check_overlap(self, pyfunc, min_ndim, have_k_argument=False):
N = 4
def vary_layouts(orig):
yield orig.copy(order='C')
yield orig.copy(order='F')
a = orig[::-1].copy()[::-1]
assert not a.flags.c_contiguous and not a.flags.f_contiguous
yield a
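# Run the interpreted and compiled functions with src and dest aliasing the same array, then compare results.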
def check(pyfunc, cfunc, pydest, cdest, kwargs):
pyfunc(pydest, pydest, **kwargs)
cfunc(cdest, cdest, **kwargs)
self.assertPreciseEqual(pydest, cdest)
cfunc = jit(nopython=True)(pyfunc)
# Check for up to 3d arrays
for ndim in range(min_ndim, 4):
shape = (N,) * ndim
orig = np.arange(0, N**ndim).reshape(shape)
# Note we cannot copy an 'A' layout array exactly (bitwise),
# so instead we call vary_layouts() twice
for pydest, cdest in zip(vary_layouts(orig), vary_layouts(orig)):
if have_k_argument:
for k in range(1, N):
check(pyfunc, cfunc, pydest, cdest, dict(k=k))
else:
check(pyfunc, cfunc, pydest, cdest, {})
def check_overlap_with_k(self, pyfunc, min_ndim):
self.check_overlap(pyfunc, min_ndim=min_ndim, have_k_argument=True)
def test_overlap1(self):
self.check_overlap_with_k(array_overlap1, min_ndim=1)
def test_overlap2(self):
self.check_overlap_with_k(array_overlap2, min_ndim=1)
def test_overlap3(self):
self.check_overlap_with_k(array_overlap3, min_ndim=2)
def test_overlap4(self):
self.check_overlap_with_k(array_overlap4, min_ndim=2)
def test_overlap5(self):
self.check_overlap_with_k(array_overlap5, min_ndim=1)
def test_overlap6(self):
self.check_overlap_with_k(array_overlap6, min_ndim=1)
def test_overlap11(self):
self.check_overlap(array_overlap11, min_ndim=1)
def test_overlap12(self):
self.check_overlap(array_overlap12, min_ndim=1)
def test_overlap13(self):
self.check_overlap(array_overlap13, min_ndim=2)
def test_overlap14(self):
self.check_overlap(array_overlap14, min_ndim=2)
def test_overlap15(self):
self.check_overlap(array_overlap15, min_ndim=1)
def test_overlap16(self):
self.check_overlap(array_overlap16, min_ndim=1)
if __name__ == '__main__':
unittest.main()
|
src/cpp/model_benchmark.bzl | SanggunLee/edgetpu | 320 | 51804 |
"""Generate model benchmark source file using template.
"""
_TEMPLATE = "//src/cpp:models_benchmark.cc.template"
def _generate_models_benchmark_src_impl(ctx):
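# Expand the .cc template, substituting the benchmark name and the CPU/EdgeTPU model file paths.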
ctx.actions.expand_template(
template = ctx.file._template,
output = ctx.outputs.source_file,
substitutions = {
"{BENCHMARK_NAME}": ctx.attr.benchmark_name,
"{TFLITE_CPU_FILEPATH}": ctx.attr.tflite_cpu_filepath,
"{TFLITE_EDGETPU_FILEPATH}": ctx.attr.tflite_edgetpu_filepath,
},
)
generate_models_benchmark_src = rule(
implementation = _generate_models_benchmark_src_impl,
attrs = {
"benchmark_name": attr.string(mandatory = True),
"tflite_cpu_filepath": attr.string(mandatory = True),
"tflite_edgetpu_filepath": attr.string(mandatory = True),
"_template": attr.label(
default = Label(_TEMPLATE),
allow_single_file = True,
),
},
outputs = {"source_file": "%{name}.cc"},
)
|
catboost/benchmarks/kaggle/rossmann-store-sales/lightgbm_early_stopping.py | HeyLey/catboost | 6,989 | 51819 |
#!/usr/bin/env python
import os.path
import config
import experiment_lib
import lightgbm as lgb
class LightGBMExperimentEarlyStopping(experiment_lib.ExperimentEarlyStopping):
def __init__(self, **kwargs):
super(LightGBMExperimentEarlyStopping, self).__init__(**kwargs)
def get_estimator(self, cat_cols):
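# n_estimators is set very high; the effective tree count is chosen by early stopping.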
return lgb.LGBMRegressor(
n_jobs=16,
n_estimators=9999
)
def fit_estimator(self, estimator, X_train, y_train, X_test, y_test, cat_cols, early_stopping_rounds):
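# Fit with RMSE evaluation on the held-out set and stop once the metric stops improving.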
estimator.fit(
X_train,
y_train,
categorical_feature=cat_cols,
eval_set=[(X_test, y_test)],
eval_metric='rmse',
early_stopping_rounds=early_stopping_rounds
)
self.best_estimator = estimator
self.best_iteration = estimator.best_iteration_
self.best_params = estimator.get_params()
self.best_score = estimator.best_score_
if __name__ == "__main__":
dataset_path = config.preprocessed_dataset_path
LightGBMExperimentEarlyStopping(
train_path=os.path.join(config.preprocessed_dataset_path, 'train'),
test_path=os.path.join(config.preprocessed_dataset_path, 'test'),
cd_path=os.path.join(config.preprocessed_dataset_path, 'cd'),
output_folder_path=os.path.join(config.training_output_path, 'LightGBMExperimentEarlyStopping'),
header_in_data=False
).run()
|
examples/simple.py | bytewax/bytewax | 109 | 51861 |
from bytewax import Dataflow, run
flow = Dataflow()
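# Square each input item; capture() marks the squared values as the dataflow's output.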
flow.map(lambda x: x * x)
flow.capture()
if __name__ == "__main__":
for epoch, y in sorted(run(flow, enumerate(range(10)))):
print(y)
|
angr/procedures/posix/recv.py | Kyle-Kyle/angr | 6,132 | 51867 |
import angr
######################################
# recv
######################################
class recv(angr.SimProcedure):
#pylint:disable=arguments-differ,unused-argument
def run(self, fd, dst, length, flags):
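# Look up the simulated file descriptor; return -1 if it is invalid, otherwise read up to length bytes into dst.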
simfd = self.state.posix.get_fd(fd)
if simfd is None:
return -1
return simfd.read(dst, length)
|
quantities/units/concentration.py | 502E532E/python-quantities | 105 | 51893 |
"""
"""
from ..unitquantity import UnitQuantity
from .substance import mol
from .volume import L
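# molar = mol / L; millimolar and micromolar are derived by successive factors of 1000.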
M = molar = UnitQuantity(
'molar',
mol / L,
symbol='M',
aliases=['Molar']
)
mM = millimolar = UnitQuantity(
'millimolar',
molar / 1000,
symbol='mM'
)
uM = micromolar = UnitQuantity(
'micromolar',
mM / 1000,
symbol='uM',
u_symbol='µM'
)
|
test/package/package_b/subpackage_2.py | Hacky-DH/pytorch | 60,067 | 51895 |
<filename>test/package/package_b/subpackage_2.py<gh_stars>1000+
__import__("math", fromlist=[])
__import__("xml.sax.xmlreader")
result = "subpackage_2"
class PackageBSubpackage2Object_0:
pass
def dynamic_import_test(name: str):
__import__(name)
|
tests/test_logo.py | theosech/ec | 290 | 51909 |
import unittest
class TestLogoMain(unittest.TestCase):
def test_imports(self):
try:
from dreamcoder.domains.logo.main import (
animateSolutions,
dreamFromGrammar,
list_options,
outputDreams,
enumerateDreams,
visualizePrimitives,
Flatten,
LogoFeatureCNN,
main
)
except Exception:
self.fail('Unable to import logo module')
if __name__ == '__main__':
unittest.main()
|
Packs/Ansible_Powered_Integrations/Integrations/Linux/Linux.py | diCagri/content | 799 | 51925 |
<reponame>diCagri/content
import json
import traceback
from typing import Dict, cast
import ansible_runner
import demistomock as demisto # noqa: F401
import ssh_agent_setup
from CommonServerPython import * # noqa: F401
# Dict to Markdown Converter adapted from https://github.com/PolBaladas/torsimany/
def dict2md(json_block, depth=0):
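# Recursively convert a dict/list structure to markdown: nested containers become headers, scalar values become bulleted key: value lines.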
markdown = ""
if isinstance(json_block, dict):
markdown = parseDict(json_block, depth)
if isinstance(json_block, list):
markdown = parseList(json_block, depth)
return markdown
def parseDict(d, depth):
markdown = ""
for k in d:
if isinstance(d[k], (dict, list)):
markdown += addHeader(k, depth)
markdown += dict2md(d[k], depth + 1)
else:
markdown += buildValueChain(k, d[k], depth)
return markdown
def parseList(rawlist, depth):
markdown = ""
for value in rawlist:
if not isinstance(value, (dict, list)):
index = rawlist.index(value)
markdown += buildValueChain(index, value, depth)
else:
markdown += parseDict(value, depth)
return markdown
def buildHeaderChain(depth):
list_tag = '* '
htag = '#'
chain = list_tag * (bool(depth)) + htag * (depth + 1) + \
' value ' + (htag * (depth + 1) + '\n')
return chain
def buildValueChain(key, value, depth):
tab = " "
list_tag = '* '
chain = tab * (bool(depth - 1)) + list_tag + \
str(key) + ": " + str(value) + "\n"
return chain
def addHeader(value, depth):
chain = buildHeaderChain(depth)
chain = chain.replace('value', value.title())
return chain
# Remove ansible branding from results
def rec_ansible_key_strip(obj):
if isinstance(obj, dict):
return {key.replace('ansible_', ''): rec_ansible_key_strip(val) for key, val in obj.items()}
return obj
# COMMAND FUNCTIONS
def generic_ansible(integration_name, command, args: Dict[str, Any]) -> CommandResults:
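# Build an in-memory inventory from the host argument, attach credentials from the integration params, run the requested module via ansible_runner, and render the per-host results as markdown.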
readable_output = ""
sshkey = ""
fork_count = 1 # default to executing against 1 host at a time
if args.get('concurrency'):
fork_count = cast(int, args.get('concurrency'))
inventory: Dict[str, dict] = {}
inventory['all'] = {}
inventory['all']['hosts'] = {}
if type(args['host']) is list:
# host arg can be an array of multiple hosts
hosts = args['host']
else:
# host arg could also be csv
hosts = [host.strip() for host in args['host'].split(',')]
for host in hosts:
new_host = {}
new_host['ansible_host'] = host
if ":" in host:
address = host.split(':')
new_host['ansible_port'] = address[1]
new_host['ansible_host'] = address[0]
else:
new_host['ansible_host'] = host
if demisto.params().get('port'):
new_host['ansible_port'] = demisto.params().get('port')
# Linux
# Different credential options
# SSH Key saved in credential manager selection
if demisto.params().get('creds', {}).get('credentials').get('sshkey'):
username = demisto.params().get('creds', {}).get('credentials').get('user')
sshkey = demisto.params().get('creds', {}).get('credentials').get('sshkey')
new_host['ansible_user'] = username
# Password saved in credential manager selection
elif demisto.params().get('creds', {}).get('credentials').get('password'):
username = demisto.params().get('creds', {}).get('credentials').get('user')
password = demisto.params().get('creds', {}).get('credentials').get('password')
new_host['ansible_user'] = username
new_host['ansible_password'] = password
# username/password individually entered
else:
username = demisto.params().get('creds', {}).get('identifier')
password = demisto.params().get('creds', {}).get('password')
new_host['ansible_user'] = username
new_host['ansible_password'] = password
inventory['all']['hosts'][host] = new_host
module_args = ""
# build module args list
for arg_key, arg_value in args.items():
# skip hardcoded host arg, as it doesn't relate to the module
if arg_key == 'host':
continue
module_args += "%s=\"%s\" " % (arg_key, arg_value)
r = ansible_runner.run(inventory=inventory, host_pattern='all', module=command, quiet=True,
omit_event_data=True, ssh_key=sshkey, module_args=module_args, forks=fork_count)
results = []
for each_host_event in r.events:
# Troubleshooting
# demisto.log("%s: %s\n" % (each_host_event['event'], each_host_event))
if each_host_event['event'] in ["runner_on_ok", "runner_on_unreachable", "runner_on_failed"]:
# parse results
result = json.loads('{' + each_host_event['stdout'].split('{', 1)[1])
host = each_host_event['stdout'].split('|', 1)[0].strip()
status = each_host_event['stdout'].replace('=>', '|').split('|', 3)[1]
# if successful build outputs
if each_host_event['event'] == "runner_on_ok":
if 'fact' in command:
result = result['ansible_facts']
else:
if result.get(command) is not None:
result = result[command]
else:
result.pop("ansible_facts", None)
result = rec_ansible_key_strip(result)
if host != "localhost":
readable_output += "# %s - %s\n" % (host, status)
else:
# This integration is not host based
readable_output += "# %s\n" % status
readable_output += dict2md(result)
# add host and status to result
result['host'] = host
result['status'] = status
results.append(result)
if each_host_event['event'] == "runner_on_unreachable":
msg = "Host %s unreachable\nError Details: %s" % (host, result)
return_error(msg)
if each_host_event['event'] == "runner_on_failed":
msg = "Host %s failed running command\nError Details: %s" % (host, result)
return_error(msg)
return CommandResults(
readable_output=readable_output,
outputs_prefix=integration_name + '.' + command,
outputs_key_field='',
outputs=results
)
# MAIN FUNCTION
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
# SSH Key integration requires ssh_agent to be running in the background
ssh_agent_setup.setup()
try:
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results('ok')
elif demisto.command() == 'linux-alternatives':
return_results(generic_ansible('linux', 'alternatives', demisto.args()))
elif demisto.command() == 'linux-at':
return_results(generic_ansible('linux', 'at', demisto.args()))
elif demisto.command() == 'linux-authorized-key':
return_results(generic_ansible('linux', 'authorized_key', demisto.args()))
elif demisto.command() == 'linux-capabilities':
return_results(generic_ansible('linux', 'capabilities', demisto.args()))
elif demisto.command() == 'linux-cron':
return_results(generic_ansible('linux', 'cron', demisto.args()))
elif demisto.command() == 'linux-cronvar':
return_results(generic_ansible('linux', 'cronvar', demisto.args()))
elif demisto.command() == 'linux-dconf':
return_results(generic_ansible('linux', 'dconf', demisto.args()))
elif demisto.command() == 'linux-debconf':
return_results(generic_ansible('linux', 'debconf', demisto.args()))
elif demisto.command() == 'linux-filesystem':
return_results(generic_ansible('linux', 'filesystem', demisto.args()))
elif demisto.command() == 'linux-firewalld':
return_results(generic_ansible('linux', 'firewalld', demisto.args()))
elif demisto.command() == 'linux-gather-facts':
return_results(generic_ansible('linux', 'gather_facts', demisto.args()))
elif demisto.command() == 'linux-gconftool2':
return_results(generic_ansible('linux', 'gconftool2', demisto.args()))
elif demisto.command() == 'linux-getent':
return_results(generic_ansible('linux', 'getent', demisto.args()))
elif demisto.command() == 'linux-group':
return_results(generic_ansible('linux', 'group', demisto.args()))
elif demisto.command() == 'linux-hostname':
return_results(generic_ansible('linux', 'hostname', demisto.args()))
elif demisto.command() == 'linux-interfaces-file':
return_results(generic_ansible('linux', 'interfaces_file', demisto.args()))
elif demisto.command() == 'linux-iptables':
return_results(generic_ansible('linux', 'iptables', demisto.args()))
elif demisto.command() == 'linux-java-cert':
return_results(generic_ansible('linux', 'java_cert', demisto.args()))
elif demisto.command() == 'linux-java-keystore':
return_results(generic_ansible('linux', 'java_keystore', demisto.args()))
elif demisto.command() == 'linux-kernel-blacklist':
return_results(generic_ansible('linux', 'kernel_blacklist', demisto.args()))
elif demisto.command() == 'linux-known-hosts':
return_results(generic_ansible('linux', 'known_hosts', demisto.args()))
elif demisto.command() == 'linux-listen-ports-facts':
return_results(generic_ansible('linux', 'listen_ports_facts', demisto.args()))
elif demisto.command() == 'linux-locale-gen':
return_results(generic_ansible('linux', 'locale_gen', demisto.args()))
elif demisto.command() == 'linux-modprobe':
return_results(generic_ansible('linux', 'modprobe', demisto.args()))
elif demisto.command() == 'linux-mount':
return_results(generic_ansible('linux', 'mount', demisto.args()))
elif demisto.command() == 'linux-open-iscsi':
return_results(generic_ansible('linux', 'open_iscsi', demisto.args()))
elif demisto.command() == 'linux-pam-limits':
return_results(generic_ansible('linux', 'pam_limits', demisto.args()))
elif demisto.command() == 'linux-pamd':
return_results(generic_ansible('linux', 'pamd', demisto.args()))
elif demisto.command() == 'linux-parted':
return_results(generic_ansible('linux', 'parted', demisto.args()))
elif demisto.command() == 'linux-pids':
return_results(generic_ansible('linux', 'pids', demisto.args()))
elif demisto.command() == 'linux-ping':
return_results(generic_ansible('linux', 'ping', demisto.args()))
elif demisto.command() == 'linux-python-requirements-info':
return_results(generic_ansible('linux', 'python_requirements_info', demisto.args()))
elif demisto.command() == 'linux-reboot':
return_results(generic_ansible('linux', 'reboot', demisto.args()))
elif demisto.command() == 'linux-seboolean':
return_results(generic_ansible('linux', 'seboolean', demisto.args()))
elif demisto.command() == 'linux-sefcontext':
return_results(generic_ansible('linux', 'sefcontext', demisto.args()))
elif demisto.command() == 'linux-selinux':
return_results(generic_ansible('linux', 'selinux', demisto.args()))
elif demisto.command() == 'linux-selinux-permissive':
return_results(generic_ansible('linux', 'selinux_permissive', demisto.args()))
elif demisto.command() == 'linux-selogin':
return_results(generic_ansible('linux', 'selogin', demisto.args()))
elif demisto.command() == 'linux-seport':
return_results(generic_ansible('linux', 'seport', demisto.args()))
elif demisto.command() == 'linux-service':
return_results(generic_ansible('linux', 'service', demisto.args()))
elif demisto.command() == 'linux-service-facts':
return_results(generic_ansible('linux', 'service_facts', demisto.args()))
elif demisto.command() == 'linux-setup':
return_results(generic_ansible('linux', 'setup', demisto.args()))
elif demisto.command() == 'linux-sysctl':
return_results(generic_ansible('linux', 'sysctl', demisto.args()))
elif demisto.command() == 'linux-systemd':
return_results(generic_ansible('linux', 'systemd', demisto.args()))
elif demisto.command() == 'linux-sysvinit':
return_results(generic_ansible('linux', 'sysvinit', demisto.args()))
elif demisto.command() == 'linux-timezone':
return_results(generic_ansible('linux', 'timezone', demisto.args()))
elif demisto.command() == 'linux-ufw':
return_results(generic_ansible('linux', 'ufw', demisto.args()))
elif demisto.command() == 'linux-user':
return_results(generic_ansible('linux', 'user', demisto.args()))
elif demisto.command() == 'linux-xfs-quota':
return_results(generic_ansible('linux', 'xfs_quota', demisto.args()))
elif demisto.command() == 'linux-htpasswd':
return_results(generic_ansible('linux', 'htpasswd', demisto.args()))
elif demisto.command() == 'linux-supervisorctl':
return_results(generic_ansible('linux', 'supervisorctl', demisto.args()))
elif demisto.command() == 'linux-openssh-cert':
return_results(generic_ansible('linux', 'openssh_cert', demisto.args()))
elif demisto.command() == 'linux-openssh-keypair':
return_results(generic_ansible('linux', 'openssh_keypair', demisto.args()))
elif demisto.command() == 'linux-acl':
return_results(generic_ansible('linux', 'acl', demisto.args()))
elif demisto.command() == 'linux-archive':
return_results(generic_ansible('linux', 'archive', demisto.args()))
elif demisto.command() == 'linux-assemble':
return_results(generic_ansible('linux', 'assemble', demisto.args()))
elif demisto.command() == 'linux-blockinfile':
return_results(generic_ansible('linux', 'blockinfile', demisto.args()))
elif demisto.command() == 'linux-file':
return_results(generic_ansible('linux', 'file', demisto.args()))
elif demisto.command() == 'linux-find':
return_results(generic_ansible('linux', 'find', demisto.args()))
elif demisto.command() == 'linux-ini-file':
return_results(generic_ansible('linux', 'ini_file', demisto.args()))
elif demisto.command() == 'linux-iso-extract':
return_results(generic_ansible('linux', 'iso_extract', demisto.args()))
elif demisto.command() == 'linux-lineinfile':
return_results(generic_ansible('linux', 'lineinfile', demisto.args()))
elif demisto.command() == 'linux-replace':
return_results(generic_ansible('linux', 'replace', demisto.args()))
elif demisto.command() == 'linux-stat':
return_results(generic_ansible('linux', 'stat', demisto.args()))
elif demisto.command() == 'linux-synchronize':
return_results(generic_ansible('linux', 'synchronize', demisto.args()))
elif demisto.command() == 'linux-tempfile':
return_results(generic_ansible('linux', 'tempfile', demisto.args()))
elif demisto.command() == 'linux-unarchive':
return_results(generic_ansible('linux', 'unarchive', demisto.args()))
elif demisto.command() == 'linux-xml':
return_results(generic_ansible('linux', 'xml', demisto.args()))
elif demisto.command() == 'linux-expect':
return_results(generic_ansible('linux', 'expect', demisto.args()))
elif demisto.command() == 'linux-bower':
return_results(generic_ansible('linux', 'bower', demisto.args()))
elif demisto.command() == 'linux-bundler':
return_results(generic_ansible('linux', 'bundler', demisto.args()))
elif demisto.command() == 'linux-composer':
return_results(generic_ansible('linux', 'composer', demisto.args()))
elif demisto.command() == 'linux-cpanm':
return_results(generic_ansible('linux', 'cpanm', demisto.args()))
elif demisto.command() == 'linux-gem':
return_results(generic_ansible('linux', 'gem', demisto.args()))
elif demisto.command() == 'linux-maven-artifact':
return_results(generic_ansible('linux', 'maven_artifact', demisto.args()))
elif demisto.command() == 'linux-npm':
return_results(generic_ansible('linux', 'npm', demisto.args()))
elif demisto.command() == 'linux-pear':
return_results(generic_ansible('linux', 'pear', demisto.args()))
elif demisto.command() == 'linux-pip':
return_results(generic_ansible('linux', 'pip', demisto.args()))
elif demisto.command() == 'linux-pip-package-info':
return_results(generic_ansible('linux', 'pip_package_info', demisto.args()))
elif demisto.command() == 'linux-yarn':
return_results(generic_ansible('linux', 'yarn', demisto.args()))
elif demisto.command() == 'linux-apk':
return_results(generic_ansible('linux', 'apk', demisto.args()))
elif demisto.command() == 'linux-apt':
return_results(generic_ansible('linux', 'apt', demisto.args()))
elif demisto.command() == 'linux-apt-key':
return_results(generic_ansible('linux', 'apt_key', demisto.args()))
elif demisto.command() == 'linux-apt-repo':
return_results(generic_ansible('linux', 'apt_repo', demisto.args()))
elif demisto.command() == 'linux-apt-repository':
return_results(generic_ansible('linux', 'apt_repository', demisto.args()))
elif demisto.command() == 'linux-apt-rpm':
return_results(generic_ansible('linux', 'apt_rpm', demisto.args()))
elif demisto.command() == 'linux-dpkg-selections':
return_results(generic_ansible('linux', 'dpkg_selections', demisto.args()))
elif demisto.command() == 'linux-flatpak':
return_results(generic_ansible('linux', 'flatpak', demisto.args()))
elif demisto.command() == 'linux-flatpak-remote':
return_results(generic_ansible('linux', 'flatpak_remote', demisto.args()))
elif demisto.command() == 'linux-homebrew':
return_results(generic_ansible('linux', 'homebrew', demisto.args()))
elif demisto.command() == 'linux-homebrew-cask':
return_results(generic_ansible('linux', 'homebrew_cask', demisto.args()))
elif demisto.command() == 'linux-homebrew-tap':
return_results(generic_ansible('linux', 'homebrew_tap', demisto.args()))
elif demisto.command() == 'linux-layman':
return_results(generic_ansible('linux', 'layman', demisto.args()))
elif demisto.command() == 'linux-package':
return_results(generic_ansible('linux', 'package', demisto.args()))
elif demisto.command() == 'linux-package-facts':
return_results(generic_ansible('linux', 'package_facts', demisto.args()))
elif demisto.command() == 'linux-yum':
return_results(generic_ansible('linux', 'yum', demisto.args()))
elif demisto.command() == 'linux-yum-repository':
return_results(generic_ansible('linux', 'yum_repository', demisto.args()))
elif demisto.command() == 'linux-zypper':
return_results(generic_ansible('linux', 'zypper', demisto.args()))
elif demisto.command() == 'linux-zypper-repository':
return_results(generic_ansible('linux', 'zypper_repository', demisto.args()))
elif demisto.command() == 'linux-snap':
return_results(generic_ansible('linux', 'snap', demisto.args()))
elif demisto.command() == 'linux-redhat-subscription':
return_results(generic_ansible('linux', 'redhat_subscription', demisto.args()))
elif demisto.command() == 'linux-rhn-channel':
return_results(generic_ansible('linux', 'rhn_channel', demisto.args()))
elif demisto.command() == 'linux-rhn-register':
return_results(generic_ansible('linux', 'rhn_register', demisto.args()))
elif demisto.command() == 'linux-rhsm-release':
return_results(generic_ansible('linux', 'rhsm_release', demisto.args()))
elif demisto.command() == 'linux-rhsm-repository':
return_results(generic_ansible('linux', 'rhsm_repository', demisto.args()))
elif demisto.command() == 'linux-rpm-key':
return_results(generic_ansible('linux', 'rpm_key', demisto.args()))
elif demisto.command() == 'linux-get-url':
return_results(generic_ansible('linux', 'get_url', demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
# ENTRY POINT
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
mmfewshot/classification/datasets/tiered_imagenet.py | BIGWangYuDong/mmfewshot | 376 | 51929 |
<reponame>BIGWangYuDong/mmfewshot<gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pickle
import warnings
from typing import Dict, List, Optional, Sequence, Union
import mmcv
import numpy as np
from mmcls.datasets.builder import DATASETS
from typing_extensions import Literal
from .base import BaseFewShotDataset
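# (class name, general category) pairs defining the tieredImageNet train/val/test splits.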
TRAIN_CLASSES = [
('Yorkshire terrier', 'terrier'), ('space shuttle', 'craft'),
('drake', 'aquatic bird'),
("plane, carpenter's plane, woodworking plane", 'tool'),
('mosquito net', 'protective covering, protective cover, protect'),
('sax, saxophone', 'musical instrument, instrument'),
('container ship, containership, container vessel', 'craft'),
('patas, hussar monkey, Erythrocebus patas', 'primate'),
('cheetah, chetah, Acinonyx jubatus', 'feline, felid'),
('submarine, pigboat, sub, U-boat', 'craft'),
('prison, prison house', 'establishment'),
('can opener, tin opener', 'tool'), ('syringe', 'instrument'),
('odometer, hodometer, mileometer, milometer', 'instrument'),
('bassoon', 'musical instrument, instrument'),
('Kerry blue terrier', 'terrier'),
('scale, weighing machine', 'instrument'), ('baseball', 'game equipment'),
('cassette player', 'electronic equipment'),
('shield, buckler', 'protective covering, protective cover, protect'),
('goldfinch, Carduelis carduelis', 'passerine, passeriform bird'),
('cornet, horn, trumpet, trump', 'musical instrument, instrument'),
('flute, transverse flute', 'musical instrument, instrument'),
('stopwatch, stop watch', 'instrument'), ('basketball', 'game equipment'),
('brassiere, bra, bandeau', 'garment'),
('bulbul', 'passerine, passeriform bird'),
('steel drum', 'musical instrument, instrument'),
('bolo tie, bolo, bola tie, bola', 'garment'),
('planetarium', 'building, edifice'), ('stethoscope', 'instrument'),
('proboscis monkey, Nasalis larvatus', 'primate'),
('guillotine', 'instrument'),
('Scottish deerhound, deerhound', 'hound, hound dog'),
('ocarina, sweet potato', 'musical instrument, instrument'),
('Border terrier', 'terrier'),
('capuchin, ringtail, Cebus capucinus', 'primate'),
('magnetic compass', 'instrument'), ('alligator lizard', 'saurian'),
('baboon', 'primate'), ('sundial', 'instrument'),
('gibbon, Hylobates lar', 'primate'),
('grand piano, grand', 'musical instrument, instrument'),
('Arabian camel, dromedary, Camelus dromedarius',
'ungulate, hoofed mammal'), ('basset, basset hound', 'hound, hound dog'),
('corkscrew, bottle screw', 'tool'), ('miniskirt, mini', 'garment'),
('missile', 'instrument'), ('hatchet', 'tool'),
('acoustic guitar', 'musical instrument, instrument'),
('impala, Aepyceros melampus', 'ungulate, hoofed mammal'),
('parking meter', 'instrument'),
('greenhouse, nursery, glasshouse', 'building, edifice'),
('home theater, home theatre', 'building, edifice'),
('hartebeest', 'ungulate, hoofed mammal'),
('hippopotamus, hippo, river horse, Hippopotamus amphibius',
'ungulate, hoofed mammal'), ('warplane, military plane', 'craft'),
('albatross, mollymawk', 'aquatic bird'),
('umbrella', 'protective covering, protective cover, protect'),
('shoe shop, shoe-shop, shoe store', 'establishment'),
('suit, suit of clothes', 'garment'),
('pickelhaube', 'protective covering, protective cover, protect'),
('soccer ball', 'game equipment'), ('yawl', 'craft'),
('screwdriver', 'tool'),
('Madagascar cat, ring-tailed lemur, Lemur catta', 'primate'),
('garter snake, grass snake', 'snake, serpent, ophidian'),
('bustard', 'aquatic bird'), ('tabby, tabby cat', 'feline, felid'),
('airliner', 'craft'),
('tobacco shop, tobacconist shop, tobacconist', 'establishment'),
('Italian greyhound', 'hound, hound dog'), ('projector', 'instrument'),
('bittern', 'aquatic bird'), ('rifle', 'instrument'),
('pay-phone, pay-station', 'electronic equipment'),
('house finch, linnet, Carpodacus mexicanus',
'passerine, passeriform bird'), ('monastery', 'building, edifice'),
('lens cap, lens cover', 'protective covering, protective cover, protect'),
('maillot, tank suit', 'garment'), ('canoe', 'craft'),
('letter opener, paper knife, paperknife', 'tool'),
('nail', 'restraint, constraint'), ('guenon, guenon monkey', 'primate'),
('CD player', 'electronic equipment'),
('safety pin', 'restraint, constraint'),
('harp', 'musical instrument, instrument'),
('disk brake, disc brake', 'restraint, constraint'),
('otterhound, otter hound', 'hound, hound dog'),
('green mamba', 'snake, serpent, ophidian'),
('violin, fiddle', 'musical instrument, instrument'),
('American coot, marsh hen, mud hen, water hen, Fulica americana',
'aquatic bird'), ('ram, tup', 'ungulate, hoofed mammal'),
('jay', 'passerine, passeriform bird'), ('trench coat', 'garment'),
('Indian cobra, Naja naja', 'snake, serpent, ophidian'),
('projectile, missile', 'instrument'), ('schooner', 'craft'),
('magpie', 'passerine, passeriform bird'), ('Norwich terrier', 'terrier'),
('cairn, cairn terrier', 'terrier'),
('crossword puzzle, crossword', 'game equipment'),
('snow leopard, ounce, Panthera uncia', 'feline, felid'),
('gong, tam-tam', 'musical instrument, instrument'),
('library', 'building, edifice'),
('swimming trunks, bathing trunks', 'garment'),
('Staffordshire bullterrier, Staffordshire bull terrier', 'terrier'),
('Lakeland terrier', 'terrier'),
('black stork, Ciconia nigra', 'aquatic bird'),
('king penguin, Aptenodytes patagonica', 'aquatic bird'),
('water ouzel, dipper', 'passerine, passeriform bird'),
('macaque', 'primate'), ('lynx, catamount', 'feline, felid'),
('ping-pong ball', 'game equipment'), ('standard schnauzer', 'terrier'),
('Australian terrier', 'terrier'), ('stupa, tope', 'building, edifice'),
('white stork, Ciconia ciconia', 'aquatic bird'),
('king snake, kingsnake', 'snake, serpent, ophidian'),
('Airedale, Airedale terrier', 'terrier'),
('banjo', 'musical instrument, instrument'), ('Windsor tie', 'garment'),
('abaya', 'garment'), ('stole', 'garment'),
('vine snake', 'snake, serpent, ophidian'),
('Bedlington terrier', 'terrier'), ('langur', 'primate'),
('catamaran', 'craft'), ('sarong', 'garment'),
('spoonbill', 'aquatic bird'),
('boa constrictor, Constrictor constrictor', 'snake, serpent, ophidian'),
('ruddy turnstone, Arenaria interpres', 'aquatic bird'),
('hognose snake, puff adder, sand viper', 'snake, serpent, ophidian'),
('American chameleon, anole, Anolis carolinensis', 'saurian'),
('rugby ball', 'game equipment'),
('black swan, Cygnus atratus', 'aquatic bird'),
('frilled lizard, Chlamydosaurus kingi', 'saurian'),
('oscilloscope, scope, cathode-ray oscilloscope, CRO',
'electronic equipment'),
('ski mask', 'protective covering, protective cover, protect'),
('marmoset', 'primate'),
('Komodo dragon, Komodo lizard, dragon lizard, giant lizard, '
'Varanus komodoensis', 'saurian'),
('accordion, piano accordion, squeeze box',
'musical instrument, instrument'),
('horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
'snake, serpent, ophidian'),
('bookshop, bookstore, bookstall', 'establishment'),
('Boston bull, Boston terrier', 'terrier'), ('crane', 'aquatic bird'),
('junco, snowbird', 'passerine, passeriform bird'),
('silky terrier, Sydney silky', 'terrier'),
('Egyptian cat', 'feline, felid'), ('Irish terrier', 'terrier'),
('leopard, Panthera pardus', 'feline, felid'),
('sea snake', 'snake, serpent, ophidian'),
('hog, pig, grunter, squealer, Sus scrofa', 'ungulate, hoofed mammal'),
('colobus, colobus monkey', 'primate'),
('chickadee', 'passerine, passeriform bird'),
('Scotch terrier, Scottish terrier, Scottie', 'terrier'),
('digital watch', 'instrument'), ('analog clock', 'instrument'),
('zebra', 'ungulate, hoofed mammal'),
('American Staffordshire terrier, Staffordshire terrier, '
'American pit bull terrier, pit bull terrier', 'terrier'),
('European gallinule, Porphyrio porphyrio', 'aquatic bird'),
('lampshade, lamp shade',
'protective covering, protective cover, protect'),
('holster', 'protective covering, protective cover, protect'),
('jaguar, panther, Panthera onca, Felis onca', 'feline, felid'),
('cleaver, meat cleaver, chopper', 'tool'),
('brambling, Fringilla montifringilla', 'passerine, passeriform bird'),
('orangutan, orang, orangutang, Pongo pygmaeus', 'primate'),
('combination lock', 'restraint, constraint'),
('tile roof', 'protective covering, protective cover, protect'),
('borzoi, Russian wolfhound', 'hound, hound dog'),
('water snake', 'snake, serpent, ophidian'),
('knot', 'restraint, constraint'),
('window shade', 'protective covering, protective cover, protect'),
('mosque', 'building, edifice'),
('Walker hound, Walker foxhound', 'hound, hound dog'),
('cardigan', 'garment'), ('warthog', 'ungulate, hoofed mammal'),
('whiptail, whiptail lizard', 'saurian'), ('plow, plough', 'tool'),
('bluetick', 'hound, hound dog'), ('poncho', 'garment'),
('shovel', 'tool'),
('sidewinder, horned rattlesnake, Crotalus cerastes',
'snake, serpent, ophidian'), ('croquet ball', 'game equipment'),
('sorrel', 'ungulate, hoofed mammal'), ('airship, dirigible', 'craft'),
('goose', 'aquatic bird'), ('church, church building',
'building, edifice'),
('titi, titi monkey', 'primate'),
('butcher shop, meat market', 'establishment'),
('diamondback, diamondback rattlesnake, Crotalus adamanteus',
'snake, serpent, ophidian'),
('common iguana, iguana, Iguana iguana', 'saurian'),
('Saluki, gazelle hound', 'hound, hound dog'),
('monitor', 'electronic equipment'),
('sunglasses, dark glasses, shades', 'instrument'),
('flamingo', 'aquatic bird'),
('seat belt, seatbelt', 'restraint, constraint'),
('Persian cat', 'feline, felid'), ('gorilla, Gorilla gorilla', 'primate'),
('banded gecko', 'saurian'),
('thatch, thatched roof',
'protective covering, protective cover, protect'),
('beagle', 'hound, hound dog'), ('limpkin, Aramus pictus', 'aquatic bird'),
('jigsaw puzzle', 'game equipment'), ('rule, ruler', 'instrument'),
('hammer', 'tool'), ('cello, violoncello',
'musical instrument, instrument'),
('lab coat, laboratory coat', 'garment'),
('indri, indris, Indri indri, Indri brevicaudatus', 'primate'),
('vault', 'protective covering, protective cover, protect'),
('cellular telephone, cellular phone, cellphone, cell, mobile phone',
'electronic equipment'), ('whippet', 'hound, hound dog'),
('siamang, Hylobates syndactylus, Symphalangus syndactylus', 'primate'),
("loupe, jeweler's loupe", 'instrument'), ('modem',
'electronic equipment'),
('lifeboat', 'craft'),
('dial telephone, dial phone', 'electronic equipment'),
('cougar, puma, catamount, mountain lion, painter, panther, '
'Felis concolor', 'feline, felid'),
('thimble', 'protective covering, protective cover, protect'),
('ibex, Capra ibex', 'ungulate, hoofed mammal'),
('lawn mower, mower', 'tool'),
('bell cote, bell cot', 'protective covering, protective cover, protect'),
('chain mail, ring mail, mail, chain armor, chain armour, ring armor, '
'ring armour', 'protective covering, protective cover, protect'),
('hair slide', 'restraint, constraint'),
('apiary, bee house', 'building, edifice'),
('harmonica, mouth organ, harp, mouth harp',
'musical instrument, instrument'),
('green snake, grass snake', 'snake, serpent, ophidian'),
('howler monkey, howler', 'primate'), ('digital clock', 'instrument'),
('restaurant, eating house, eating place, eatery', 'building, edifice'),
('miniature schnauzer', 'terrier'),
('panpipe, pandean pipe, syrinx', 'musical instrument, instrument'),
('pirate, pirate ship', 'craft'),
('window screen', 'protective covering, protective cover, protect'),
('binoculars, field glasses, opera glasses', 'instrument'),
('Afghan hound, Afghan', 'hound, hound dog'),
('cinema, movie theater, movie theatre, movie house, picture palace',
'building, edifice'), ('liner, ocean liner', 'craft'),
('ringneck snake, ring-necked snake, ring snake',
'snake, serpent, ophidian'), ('redshank, Tringa totanus', 'aquatic bird'),
('Siamese cat, Siamese', 'feline, felid'),
('thunder snake, worm snake, Carphophis amoenus',
'snake, serpent, ophidian'), ('boathouse', 'building, edifice'),
('jersey, T-shirt, tee shirt', 'garment'),
('soft-coated wheaten terrier', 'terrier'),
('scabbard', 'protective covering, protective cover, protect'),
('muzzle', 'restraint, constraint'),
('Ibizan hound, Ibizan Podenco', 'hound, hound dog'),
('tennis ball', 'game equipment'), ('padlock', 'restraint, constraint'),
('kimono', 'garment'), ('redbone', 'hound, hound dog'),
('wild boar, boar, Sus scrofa', 'ungulate, hoofed mammal'),
('dowitcher', 'aquatic bird'),
('oboe, hautboy, hautbois', 'musical instrument, instrument'),
('electric guitar', 'musical instrument, instrument'), ('trimaran',
'craft'),
('barometer', 'instrument'), ('llama', 'ungulate, hoofed mammal'),
('robin, American robin, Turdus migratorius',
'passerine, passeriform bird'),
('maraca', 'musical instrument, instrument'),
('feather boa, boa', 'garment'),
('<NAME>, <NAME> terrier', 'terrier'),
('Lhasa, Lhasa apso', 'terrier'), ('bow', 'instrument'),
('punching bag, punch bag, punching ball, punchball', 'game equipment'),
('volleyball', 'game equipment'), ('Norfolk terrier', 'terrier'),
('Gila monster, Heloderma suspectum', 'saurian'),
('fire screen, fireguard',
'protective covering, protective cover, protect'),
('hourglass', 'instrument'),
('chimpanzee, chimp, Pan troglodytes', 'primate'),
('birdhouse', 'protective covering, protective cover, protect'),
('Sealyham terrier, Sealyham', 'terrier'),
('Tibetan terrier, chrysanthemum dog', 'terrier'),
('palace', 'building, edifice'), ('wreck', 'craft'),
('overskirt', 'garment'), ('pelican', 'aquatic bird'),
('French horn, horn', 'musical instrument, instrument'),
('tiger cat', 'feline, felid'), ('barbershop', 'establishment'),
('revolver, six-gun, six-shooter', 'instrument'),
('Irish wolfhound', 'hound, hound dog'),
('lion, king of beasts, Panthera leo', 'feline, felid'),
('fur coat', 'garment'), ('ox', 'ungulate, hoofed mammal'),
('cuirass', 'protective covering, protective cover, protect'),
('grocery store, grocery, food market, market', 'establishment'),
('hoopskirt, crinoline', 'garment'),
('spider monkey, Ateles geoffroyi', 'primate'),
('tiger, Panthera tigris', 'feline, felid'),
('bloodhound, sleuthhound', 'hound, hound dog'),
('red-backed sandpiper, dunlin, Erolia alpina', 'aquatic bird'),
('drum, membranophone, tympan', 'musical instrument, instrument'),
('radio telescope, radio reflector', 'instrument'),
('West Highland white terrier', 'terrier'),
('bow tie, bow-tie, bowtie', 'garment'), ('golf ball', 'game equipment'),
('barn', 'building, edifice'),
('binder, ring-binder', 'protective covering, protective cover, protect'),
('English foxhound', 'hound, hound dog'),
('bison', 'ungulate, hoofed mammal'), ('screw', 'restraint, constraint'),
('assault rifle, assault gun', 'instrument'),
('diaper, nappy, napkin', 'garment'),
('bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, '
'Rocky Mountain sheep, Ovis canadensis', 'ungulate, hoofed mammal'),
('Weimaraner', 'hound, hound dog'),
('computer keyboard, keypad', 'electronic equipment'),
('black-and-tan coonhound', 'hound, hound dog'),
('little blue heron, Egretta caerulea', 'aquatic bird'),
('breastplate, aegis, egis',
'protective covering, protective cover, protect'),
('gasmask, respirator, gas helmet',
'protective covering, protective cover, protect'),
('aircraft carrier, carrier, flattop, attack aircraft carrier', 'craft'),
('iPod', 'electronic equipment'),
('organ, pipe organ', 'musical instrument, instrument'),
('wall clock', 'instrument'),
('rock python, rock snake, Python sebae', 'snake, serpent, ophidian'),
('squirrel monkey, Saimiri sciureus', 'primate'),
('bikini, two-piece', 'garment'),
('water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
'ungulate, hoofed mammal'),
('upright, upright piano', 'musical instrument, instrument'),
('chime, bell, gong', 'musical instrument, instrument'),
('confectionery, confectionary, candy store', 'establishment'),
('indigo bunting, indigo finch, indigo bird, Passerina cyanea',
'passerine, passeriform bird'),
('green lizard, Lacerta viridis', 'saurian'),
('Norwegian elkhound, elkhound', 'hound, hound dog'),
('dome', 'protective covering, protective cover, protect'),
('buckle', 'restraint, constraint'), ('giant schnauzer', 'terrier'),
('jean, blue jean, denim', 'garment'),
('wire-haired fox terrier', 'terrier'),
('African chameleon, Chamaeleo chamaeleon', 'saurian'),
('trombone', 'musical instrument, instrument'),
('oystercatcher, oyster catcher', 'aquatic bird'), ('sweatshirt',
'garment'),
('American egret, great white heron, Egretta albus', 'aquatic bird'),
('marimba, xylophone', 'musical instrument, instrument'),
('gazelle', 'ungulate, hoofed mammal'),
('red-breasted merganser, Mergus serrator', 'aquatic bird'),
('tape player', 'electronic equipment'), ('speedboat', 'craft'),
('gondola', 'craft'),
('night snake, Hypsiglena torquata', 'snake, serpent, ophidian'),
('cannon', 'instrument'), ("plunger, plumber's helper", 'tool'),
('balloon', 'craft'), ('toyshop', 'establishment'), ('agama', 'saurian'),
('fireboat', 'craft'), ('bakery, bakeshop, bakehouse', 'establishment')
]
VAL_CLASSES = [
('cab, hack, taxi, taxicab', 'motor vehicle, automotive vehicle'),
('jeep, landrover', 'motor vehicle, automotive vehicle'),
('English setter', 'sporting dog, gun dog'),
('flat-coated retriever', 'sporting dog, gun dog'),
('bassinet', 'furnishing'),
('sports car, sport car', 'motor vehicle, automotive vehicle'),
('golfcart, golf cart', 'motor vehicle, automotive vehicle'),
('clumber, clumber spaniel', 'sporting dog, gun dog'),
('puck, hockey puck', 'mechanism'), ('reel', 'mechanism'),
('Welsh springer spaniel', 'sporting dog, gun dog'),
('car wheel', 'mechanism'), ('wardrobe, closet, press', 'furnishing'),
('go-kart', 'motor vehicle, automotive vehicle'),
('switch, electric switch, electrical switch', 'mechanism'),
('crib, cot', 'furnishing'), ('laptop, laptop computer', 'machine'),
('thresher, thrasher, threshing machine', 'machine'),
('web site, website, internet site, site', 'machine'),
('English springer, English springer spaniel', 'sporting dog, gun dog'),
('iron, smoothing iron', 'durables, durable goods, consumer durables'),
('<NAME>', 'sporting dog, gun dog'),
('Labrador retriever', 'sporting dog, gun dog'),
('<NAME>', 'sporting dog, gun dog'),
('amphibian, amphibious vehicle', 'motor vehicle, automotive vehicle'),
('file, file cabinet, filing cabinet', 'furnishing'),
('harvester, reaper', 'machine'),
('convertible', 'motor vehicle, automotive vehicle'),
('paddlewheel, paddle wheel', 'mechanism'),
('microwave, microwave oven',
'durables, durable goods, consumer durables'), ('swing', 'mechanism'),
('chiffonier, commode', 'furnishing'), ('desktop computer', 'machine'),
('gas pump, gasoline pump, petrol pump, island dispenser', 'mechanism'),
('beach wagon, station wagon, wagon, estate car, beach waggon, station '
'waggon, waggon', 'motor vehicle, automotive vehicle'),
('carousel, carrousel, merry-go-round, roundabout, whirligig',
'mechanism'), ("potter's wheel", 'mechanism'),
('folding chair', 'furnishing'),
('fire engine, fire truck', 'motor vehicle, automotive vehicle'),
('slide rule, slipstick', 'machine'),
('vizsla, Hungarian pointer', 'sporting dog, gun dog'),
('waffle iron', 'durables, durable goods, consumer durables'),
('trailer truck, tractor trailer, trucking rig, rig, articulated lorry, '
'semi', 'motor vehicle, automotive vehicle'),
('toilet seat', 'furnishing'),
('medicine chest, medicine cabinet', 'furnishing'),
('<NAME>', 'sporting dog, gun dog'),
('Chesapeake Bay retriever', 'sporting dog, gun dog'),
('cash machine, cash dispenser, automated teller machine, automatic '
'teller machine, automated teller, automatic teller, ATM', 'machine'),
('moped', 'motor vehicle, automotive vehicle'),
('Model T', 'motor vehicle, automotive vehicle'),
('bookcase', 'furnishing'),
('ambulance', 'motor vehicle, automotive vehicle'),
('German short-haired pointer', 'sporting dog, gun dog'),
('dining table, board', 'furnishing'),
('minivan', 'motor vehicle, automotive vehicle'),
('police van, police wagon, paddy wagon, patrol wagon, wagon, '
'black Maria', 'motor vehicle, automotive vehicle'),
('entertainment center', 'furnishing'), ('throne', 'furnishing'),
('desk', 'furnishing'), ('notebook, notebook computer', 'machine'),
('snowplow, snowplough', 'motor vehicle, automotive vehicle'),
('cradle', 'furnishing'), ('abacus', 'machine'),
('hand-held computer, hand-held microcomputer', 'machine'),
('Dutch oven', 'durables, durable goods, consumer durables'),
('toaster', 'durables, durable goods, consumer durables'),
('barber chair', 'furnishing'), ('vending machine', 'machine'),
('four-poster', 'furnishing'),
('rotisserie', 'durables, durable goods, consumer durables'),
('hook, claw', 'mechanism'),
('vacuum, vacuum cleaner', 'durables, durable goods, consumer durables'),
('pickup, pickup truck', 'motor vehicle, automotive vehicle'),
('table lamp', 'furnishing'), ('rocking chair, rocker', 'furnishing'),
('prayer rug, prayer mat', 'furnishing'),
('moving van', 'motor vehicle, automotive vehicle'),
('studio couch, day bed', 'furnishing'),
('racer, race car, racing car', 'motor vehicle, automotive vehicle'),
('park bench', 'furnishing'),
('Irish setter, red setter', 'sporting dog, gun dog'),
('refrigerator, icebox', 'durables, durable goods, consumer durables'),
('china cabinet, china closet', 'furnishing'),
('cocker spaniel, English cocker spaniel, cocker',
'sporting dog, gun dog'), ('radiator', 'mechanism'),
('Sussex spaniel', 'sporting dog, gun dog'),
('hand blower, blow dryer, blow drier, hair dryer, hair drier',
'durables, durable goods, consumer durables'),
('slot, one-armed bandit', 'machine'),
('golden retriever', 'sporting dog, gun dog'),
('curly-coated retriever', 'sporting dog, gun dog'),
('limousine, limo', 'motor vehicle, automotive vehicle'),
('washer, automatic washer, washing machine',
'durables, durable goods, consumer durables'),
('garbage truck, dustcart', 'motor vehicle, automotive vehicle'),
('dishwasher, dish washer, dishwashing machine',
'durables, durable goods, consumer durables'), ('pinwheel', 'mechanism'),
('espresso maker', 'durables, durable goods, consumer durables'),
('tow truck, tow car, wrecker', 'motor vehicle, automotive vehicle')
]
TEST_CLASSES = [
('Siberian husky', 'working dog'), ('dung beetle', 'insect'),
('jackfruit, jak, jack', 'solid'), ('miniature pinscher', 'working dog'),
('tiger shark, Galeocerdo cuvieri', 'aquatic vertebrate'),
('weevil', 'insect'),
('goldfish, Carassius auratus', 'aquatic vertebrate'),
('schipperke', 'working dog'), ('Tibetan mastiff', 'working dog'),
('orange', 'solid'), ('whiskey jug', 'vessel'),
('hammerhead, hammerhead shark', 'aquatic vertebrate'),
('bull mastiff', 'working dog'), ('eggnog', 'substance'),
('bee', 'insect'), ('tench, Tinca tinca', 'aquatic vertebrate'),
('chocolate sauce, chocolate syrup', 'substance'),
("dragonfly, darning needle, devil's darning needle, sewing needle, "
'snake feeder, snake doctor, mosquito hawk, skeeter hawk', 'insect'),
('zucchini, courgette', 'solid'), ('kelpie', 'working dog'),
('stone wall', 'obstruction, obstructor, obstructer, impedimen'),
('butternut squash', 'solid'), ('mushroom', 'solid'),
('Old English sheepdog, bobtail', 'working dog'),
('dam, dike, dyke', 'obstruction, obstructor, obstructer, impedimen'),
('picket fence, paling', 'obstruction, obstructor, obstructer, impedimen'),
('espresso', 'substance'), ('beer bottle', 'vessel'),
('plate', 'substance'), ('dough', 'substance'),
('sandbar, sand bar', 'geological formation, formation'),
('boxer', 'working dog'), ('bathtub, bathing tub, bath, tub', 'vessel'),
('beaker', 'vessel'), ('bucket, pail', 'vessel'),
('Border collie', 'working dog'), ('sturgeon', 'aquatic vertebrate'),
('worm fence, snake fence, snake-rail fence, Virginia fence',
'obstruction, obstructor, obstructer, impedimen'),
('seashore, coast, seacoast, sea-coast',
'geological formation, formation'),
('long-horned beetle, longicorn, longicorn beetle', 'insect'),
('turnstile', 'obstruction, obstructor, obstructer, impedimen'),
('groenendael', 'working dog'), ('vase', 'vessel'), ('teapot', 'vessel'),
('water tower', 'vessel'), ('strawberry', 'solid'), ('burrito',
'substance'),
('cauliflower', 'solid'), ('volcano', 'geological formation, formation'),
('valley, vale', 'geological formation, formation'),
('head cabbage', 'solid'), ('tub, vat', 'vessel'),
('lacewing, lacewing fly', 'insect'),
('coral reef', 'geological formation, formation'),
('hot pot, hotpot', 'substance'), ('custard apple', 'solid'),
('monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
'insect'), ('cricket', 'insect'), ('pill bottle', 'vessel'),
('walking stick, walkingstick, stick insect', 'insect'),
('promontory, headland, head, foreland',
'geological formation, formation'), ('malinois', 'working dog'),
('pizza, pizza pie', 'substance'),
('malamute, malemute, Alaskan malamute', 'working dog'),
('kuvasz', 'working dog'), ('trifle', 'substance'), ('fig', 'solid'),
('komondor', 'working dog'), ('ant, emmet, pismire', 'insect'),
('electric ray, crampfish, numbfish, torpedo', 'aquatic vertebrate'),
('<NAME>', 'solid'), ('cockroach, roach', 'insect'),
('stingray', 'aquatic vertebrate'), ('red wine', 'substance'),
('<NAME>, <NAME>', 'working dog'),
('ice lolly, lolly, lollipop, popsicle', 'substance'),
('bell pepper', 'solid'), ('cup', 'substance'), ('pomegranate', 'solid'),
('Appenzeller', 'working dog'), ('hay', 'substance'),
('EntleBucher', 'working dog'),
('sulphur butterfly, sulfur butterfly', 'insect'),
('mantis, mantid', 'insect'), ('Bernese mountain dog', 'working dog'),
('banana', 'solid'), ('water jug', 'vessel'), ('cicada, cicala', 'insect'),
('barracouta, snoek', 'aquatic vertebrate'),
('washbasin, handbasin, washbowl, lavabo, wash-hand basin', 'vessel'),
('wine bottle', 'vessel'), ('Rottweiler', 'working dog'),
('briard', 'working dog'),
('puffer, pufferfish, blowfish, globefish', 'aquatic vertebrate'),
('ground beetle, carabid beetle', 'insect'),
('Bouvier des Flandres, Bouviers des Flandres', 'working dog'),
('chainlink fence', 'obstruction, obstructor, obstructer, impedimen'),
('damselfly', 'insect'), ('grasshopper, hopper', 'insect'),
('carbonara', 'substance'),
('German shepherd, German shepherd dog, German police dog, alsatian',
'working dog'), ('guacamole', 'substance'),
('leaf beetle, chrysomelid', 'insect'), ('caldron, cauldron', 'vessel'),
('fly', 'insect'),
('bannister, banister, balustrade, balusters, handrail',
'obstruction, obstructor, obstructer, impedimen'),
('spaghetti squash', 'solid'), ('coffee mug', 'vessel'),
('gar, garfish, garpike, billfish, Lepisosteus osseus',
'aquatic vertebrate'), ('barrel, cask', 'vessel'),
('eel', 'aquatic vertebrate'), ('rain barrel', 'vessel'),
('coho, cohoe, coho salmon, blue jack, silver salmon, '
'Oncorhynchus kisutch', 'aquatic vertebrate'), ('water bottle', 'vessel'),
('menu', 'substance'), ('tiger beetle', 'insect'),
('Great Dane', 'working dog'),
('rock beauty, Holocanthus tricolor', 'aquatic vertebrate'),
('anemone fish', 'aquatic vertebrate'), ('mortar', 'vessel'),
('Eskimo dog, husky', 'working dog'),
('affenpinscher, monkey pinscher, monkey dog', 'working dog'),
('breakwater, groin, groyne, mole, bulwark, seawall, jetty',
'obstruction, obstructor, obstructer, impedimen'),
('artichoke, globe artichoke', 'solid'), ('broccoli', 'solid'),
('French bulldog', 'working dog'), ('coffeepot', 'vessel'),
('cliff, drop, drop-off', 'geological formation, formation'),
('ladle', 'vessel'),
('sliding door', 'obstruction, obstructor, obstructer, impedimen'),
('leafhopper', 'insect'), ('collie', 'working dog'),
('Doberman, <NAME>', 'working dog'), ('pitcher, ewer',
'vessel'),
('admiral', 'insect'), ('cabbage butterfly', 'insect'),
('geyser', 'geological formation, formation'), ('cheeseburger',
'substance'),
('grille, radiator grille',
'obstruction, obstructor, obstructer, impedimen'),
('ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle', 'insect'),
('great white shark, white shark, man-eater, man-eating shark, '
'Carcharodon carcharias', 'aquatic vertebrate'),
('pineapple, ananas', 'solid'), ('cardoon', 'solid'),
('pop bottle, soda bottle', 'vessel'), ('lionfish', 'aquatic vertebrate'),
('cucumber, cuke', 'solid'), ('face powder', 'substance'),
('Shetland sheepdog, Shetland sheep dog, Shetland', 'working dog'),
('ringlet, ringlet butterfly', 'insect'),
('Greater Swiss Mountain dog', 'working dog'),
('alp', 'geological formation, formation'), ('consomme', 'substance'),
('potpie', 'substance'), ('acorn squash', 'solid'),
('ice cream, icecream', 'substance'),
('lakeside, lakeshore', 'geological formation, formation'),
('hotdog, hot dog, red hot', 'substance'), ('rhinoceros beetle', 'insect'),
('lycaenid, lycaenid butterfly', 'insect'), ('lemon', 'solid')
]
@DATASETS.register_module()
class TieredImageNetDataset(BaseFewShotDataset):
"""TieredImageNet dataset for few shot classification.
Args:
        subset (str | list[str]): The classes of the whole dataset are split
            into three disjoint subsets: train, val and test. If subset is a
            string, only the data of that subset will be loaded. If subset is
            a list of strings, the data of all subsets in the list will be
            loaded. Options: ['train', 'val', 'test']. Default: 'train'.
"""
resource = 'https://github.com/renmengye/few-shot-ssl-public'
TRAIN_CLASSES = TRAIN_CLASSES
VAL_CLASSES = VAL_CLASSES
TEST_CLASSES = TEST_CLASSES
def __init__(self,
subset: Literal['train', 'test', 'val'] = 'train',
*args,
**kwargs):
if isinstance(subset, str):
subset = [subset]
for subset_ in subset:
assert subset_ in ['train', 'test', 'val']
self.subset = subset
self.GENERAL_CLASSES = self.get_general_classes()
super().__init__(*args, **kwargs)
def get_classes(
self,
classes: Optional[Union[Sequence[str],
str]] = None) -> Sequence[str]:
"""Get class names of current dataset.
Args:
            classes (Sequence[str] | str | None): Three types of input
                are handled differently:
                - If `classes` is a tuple or list, it will override the
                CLASSES predefined in the dataset.
                - If `classes` is None, the pre-defined CLASSES of the
                dataset will be used.
- If `classes` is a string, it is the path of a classes file
that contains the name of all classes. Each line of the file
contains a single class name.
Returns:
tuple[str] or list[str]: Names of categories of the dataset.
"""
if classes is None:
class_names = []
for subset_ in self.subset:
if subset_ == 'train':
class_names += [i[0] for i in self.TRAIN_CLASSES]
elif subset_ == 'val':
class_names += [i[0] for i in self.VAL_CLASSES]
elif subset_ == 'test':
class_names += [i[0] for i in self.TEST_CLASSES]
else:
                raise ValueError(f'invalid subset {subset_}; only '
                                 'train, val or test are supported.')
elif isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
return class_names
def get_general_classes(self) -> List[str]:
"""Get general classes of each classes."""
general_classes = []
for subset_ in self.subset:
if subset_ == 'train':
general_classes += [i[1] for i in self.TRAIN_CLASSES]
elif subset_ == 'val':
general_classes += [i[1] for i in self.VAL_CLASSES]
elif subset_ == 'test':
general_classes += [i[1] for i in self.TEST_CLASSES]
else:
                raise ValueError(f'invalid subset {subset_}; only '
                                 'train, val or test are supported.')
return general_classes
def load_annotations(self) -> List[Dict]:
"""Load annotation according to the classes subset."""
data_infos = []
for subset_ in self.subset:
labels_file = osp.join(self.data_prefix, f'{subset_}_labels.pkl')
img_bytes_file = osp.join(self.data_prefix,
f'{subset_}_images_png.pkl')
assert osp.exists(img_bytes_file) and osp.exists(labels_file), \
f'Please download ann_file through {self.resource}.'
with open(labels_file, 'rb') as labels, \
open(img_bytes_file, 'rb') as img_bytes:
labels = pickle.load(labels)
img_bytes = pickle.load(img_bytes)
label_specific = labels['label_specific']
label_general = labels['label_general']
class_specific = labels['label_specific_str']
class_general = labels['label_general_str']
unzip_file_path = osp.join(self.data_prefix, subset_)
is_unzip_file = osp.exists(unzip_file_path)
if not is_unzip_file:
                msg = ('Please use the provided script '
                       'tools/classification/data/unzip_tiered_imagenet.py '
                       'to unzip the pickle file. Otherwise the whole pickle '
                       'file may cause heavy memory usage when the model '
                       'is trained with distributed data parallel.')
warnings.warn(msg)
for i in range(len(img_bytes)):
class_specific_name = class_specific[label_specific[i]]
class_general_name = class_general[label_general[i]]
gt_label = self.class_to_idx[class_specific_name]
assert class_general_name == self.GENERAL_CLASSES[gt_label]
filename = osp.join(subset_, f'{subset_}_image_{i}.byte')
info = {
'img_prefix': self.data_prefix,
'img_info': {
'filename': filename
},
'gt_label': np.array(gt_label, dtype=np.int64),
}
                # if the whole pickle file isn't unzipped,
                # the image bytes will be put into data_info
if not is_unzip_file:
info['img_bytes'] = img_bytes[i]
data_infos.append(info)
return data_infos
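# --- Hedged usage sketch (not part of the original file) ---------------------
# A minimal illustration of how this dataset class might be instantiated. The
# `data_prefix` path and the empty `pipeline` are placeholders, the pickle
# files must already be downloaded, and the keyword arguments are assumed to
# come from BaseFewShotDataset, so the real signature may differ.
if __name__ == '__main__':
    dataset = TieredImageNetDataset(
        subset=['train', 'val'],                # load two subsets at once
        data_prefix='data/tiered_imagenet',     # assumed directory layout
        pipeline=[])                            # placeholder transform pipeline
    print(len(dataset.GENERAL_CLASSES), 'class entries loaded')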
|
petridish/utils/sessinit.py
|
Bhaskers-Blu-Org2/petridishnn
| 121 |
51932
|
<reponame>Bhaskers-Blu-Org2/petridishnn
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import numpy as np
import tensorflow as tf
import six
from tensorpack.utils import logger
from tensorpack.tfutils.common import (
get_op_tensor_name, get_global_step_var)
from tensorpack.tfutils.varmanip import SessionUpdate
from tensorpack.tfutils.sessinit import (
SessionInit, SaverRestore, CheckpointReaderAdapter)
__all__ = ['SaverRestoreSizeRelaxed', 'read_parameter_val']
class SaverRestoreSizeRelaxed(SaverRestore):
""" Same as :class:`SaverRestore`, but has more relaxed constraints.
    It allows loading variables of different sizes, as long as they have the same
    number of dimensions. For each dim, the smaller of the two sizes is used.
    The first chunk along each dim of the checkpoint value is loaded into the variable.
"""
def _run_init(self, sess):
logger.info(
"Restoring checkpoint with size relaxation from {} ...".format(self.path))
def f(reader, name, v):
val = reader.get_tensor(name)
val_shape = list(val.shape)
var_shape = v.get_shape().as_list()
if var_shape != val_shape:
n_dims = len(val_shape)
assert len(var_shape) == n_dims, \
"Size Relaxation requires the variable match in number of dimensions"
slices = []
pad_params = []
logger.info(
"Loading variable {} with var_shape {} and val_shape {}".format(
name, var_shape, val_shape))
for var_s, val_s in zip(var_shape, val_shape):
if var_s > val_s:
pad_params.append([0, var_s - val_s])
else:
pad_params.append([0, 0])
slices.append(slice(0, var_s))
val = np.pad(val, pad_params, 'constant')[slices]
SessionUpdate.load_value_to_var(v, val)
with sess.as_default():
self._match_vars(f)
class AssignGlobalStep(SessionInit):
def __init__(self, global_step_val):
self.global_step_val = global_step_val
self.assign_op = None
def _setup_graph(self):
global_step = get_global_step_var()
self.assign_op = global_step.assign(self.global_step_val)
def _run_init(self, sess):
sess.run(self.assign_op)
def read_parameter_val(model_dir, l_names):
model_path = tf.train.latest_checkpoint(model_dir)
reader = tf.train.NewCheckpointReader(model_path)
reader = CheckpointReaderAdapter(reader) # use an adapter to standardize the name
return [ reader.get_tensor(var_name) for var_name in l_names ]
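# --- Hedged illustration (not part of the original file) ---------------------
# A numpy-only sketch of the size-relaxation rule used by SaverRestoreSizeRelaxed:
# pad the checkpoint value up to the variable size along every dim, then slice it
# down to that size. The shapes below are made up for illustration, e.g.
# _relax_to_shape(np.ones((3, 5)), [4, 2]).shape == (4, 2).
def _relax_to_shape(val, var_shape):
    pad_params, slices = [], []
    for var_s, val_s in zip(var_shape, val.shape):
        pad_params.append([0, max(0, var_s - val_s)])  # pad only when too small
        slices.append(slice(0, var_s))                 # slice only when too large
    return np.pad(val, pad_params, 'constant')[tuple(slices)]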
|
pyston/tools/test_optimize.py
|
mananpal1997/pyston
| 2,441 |
51943
|
<filename>pyston/tools/test_optimize.py
import ctypes
import os
import subprocess
import sys
if __name__ == "__main__":
filename = os.path.abspath(sys.argv[1])
funcnames = sys.argv[2:]
if not funcnames:
print("Usage: python test_optimize.py FILENAME FUNCNAME+")
sys.exit(1)
os.chdir(os.path.join(os.path.dirname(__file__), ".."))
if filename.endswith(".c") or filename.endswith(".cpp"):
new_fn = filename.rsplit(".c", 1)[0] + ".ll"
if not os.path.exists(new_fn) or os.stat(new_fn).st_mtime < os.stat(filename).st_mtime:
args = ["build/Release/llvm/bin/clang-10", "-g", "-O3", "-Ibuild/cpython_bc_install/include/python3.8", "-DNDEBUG", "-Wall", "-c", "-emit-llvm", "-S", filename]
print(' '.join(args))
subprocess.check_call(args)
filename = new_fn
nitrous_so = ctypes.PyDLL("libinterp.so")
loadBitcode = nitrous_so.loadBitcode
loadBitcode.argtypes = [ctypes.c_char_p]
link_fn = filename + ".link.bc"
if not os.path.exists(link_fn) or os.stat(link_fn).st_mtime < os.stat(filename).st_mtime:
args = ["build/Release/llvm/bin/llvm-link", "aot/all.bc", filename, "-o", link_fn]
print(" ".join(args))
subprocess.check_call(args)
loadBitcode(link_fn.encode("ascii"))
initializeJIT = nitrous_so.initializeJIT
initializeJIT.argtypes = [ctypes.c_long]
initializeJIT(3)
pystol_so = ctypes.PyDLL("libpystol.so")
pystol_so.pystolGlobalPythonSetup()
optimize = nitrous_so["optimizeBitcode"]
optimize.argtypes = [ctypes.c_char_p]
for funcname in funcnames:
optimize(funcname.encode("ascii"))
|
test/test-356-getei.py
|
Cam2337/snap-python
| 242 |
51956
|
import snap
Graph = snap.GenFull(snap.PNEANet, 10)
Src = 1
Dst = 2
EI = Graph.GetEI(Src,Dst)
EId = EI.GetId()
print(EId, Graph.GetEI(Src,Dst).GetId())
print(Graph.GetEI(Src,Dst).GetSrcNId(), Graph.GetEI(Src,Dst).GetDstNId())
print(Graph.GetEI(EId).GetSrcNId(), Graph.GetEI(EId).GetDstNId())
if EId != Graph.GetEI(Src,Dst).GetId():
print("*** error1")
if Graph.GetEI(Src,Dst).GetSrcNId() != Graph.GetEI(EId).GetSrcNId():
print("*** error2")
if Graph.GetEI(Src,Dst).GetDstNId() != Graph.GetEI(EId).GetDstNId():
print("*** error3")
|
endgame/exposure_via_resource_policies/common.py
|
vikrum/endgame
| 224 |
51998
|
from abc import ABCMeta, abstractmethod
import json
import logging
import copy
import boto3
import botocore
from botocore.exceptions import ClientError
from endgame.shared.response_message import ResponseMessage
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class ResourceType(object):
__meta_class__ = ABCMeta
def __init__(
self,
name: str,
resource_type: str,
service: str,
region: str,
client: boto3.Session.client,
current_account_id: str,
override_action: str = None,
include_resource_block: bool = True,
override_resource_block: str = None,
override_account_id_instead_of_principal: bool = False
):
self.name = name
self.resource_type = resource_type
self.client = client
self.current_account_id = current_account_id
self.service = service
self.region = region
self.include_resource_block = include_resource_block # Override for IAM
self.override_action = override_action # Override for IAM
self.override_resource_block = override_resource_block # Override for EFS
self.override_account_id_instead_of_principal = override_account_id_instead_of_principal # Override for logs, sns, sqs, and lambda
self.policy_document = self._get_rbp().policy_document
# Store an original copy of the policy so we can compare it later.
self.original_policy = copy.deepcopy(json.loads(json.dumps(self.policy_document.original_policy)))
def __str__(self):
return '%s' % (json.dumps(json.loads(self.policy_document.__str__())))
@abstractmethod
def _get_rbp(self) -> ResponseGetRbp:
raise NotImplementedError("Must override _get_rbp")
@property
@abstractmethod
def arn(self) -> str:
raise NotImplementedError("Must override arn")
@abstractmethod
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
raise NotImplementedError("Must override set_rbp")
def add_myself(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Add your rogue principal to the AWS resource"""
logger.debug(f"Adding {evil_principal} to {self.arn}")
evil_policy = self.policy_document.policy_plus_evil_principal(
victim_account_id=self.current_account_id,
evil_principal=evil_principal,
resource_arn=self.arn
)
if not dry_run:
set_rbp_response = self.set_rbp(evil_policy=evil_policy)
operation = "ADD_MYSELF"
message = set_rbp_response.message
success = set_rbp_response.success
else:
# new_policy = evil_policy
operation = "DRY_RUN_ADD_MYSELF"
message = "DRY_RUN_ADD_MYSELF"
try:
tmp = self._get_rbp()
success = tmp.success
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation=operation, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
original_policy=self.original_policy, updated_policy=evil_policy,
resource_type=self.resource_type, resource_name=self.name,
service=self.service)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Remove all traces"""
logger.debug(f"Removing {evil_principal} from {self.arn}")
policy_stripped = self.policy_document.policy_minus_evil_principal(
victim_account_id=self.current_account_id,
evil_principal=evil_principal,
resource_arn=self.arn
)
if not dry_run:
operation = "UNDO"
set_rbp_response = self.set_rbp(evil_policy=policy_stripped)
message = set_rbp_response.message
success = set_rbp_response.success
else:
operation = "DRY_RUN_UNDO"
message = "DRY_RUN_UNDO"
success = True
response_message = ResponseMessage(message=message, operation=operation, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
original_policy=self.original_policy, updated_policy=policy_stripped,
resource_type=self.resource_type, resource_name=self.name,
service=self.service)
return response_message
class ResourceTypes(object):
__meta_class__ = ABCMeta
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
self.client = client
self.current_account_id = current_account_id
self.region = region
def __str__(self):
return '%s' % (json.dumps(self.resources.arn))
@property
@abstractmethod
def resources(self) -> [ListResourcesResponse]:
raise NotImplementedError("Must override property 'resources'")
|
simdeblur/utils/registry.py
|
ljzycmd/SimDeblur
| 190 |
52044
|
<reponame>ljzycmd/SimDeblur
# Registry Class
# CMD
# Refer this in Detectron2
class Registry:
def __init__(self, name):
self._name = name
self._obj_map = {}
def _do_register(self, name, obj):
assert (name not in self._obj_map), "The object named: {} was already registered in {} registry! ".format(name, self._name)
self._obj_map[name] = obj
def register(self, obj=None):
"""
Register the given object under the name obj.__name__.
Can be used as either a decorator or not.
"""
if obj is None:
# used as a decorator
def deco(func_or_class):
name = func_or_class.__name__
self._do_register(name, func_or_class)
return func_or_class
return deco
name = obj.__name__
self._do_register(name, obj)
def get(self, name):
ret = self._obj_map.get(name)
if ret is None:
raise KeyError("No object names {} found in {} registry!".format(name, self._name))
return ret
def __getitem__(self, name):
return self.get(name)
def keys(self):
return self._obj_map.keys()
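# --- Hedged usage sketch (not part of the original file) ----------------------
# Illustrates both ways register() can be used; the registry name and the
# registered classes are made up for this example.
BACKBONES = Registry('backbone')
@BACKBONES.register()          # used as a decorator
class ResNet:
    pass
class UNet:
    pass
BACKBONES.register(UNet)       # used as a plain call
assert BACKBONES.get('ResNet') is ResNet
assert BACKBONES['UNet'] is UNet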
|
maskrcnn_benchmark/modeling/backbone/pan.py
|
Yuliang-Liu/bezier_curve_text_spotting
| 423 |
52050
|
<reponame>Yuliang-Liu/bezier_curve_text_spotting
import torch.nn as nn
import torch.nn.functional as F
class FPA(nn.Module):
def __init__(self, channels=2048):
"""
Feature Pyramid Attention
:type channels: int
"""
super(FPA, self).__init__()
channels_mid = int(channels / 4)
self.channels_cond = channels
# Master branch
self.conv_master = nn.Conv2d(self.channels_cond, channels, kernel_size=1, bias=False)
self.bn_master = nn.BatchNorm2d(channels)
# Global pooling branch
self.conv_gpb = nn.Conv2d(self.channels_cond, channels, kernel_size=1, bias=False)
#self.bn_gpb = nn.BatchNorm2d(channels)
        # C333: because the shape of the last feature maps is (16, 16).
self.conv7x7_1 = nn.Conv2d(self.channels_cond, channels_mid, kernel_size=(7, 7), stride=2, padding=3, bias=False)
self.bn1_1 = nn.BatchNorm2d(channels_mid)
self.conv5x5_1 = nn.Conv2d(channels_mid, channels_mid, kernel_size=(5, 5), stride=2, padding=2, bias=False)
self.bn2_1 = nn.BatchNorm2d(channels_mid)
self.conv3x3_1 = nn.Conv2d(channels_mid, channels_mid, kernel_size=(3, 3), stride=2, padding=1, bias=False)
self.bn3_1 = nn.BatchNorm2d(channels_mid)
self.conv7x7_2 = nn.Conv2d(channels_mid, channels_mid, kernel_size=(7, 7), stride=1, padding=3, bias=False)
self.bn1_2 = nn.BatchNorm2d(channels_mid)
self.conv5x5_2 = nn.Conv2d(channels_mid, channels_mid, kernel_size=(5, 5), stride=1, padding=2, bias=False)
self.bn2_2 = nn.BatchNorm2d(channels_mid)
self.conv3x3_2 = nn.Conv2d(channels_mid, channels_mid, kernel_size=(3, 3), stride=1, padding=1, bias=False)
self.bn3_2 = nn.BatchNorm2d(channels_mid)
self.bn_upsample_1 = nn.BatchNorm2d(channels)
self.conv1x1_up1 = nn.Conv2d(channels_mid, channels, kernel_size=(1, 1), stride=1, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
"""
:param x: Shape: [b, 2048, h, w]
:return: out: Feature maps. Shape: [b, 2048, h, w]
"""
# Master branch
x_master = self.conv_master(x)
x_master = self.bn_master(x_master)
# Global pooling branch
x_gpb = nn.AvgPool2d(x.shape[2:])(x).view(x.shape[0], self.channels_cond, 1, 1)
x_gpb = self.conv_gpb(x_gpb)
#x_gpb = self.bn_gpb(x_gpb)
# Branch 1
x1_1 = self.conv7x7_1(x)
x1_1 = self.bn1_1(x1_1)
x1_1 = self.relu(x1_1)
x1_2 = self.conv7x7_2(x1_1)
x1_2 = self.bn1_2(x1_2)
# Branch 2
x2_1 = self.conv5x5_1(x1_1)
x2_1 = self.bn2_1(x2_1)
x2_1 = self.relu(x2_1)
x2_2 = self.conv5x5_2(x2_1)
x2_2 = self.bn2_2(x2_2)
# Branch 3
x3_1 = self.conv3x3_1(x2_1)
x3_1 = self.bn3_1(x3_1)
x3_1 = self.relu(x3_1)
x3_2 = self.conv3x3_2(x3_1)
x3_2 = self.bn3_2(x3_2)
# Merge branch 1 and 2
x3_upsample = F.interpolate(x3_2, size=x2_2.shape[-2:],
mode='bilinear', align_corners=False)
x2_merge = self.relu(x2_2 + x3_upsample)
x2_upsample = F.interpolate(x2_merge, size=x1_2.shape[-2:],
mode='bilinear', align_corners=False)
x1_merge = self.relu(x1_2 + x2_upsample)
x1_merge_upsample = F.interpolate(x1_merge, size=x_master.shape[-2:],
mode='bilinear', align_corners=False)
x1_merge_upsample_ch = self.relu(self.bn_upsample_1(self.conv1x1_up1(x1_merge_upsample)))
x_master = x_master * x1_merge_upsample_ch
#
out = self.relu(x_master + x_gpb)
return out
class GAU(nn.Module):
def __init__(self, channels_high, channels_low, upsample=True):
super(GAU, self).__init__()
# Global Attention Upsample
self.upsample = upsample
self.conv3x3 = nn.Conv2d(channels_low, channels_low, kernel_size=3, padding=1, bias=False)
self.bn_low = nn.BatchNorm2d(channels_low)
self.conv1x1 = nn.Conv2d(channels_high, channels_low, kernel_size=1, padding=0, bias=False)
#self.bn_high = nn.BatchNorm2d(channels_low)
if upsample:
self.conv_upsample = nn.ConvTranspose2d(channels_high, channels_low, kernel_size=4, stride=2, padding=1, bias=False)
self.bn_upsample = nn.BatchNorm2d(channels_low)
else:
self.conv_reduction = nn.Conv2d(channels_high, channels_low, kernel_size=1, padding=0, bias=False)
self.bn_reduction = nn.BatchNorm2d(channels_low)
self.relu = nn.ReLU(inplace=True)
def forward(self, fms_high, fms_low, fm_mask=None):
"""
        Use the high level features with abundant category information to weight the low level features with pixel
        localization information. In the meantime, we further use mask feature maps with category-specific information
        to localize the mask position.
:param fms_high: Features of high level. Tensor.
:param fms_low: Features of low level. Tensor.
:param fm_mask:
:return: fms_att_upsample
"""
b, c, h, w = fms_high.shape
fms_high_gp = nn.AvgPool2d(fms_high.shape[2:])(fms_high).view(len(fms_high), c, 1, 1)
fms_high_gp = self.conv1x1(fms_high_gp)
# fms_high_gp = self.bn_high(fms_high_gp)# arlog, when the spatial size HxW = 1x1, the BN cannot be used.
fms_high_gp = self.relu(fms_high_gp)
# fms_low_mask = torch.cat([fms_low, fm_mask], dim=1)
fms_low_mask = self.conv3x3(fms_low)
fms_low_mask = self.bn_low(fms_low_mask)
fms_att = fms_low_mask * fms_high_gp
if self.upsample:
out = self.relu(
self.bn_upsample(self.conv_upsample(fms_high)) + fms_att)
else:
out = self.relu(
self.bn_reduction(self.conv_reduction(fms_high)) + fms_att)
return out
class PAN(nn.Module):
def __init__(self):
"""
        :param blocks: Blocks of the network in reverse sequential order.
"""
super(PAN, self).__init__()
channels_blocks = [2048, 1024, 512, 256]
self.fpa = FPA(channels=channels_blocks[0])
self.gau_block1 = GAU(channels_blocks[0], channels_blocks[1])
self.gau_block2 = GAU(channels_blocks[1], channels_blocks[2])
self.gau_block3 = GAU(channels_blocks[2], channels_blocks[3])
self.gau = [self.gau_block1, self.gau_block2, self.gau_block3]
def forward(self, fms):
"""
        :param fms: Feature maps from the forward propagation of the network, in reverse sequential order. Shape: [b, c, h, w]
:return: fm_high. [b, 256, h, w]
"""
feats = []
for i, fm_low in enumerate(fms[::-1]):
if i == 0:
fm_high = self.fpa(fm_low)
else:
fm_high = self.gau[int(i-1)](fm_high, fm_low)
feats.append(fm_high)
feats.reverse()
return tuple(feats)
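# --- Hedged usage sketch (not part of the original file) ----------------------
# Feeds dummy ResNet-style feature maps (channels 256/512/1024/2048 at strides
# 4/8/16/32 of an assumed 512x512 input) through PAN, so the top level is the
# 16x16 map the FPA comment above expects. Output shapes mirror the inputs.
if __name__ == '__main__':
    import torch
    fms = [torch.randn(1, c, s, s)
           for c, s in zip([256, 512, 1024, 2048], [128, 64, 32, 16])]
    pan = PAN().eval()
    with torch.no_grad():
        outs = pan(fms)
    print([tuple(o.shape) for o in outs])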
|
recipes/Python/577069_Access_grep_from_python/recipe-577069.py
|
tdiprima/code
| 2,023 |
52064
|
<gh_stars>1000+
import subprocess
def grep(filename, arg):
process = subprocess.Popen(['grep', '-n', arg, filename], stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
return stdout, stderr
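# --- Hedged usage sketch (not part of the original recipe) --------------------
# The file name and pattern below are placeholders; grep must be on the PATH.
if __name__ == '__main__':
    out, _ = grep('/etc/hosts', 'localhost')
    print(out.decode() if out else 'no matches')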
|
PyFunceble/storage_facility.py
|
Centaurioun/PyFunceble
| 213 |
52067
|
<filename>PyFunceble/storage_facility.py
"""
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides some facilities for the storage module.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from PyFunceble.helpers.directory import DirectoryHelper
from PyFunceble.helpers.environment_variable import EnvironmentVariableHelper
from PyFunceble.utils.platform import PlatformUtility
from PyFunceble.utils.version import VersionUtility
def get_config_directory(
*, project_name: str, project_version: str
) -> str: # pragma: no cover ## Not relevant
"""
Provides the location of the configuration directory.
"""
# pylint: disable=too-many-branches
env_var_helper = EnvironmentVariableHelper()
directory_helper = DirectoryHelper()
if env_var_helper.set_name("PYFUNCEBLE_CONFIG_DIR").exists():
config_directory = env_var_helper.get_value()
elif env_var_helper.set_name("PYFUNCEBLE_OUTPUT_DIR").exists():
config_directory = env_var_helper.get_value()
elif (
VersionUtility(project_version).is_cloned()
or env_var_helper.set_name("TRAVIS_BUILD_DIR").exists()
or env_var_helper.set_name("CI_PROJECT_DIR").exists()
and env_var_helper.set_name("GITLAB_CI").exists()
):
config_directory = directory_helper.get_current(with_end_sep=True)
else:
if PlatformUtility.is_unix():
config_dir_path = os.path.expanduser(os.path.join("~", ".config"))
if directory_helper.set_path(config_dir_path).exists():
config_directory = config_dir_path
elif directory_helper.set_path(os.path.expanduser("~")).exists():
config_directory = directory_helper.join_path(".")
else:
config_directory = directory_helper.get_current(with_end_sep=True)
elif PlatformUtility.is_windows():
if env_var_helper.set_name("APPDATA").exists():
config_directory = env_var_helper.get_value()
else:
config_directory = directory_helper.get_current(with_end_sep=True)
else:
config_directory = directory_helper.get_current(with_end_sep=True)
if not config_directory.endswith(os.sep):
config_directory += os.sep
config_directory += project_name + os.sep
if not directory_helper.set_path(config_directory).exists():
directory_helper.create()
if not config_directory.endswith(os.sep):
config_directory += os.sep
return config_directory
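# --- Hedged usage sketch (not part of the original module) --------------------
# Shows how the helper might be called; the project name and version strings
# are placeholders rather than values taken from PyFunceble itself.
if __name__ == "__main__":
    print(get_config_directory(project_name="PyFunceble", project_version="4.0.0"))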
|
leo/core/leoTest2.py
|
thomasbuttler/leo-editor
| 1,550 |
52085
|
# -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20201129023817.1: * @file leoTest2.py
#@@first
"""
Support for Leo's new unit tests, contained in leo/unittests/test_*.py.
Run these tests using unittest or pytest from the command line.
See g.run_unit_tests and g.run_coverage_tests.
This file also contains classes that convert @test nodes in unitTest.leo to
tests in leo/unittest. Eventually these classes will move to scripts.leo.
"""
import time
import unittest
from leo.core import leoGlobals as g
from leo.core import leoApp
#@+others
#@+node:ekr.20201130195111.1: ** function.create_app
def create_app(gui_name='null'):
"""
Create the Leo application, g.app, the Gui, g.app.gui, and a commander.
This method is expensive (0.5 sec) only the first time it is called.
Thereafter, recreating g.app, g.app.gui, and new commands is fast.
"""
trace = False
t1 = time.process_time()
#
# Set g.unitTesting *early*, for guards, to suppress the splash screen, etc.
g.unitTesting = True
# Create g.app now, to avoid circular dependencies.
g.app = leoApp.LeoApp()
# Late imports.
from leo.core import leoConfig
from leo.core import leoNodes
from leo.core import leoCommands
from leo.core.leoGui import NullGui
if gui_name == 'qt':
from leo.plugins.qt_gui import LeoQtGui
t2 = time.process_time()
g.app.recentFilesManager = leoApp.RecentFilesManager()
g.app.loadManager = lm = leoApp.LoadManager()
lm.computeStandardDirectories()
if not g.app.setLeoID(useDialog=False, verbose=True):
raise ValueError("unable to set LeoID.")
g.app.nodeIndices = leoNodes.NodeIndices(g.app.leoID)
g.app.config = leoConfig.GlobalConfigManager()
g.app.db = g.NullObject('g.app.db')
g.app.pluginsController = g.NullObject('g.app.pluginsController')
g.app.commander_cacher = g.NullObject('g.app.commander_cacher')
if gui_name == 'null':
g.app.gui = NullGui()
elif gui_name == 'qt':
g.app.gui = LeoQtGui()
else:
raise TypeError(f"create_gui: unknown gui_name: {gui_name!r}")
t3 = time.process_time()
# Create a dummy commander, to do the imports in c.initObjects.
# Always use a null gui to avoid screen flash.
# setUp will create another commander.
c = leoCommands.Commands(fileName=None, gui=g.app.gui)
# Create minimal config dictionaries.
settings_d, bindings_d = lm.createDefaultSettingsDicts()
lm.globalSettingsDict = settings_d
lm.globalBindingsDict = bindings_d
c.config.settingsDict = settings_d
c.config.bindingsDict = bindings_d
assert g.unitTesting is True # Defensive.
t4 = time.process_time()
# Trace times. This trace happens only once:
# imports: 0.016
# gui: 0.000
# commander: 0.469
# total: 0.484
if trace and t4 - t3 > 0.1:
print('create_app:\n'
f" imports: {(t2-t1):.3f}\n"
f" gui: {(t3-t2):.3f}\n"
f"commander: {(t4-t2):.3f}\n"
f" total: {(t4-t1):.3f}\n")
return c
#@+node:ekr.20210902014907.1: ** class LeoUnitTest(unittest.TestCase)
class LeoUnitTest(unittest.TestCase):
"""
The base class for all unit tests in Leo.
    Contains setUp/tearDown methods and various utilities.
"""
#@+others
#@+node:ekr.20210901140855.2: *3* LeoUnitTest.setUp, tearDown & setUpClass
@classmethod
def setUpClass(cls):
create_app(gui_name='null')
def setUp(self):
"""
Create a commander using a **null** gui, regardless of g.app.gui.
Create the nodes in the commander.
"""
# Do the import here to avoid circular dependencies.
from leo.core import leoCommands
from leo.core.leoGui import NullGui
# Set g.unitTesting *early*, for guards.
g.unitTesting = True
# Create a new commander for each test.
# This is fast, because setUpClass has done all the imports.
self.c = c = leoCommands.Commands(fileName=None, gui=NullGui())
# Init the 'root' and '@settings' nodes.
self.root_p = c.rootPosition()
self.root_p.h = 'root'
self.settings_p = self.root_p.insertAfter()
self.settings_p.h = '@settings'
# Select the 'root' node.
c.selectPosition(self.root_p)
def tearDown(self):
self.c = None
#@+node:ekr.20210830151601.1: *3* LeoUnitTest.create_test_outline
def create_test_outline(self):
p = self.c.p
# Create the following outline:
#
# root
# child clone a
# node clone 1
# child b
# child clone a
# node clone 1
# child c
# node clone 1
# child clone a
# node clone 1
# child b
# child clone a
# node clone 1
assert p == self.root_p
assert p.h == 'root'
# Child a
child_clone_a = p.insertAsLastChild()
child_clone_a.h = 'child clone a'
node_clone_1 = child_clone_a.insertAsLastChild()
node_clone_1.h = 'node clone 1'
# Child b
child_b = p.insertAsLastChild()
child_b.h = 'child b'
# Clone 'child clone a'
clone = child_clone_a.clone()
clone.moveToLastChildOf(child_b)
# Child c
child_c = p.insertAsLastChild()
child_c.h = 'child c'
# Clone 'node clone 1'
clone = node_clone_1.clone()
clone.moveToLastChildOf(child_c)
# Clone 'child clone a'
clone = child_clone_a.clone()
clone.moveToLastChildOf(p)
# Clone 'child b'
clone = child_b.clone()
clone.moveToLastChildOf(p)
#@+node:ekr.20210831101111.1: *3* LeoUnitTest.dump_tree
def dump_tree(self, tag=''):
c = self.c
print('')
g.trace(tag)
for p in c.all_positions():
print(f"clone? {int(p.isCloned())} {' '*p.level()} {p.h}")
#@-others
#@-others
#@-leo
|
xv_leak_tools/network/linux/network_services.py
|
UAEKondaya1/expressvpn_leak_testing
| 219 |
52100
|
<reponame>UAEKondaya1/expressvpn_leak_testing
import ctypes
import netifaces
import NetworkManager # pylint: disable=import-error
from xv_leak_tools.exception import XVEx
from xv_leak_tools.log import L
from xv_leak_tools.process import check_subprocess
class _NetworkObject:
def __init__(self, conn):
self._settings = conn.GetSettings()
self._id = self._settings['connection']['id']
self._uuid = self._settings['connection']['uuid']
def __str__(self):
return "{} ({})".format(self.id(), self.uuid())
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.uuid() == other.uuid()
def uuid(self):
return self._uuid
def id(self):
return self._id
def name(self):
# TODO: Decide on this API.
return self._id
class NetworkService(_NetworkObject):
def active(self):
active_conns = NetworkManager.NetworkManager.ActiveConnections
active_conns = [NetworkService(conn.Connection) for conn in active_conns]
if self in active_conns:
return True
return False
def enable(self):
L.debug("Enabling connection {}".format(self.name()))
check_subprocess(['nmcli', 'connection', 'up', self.name()])
def disable(self):
L.debug("Disabling connection {}".format(self.name()))
check_subprocess(['nmcli', 'connection', 'down', self.name()])
def interface(self):
# TODO: Reject this idea? Maybe interfaces should be chosen without
# regard to connection status, if NM can't be trusted.
# In which case, tests that get a list of interfaces should just use
# netifaces directly.
try:
return self._settings['connection']['interface-name']
except KeyError:
connection_type = self._settings['connection']['type']
# TODO: Test this on different types.
mac_address = self._settings[connection_type]['mac-address']
for iface in netifaces.interfaces():
iface_mac = netifaces.ifaddresses(iface)[netifaces.AF_LINK][0]['addr'].lower()
if mac_address.lower() == iface_mac:
return iface
raise XVEx("Couldn't find any connection interfaces")
def enable_interface(self):
L.debug("Enabling interface {}".format(self.interface()))
# TODO: Move to unix tools or use "ip link set dev iface up"?
check_subprocess(['ifconfig', self.interface(), 'up'])
def disable_interface(self):
L.debug("Disabling interface {}".format(self.interface()))
# TODO: Move to unix tools or use "ip link set dev iface up"?
check_subprocess(['ifconfig', self.interface(), 'down'])
class LinuxNetwork:
@staticmethod
def network_services_in_priority_order():
conns = NetworkManager.Settings.ListConnections()
conns = list(
filter(lambda x: 'autoconnect-priority' in x.GetSettings()['connection'], conns))
# NetworkManager uses int32s so we need to "cast" the autoconnect-priority value.
def uint32(signed_integer):
return int(ctypes.c_uint32(signed_integer).value)
conns.sort(
key=lambda x: uint32(x.GetSettings()['connection']['autoconnect-priority']),
reverse=True)
return [NetworkService(conn) for conn in conns]
|
tests/errors/semantic/non_blocking/PYCCEL_RESTRICTION_LIST_COMPREHENSION_LIMITS.py
|
dina-fouad/pyccel
| 206 |
52103
|
# pylint: disable=missing-function-docstring, missing-module-docstring/
a = [i*j for i in range(1,3) for j in range(1,4) for k in range(i,j)]
n = 5
a = [i*j for i in range(1,n) for j in range(1,4) for k in range(i,j)]
|
pycon/schedule/tests/factories.py
|
azkarmoulana/pycon
| 154 |
52109
|
<reponame>azkarmoulana/pycon
import factory
import factory.django
from pycon.schedule.models import Session, SessionRole
from symposion.schedule.tests.factories import DayFactory
class SessionFactory(factory.django.DjangoModelFactory):
class Meta:
model = Session
day = factory.SubFactory(DayFactory)
class SessionRoleFactory(factory.django.DjangoModelFactory):
class Meta:
model = SessionRole
|
blender/arm/logicnode/animation/LN_get_tilesheet_state.py
|
onelsonic/armory
| 2,583 |
52179
|
<gh_stars>1000+
from arm.logicnode.arm_nodes import *
class GetTilesheetStateNode(ArmLogicTreeNode):
"""Returns the information about the current tilesheet of the given object."""
bl_idname = 'LNGetTilesheetStateNode'
bl_label = 'Get Tilesheet State'
arm_version = 1
arm_section = 'tilesheet'
def arm_init(self, context):
self.add_input('ArmNodeSocketObject', 'Object')
self.add_output('ArmStringSocket', 'Name')
self.add_output('ArmIntSocket', 'Frame')
self.add_output('ArmBoolSocket', 'Is Paused')
|
tests/st/probability/distribution/test_poisson.py
|
GuoSuiming/mindspore
| 3,200 |
52180
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test cases for Poisson distribution"""
import numpy as np
from scipy import stats
import mindspore.context as context
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor
from mindspore import dtype
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Prob(nn.Cell):
"""
Test class: probability of Poisson distribution.
"""
def __init__(self):
super(Prob, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.prob(x_)
def test_pdf():
"""
Test pdf.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_pdf = poisson_benchmark.pmf([-1.0, 0.0, 1.0]).astype(np.float32)
pdf = Prob()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = pdf(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_pdf) < tol).all()
class LogProb(nn.Cell):
"""
Test class: log probability of Poisson distribution.
"""
def __init__(self):
super(LogProb, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.log_prob(x_)
def test_log_likelihood():
"""
Test log_pdf.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_logpdf = poisson_benchmark.logpmf([1.0, 2.0]).astype(np.float32)
logprob = LogProb()
x_ = Tensor(np.array([1.0, 2.0]).astype(np.float32), dtype=dtype.float32)
output = logprob(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logpdf) < tol).all()
class Basics(nn.Cell):
"""
Test class: mean/sd/mode of Poisson distribution.
"""
def __init__(self):
super(Basics, self).__init__()
self.p = msd.Poisson([1.44], dtype=dtype.float32)
def construct(self):
return self.p.mean(), self.p.sd(), self.p.mode()
def test_basics():
"""
    Test mean/standard deviation/mode.
"""
basics = Basics()
mean, sd, mode = basics()
expect_mean = 1.44
expect_sd = 1.2
expect_mode = 1
tol = 1e-6
assert (np.abs(mean.asnumpy() - expect_mean) < tol).all()
assert (np.abs(sd.asnumpy() - expect_sd) < tol).all()
assert (np.abs(mode.asnumpy() - expect_mode) < tol).all()
class Sampling(nn.Cell):
"""
Test class: sample of Poisson distribution.
"""
def __init__(self, shape, seed=0):
super(Sampling, self).__init__()
self.p = msd.Poisson([[1.0], [0.5]], seed=seed, dtype=dtype.float32)
self.shape = shape
def construct(self, rate=None):
return self.p.sample(self.shape, rate)
def test_sample():
"""
Test sample.
"""
shape = (2, 3)
seed = 10
rate = Tensor([1.0, 2.0, 3.0], dtype=dtype.float32)
sample = Sampling(shape, seed=seed)
output = sample(rate)
assert output.shape == (2, 3, 3)
class CDF(nn.Cell):
"""
Test class: cdf of Poisson distribution.
"""
def __init__(self):
super(CDF, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.cdf(x_)
def test_cdf():
"""
Test cdf.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_cdf = poisson_benchmark.cdf([-1.0, 0.0, 1.0]).astype(np.float32)
cdf = CDF()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = cdf(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_cdf) < tol).all()
class LogCDF(nn.Cell):
"""
Test class: log_cdf of Poisson distribution.
"""
def __init__(self):
super(LogCDF, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.log_cdf(x_)
def test_log_cdf():
"""
Test log_cdf.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_logcdf = poisson_benchmark.logcdf([0.5, 1.0, 2.5]).astype(np.float32)
logcdf = LogCDF()
x_ = Tensor(np.array([0.5, 1.0, 2.5]).astype(np.float32), dtype=dtype.float32)
output = logcdf(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logcdf) < tol).all()
class SF(nn.Cell):
"""
Test class: survival function of Poisson distribution.
"""
def __init__(self):
super(SF, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.survival_function(x_)
def test_survival():
"""
Test survival function.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_survival = poisson_benchmark.sf([-1.0, 0.0, 1.0]).astype(np.float32)
survival = SF()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = survival(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_survival) < tol).all()
class LogSF(nn.Cell):
"""
Test class: log survival function of Poisson distribution.
"""
def __init__(self):
super(LogSF, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.log_survival(x_)
def test_log_survival():
"""
Test log survival function.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_logsurvival = poisson_benchmark.logsf([-1.0, 0.0, 1.0]).astype(np.float32)
logsurvival = LogSF()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = logsurvival(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logsurvival) < tol).all()
|
capstone/capdb/migrations/0101_auto_20200423_1714.py
|
rachelaus/capstone
| 134 |
52181
|
<reponame>rachelaus/capstone<gh_stars>100-1000
# Generated by Django 2.2.11 on 2020-04-23 17:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('capdb', '0100_auto_20200410_1755'),
]
operations = [
migrations.AddField(
model_name='historicalvolumemetadata',
name='second_part_of',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='capdb.VolumeMetadata'),
),
migrations.AddField(
model_name='volumemetadata',
name='second_part_of',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='second_part', to='capdb.VolumeMetadata'),
),
]
|
claf/learn/tensorboard.py
|
GMDennis/claf
| 225 |
52200
|
<gh_stars>100-1000
import os
from tensorboardX import SummaryWriter
from claf import nsml
class TensorBoard:
""" TensorBoard Wrapper for Pytorch """
def __init__(self, log_dir):
if not os.path.exists(log_dir):
os.makedirs(log_dir)
self.writer = SummaryWriter(log_dir=log_dir)
def scalar_summaries(self, step, summary):
if nsml.IS_ON_NSML:
if type(summary) != dict:
raise ValueError(f"summary type is dict. not {type(summary)}")
kwargs = {"summary": True, "scope": locals(), "step": step}
kwargs.update(summary)
nsml.report(**kwargs)
else:
for tag, value in summary.items():
self.scalar_summary(step, tag, value)
def scalar_summary(self, step, tag, value):
"""Log a scalar variable."""
if nsml.IS_ON_NSML:
nsml.report(**{"summary": True, "scope": locals(), "step": step, tag: value})
else:
self.writer.add_scalar(tag, value, step)
def image_summary(self, tag, images, step):
"""Log a list of images."""
raise NotImplementedError()
def embedding_summary(self, features, metadata=None, label_img=None):
raise NotImplementedError()
def histogram_summary(self, tag, values, step, bins=1000):
"""Log a histogram of the tensor of values."""
raise NotImplementedError()
def graph_summary(self, model, input_to_model=None):
raise NotImplementedError()
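# --- Hedged usage sketch (not part of the original module) --------------------
# Logs a few scalar values; the log directory and tag names are made up.
if __name__ == "__main__":
    board = TensorBoard(log_dir="logs/example")
    board.scalar_summary(step=1, tag="train/loss", value=0.42)
    board.scalar_summaries(step=1, summary={"train/loss": 0.42, "train/acc": 0.9})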
|
matrixprofile/algorithms/regimes.py
|
MORE-EU/matrixprofile
| 262 |
52205
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
def idealized_arc_curve(width, index):
"""
    Returns the value at `index` for the parabola of width `width` and height `width / 2`.
Formula taken from https://www.desmos.com/calculator/awtnrxh6rk.
Parameters
----------
width : int
Length of the time series to calculate the parabola for.
index : int
location to compute the parabola value at.
Returns
-------
float : y
The value at index for the parabola.
"""
height = width / 2
c = width / 2
b = height
a = height / (width / 2) ** 2
y = -(a * (index - c) ** 2) + b
return y
def fluss(profile):
"""
    Computes the corrected arc curve (CAC) for the MatrixProfile index. This
    algorithm provides Fast Low-cost Unipotent Semantic Segmentation (FLUSS).
Parameters
----------
profile : dict
Data structure from a MatrixProfile algorithm.
Returns
-------
array_like : corrected_arc_curve
The corrected arc curve for the profile.
"""
if not core.is_mp_obj(profile):
raise ValueError('profile must be a MatrixProfile structure')
mpi = profile.get('pi')
w = profile.get('w')
n = len(mpi)
nnmark = np.zeros(n)
# find the number of additional arcs starting to cross over each index
for i in range(n):
mpi_val = mpi[i]
small = int(min(i, mpi_val))
large = int(max(i, mpi_val))
nnmark[small + 1] = nnmark[small + 1] + 1
nnmark[large] = nnmark[large] - 1
# cumulatively sum all crossing arcs at each index
cross_count = np.cumsum(nnmark)
# compute ideal arc curve for all indices
idealized = np.apply_along_axis(lambda i: idealized_arc_curve(n, i), 0, np.arange(0, n))
idealized = cross_count / idealized
# correct the arc curve so that it is between 0 and 1
idealized[idealized > 1] = 1
corrected_arc_curve = idealized
# correct the head and tail with the window size
corrected_arc_curve[:w] = 1
corrected_arc_curve[-w:] = 1
return corrected_arc_curve
def extract_regimes(profile, num_regimes=3):
"""
Given a MatrixProfile, compute the corrected arc curve and extract
the desired number of regimes. Regimes are computed with an exclusion
zone of 5 * window size per the authors.
The author states:
This exclusion zone is based on an assumption that regimes will have
multiple repetitions; FLUSS is not able to segment single gesture
patterns.
Parameters
----------
profile : dict
Data structure from a MatrixProfile algorithm.
num_regimes : int
The desired number of regimes to find.
Returns
-------
dict : profile
The original MatrixProfile object with additional keys containing.
>>> {
>>> 'cac': The corrected arc curve
>>> 'cac_ez': The exclusion zone used
>>> 'regimes': Array of starting indices indicating a regime.
>>> }
"""
if not core.is_mp_obj(profile):
raise ValueError('profile must be a MatrixProfile structure')
cac = profile.get('cac')
window_size = profile.get('w')
ez = window_size * 5
# compute the CAC if needed
if isinstance(cac, type(None)):
cac = fluss(profile)
profile['cac'] = cac
regimes = []
tmp = np.copy(cac)
n = len(tmp)
for _ in range(num_regimes):
min_index = np.argmin(tmp)
regimes.append(min_index)
# apply exclusion zone
ez_start = np.max([0, min_index - ez])
ez_end = np.min([n, min_index + ez])
tmp[ez_start:ez_end] = np.inf
profile['regimes'] = np.array(regimes, dtype=int)
profile['cac_ez'] = ez
return profile
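# --- Hedged usage sketch (not part of the original module) --------------------
# Assumes a MatrixProfile structure computed elsewhere in this package (here via
# matrixprofile.compute()); the random series and window size are made up.
if __name__ == '__main__':
    import matrixprofile as mp
    ts = np.random.uniform(size=2048)
    profile = mp.compute(ts, windows=64)
    profile = extract_regimes(profile, num_regimes=3)
    print(profile['regimes'], profile['cac_ez'])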
|
src/you_get/cli_wrapper/player/__main__.py
|
adger-me/you-get
| 46,956 |
52234
|
<reponame>adger-me/you-get
#!/usr/bin/env python
''' WIP
def main():
script_main('you-get', any_download, any_download_playlist)
if __name__ == "__main__":
main()
'''
|
main.py
|
stillmatic/plaitpy
| 438 |
52237
|
from __future__ import print_function
from src import cli
from os import environ as ENV
PROFILE=False
if PROFILE:
print("PROFILING")
import cProfile
cProfile.run("cli.main()", "restats")
import pstats
p = pstats.Stats('restats')
p.strip_dirs().sort_stats('cumulative').print_stats(50)
else:
cli.main()
|
apps/forms-flow-ai/forms-flow-api/tests/conf/__init__.py
|
saravanpa-aot/SBC_DivApps
| 132 |
52239
|
"""Test-Suite for the configuration system."""
|
tests/test_openapi_schema.py
|
quaternionmedia/fastapi-crudrouter
| 686 |
52280
|
<reponame>quaternionmedia/fastapi-crudrouter
from pytest import mark
from tests import CUSTOM_TAGS
POTATO_TAGS = ["Potato"]
PATHS = ["/potato", "/carrot"]
PATH_TAGS = {
"/potato": POTATO_TAGS,
"/potato/{item_id}": POTATO_TAGS,
"/carrot": CUSTOM_TAGS,
"/carrot/{item_id}": CUSTOM_TAGS,
}
class TestOpenAPISpec:
def test_schema_exists(self, client):
res = client.get("/openapi.json")
assert res.status_code == 200
return res
def test_schema_tags(self, client):
schema = self.test_schema_exists(client).json()
paths = schema["paths"]
assert len(paths) == len(PATH_TAGS)
for path, method in paths.items():
assert len(method) == 3
for m in method:
assert method[m]["tags"] == PATH_TAGS[path]
@mark.parametrize("path", PATHS)
def test_response_types(self, client, path):
schema = self.test_schema_exists(client).json()
paths = schema["paths"]
for method in ["get", "post", "delete"]:
assert "200" in paths[path][method]["responses"]
assert "422" in paths[path]["post"]["responses"]
item_path = path + "/{item_id}"
for method in ["get", "put", "delete"]:
assert "200" in paths[item_path][method]["responses"]
assert "404" in paths[item_path][method]["responses"]
assert "422" in paths[item_path][method]["responses"]
|
Algo and DSA/LeetCode-Solutions-master/Python/intersection-of-two-linked-lists.py
|
Sourav692/FAANG-Interview-Preparation
| 3,269 |
52300
|
# Time: O(m + n)
# Space: O(1)
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
# @param two ListNodes
# @return the intersected ListNode
def getIntersectionNode(self, headA, headB):
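        # Two-pointer trick: advance both cursors one node at a time; when a
        # cursor falls off the end of its list, restart it at the head of the
        # other list. Both cursors then traverse m + n nodes, so they meet at
        # the intersection node, or at None if the lists never intersect.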
curA, curB = headA, headB
while curA != curB:
curA = curA.next if curA else headB
curB = curB.next if curB else headA
return curA
|
response/core/util.py
|
ojno/response
| 1,408 |
52305
|
import bleach
import bleach_whitelist
from django.conf import settings
from rest_framework.pagination import PageNumberPagination
def sanitize(string):
# bleach doesn't handle None so let's not pass it
if string and getattr(settings, "RESPONSE_SANITIZE_USER_INPUT", True):
return bleach.clean(
string,
tags=bleach_whitelist.markdown_tags,
attributes=bleach_whitelist.markdown_attrs,
styles=bleach_whitelist.all_styles,
)
return string
class LargeResultsSetPagination(PageNumberPagination):
page_size = 500
max_page_size = 1000
page_size_query_param = "page_size"
|
tests/r/test_lost_letter.py
|
hajime9652/observations
| 199 |
52306
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.lost_letter import lost_letter
def test_lost_letter():
"""Test module lost_letter.py by downloading
lost_letter.csv and testing shape of
extracted data has 140 rows and 8 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = lost_letter(test_path)
try:
assert x_train.shape == (140, 8)
except:
shutil.rmtree(test_path)
    raise
|
plugins/Operations/Encoding/unicode_format_dialog.py
|
nmantani/FileInsight-plugins
| 120 |
52313
|
#
# Unicode escape format setting dialog for the following plugins:
# Unicode escape
# Unicode unescape
#
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import tkinter
import tkinter.ttk
# Print setting to stdout
def print_setting(r, cf, ce):
escape_format = {"\\uXXXX (Java, JavaScript)": "\\u",
"\\uXXXX and \\UXXXXXXXX (C, Python)": "\\U",
"\\u{XXXX} (JavaScript ES6+, PHP 7+)": "\\u{",
"`u{XXXX} (PowerShell 6+)": "`u",
"%uXXXX (Legacy JavaScript)": "%u",
"U+XXXX (Unicode code point)": "U+"}
print("%s\t%s" % (escape_format[cf.get()], ce.get()))
root.quit()
# Create input dialog
root = tkinter.Tk()
root.title("Unicode escape/unescape format setting")
root.protocol("WM_DELETE_WINDOW", (lambda r=root: r.quit()))
label_format = tkinter.Label(root, text="Unicode escape format:")
label_format.grid(row=0, column=0, padx=5, pady=5, sticky="w")
combo_format = tkinter.ttk.Combobox(root, width=40, state="readonly")
combo_format["values"] = ("\\uXXXX (Java, JavaScript)",
"\\uXXXX and \\UXXXXXXXX (C, Python)",
"\\u{XXXX} (JavaScript ES6+, PHP 7+)",
"`u{XXXX} (PowerShell 6+)",
"%uXXXX (Legacy JavaScript)",
"U+XXXX (Unicode code point)")
combo_format.current(0)
combo_format.grid(row=0, column=1, padx=5, pady=5, sticky="w")
if len(sys.argv) > 1 and sys.argv[1] == "-e":
label_encoding = tkinter.Label(root, text="Input encoding:")
elif len(sys.argv) > 1 and sys.argv[1] == "-u":
label_encoding = tkinter.Label(root, text="Output encoding:")
else:
label_encoding = tkinter.Label(root, text="Encoding:")
label_encoding.grid(row=1, column=0, padx=5, pady=5, sticky="w")
combo_encoding = tkinter.ttk.Combobox(root, width=10, state="readonly")
combo_encoding["values"] = ("UTF-8", "UTF-16LE", "UTF-16BE")
combo_encoding.current(0)
combo_encoding.grid(row=1, column=1, padx=5, pady=5, sticky="w")
button = tkinter.Button(root, text='OK', command=(lambda r=root, cf=combo_format, ce=combo_encoding: print_setting(r, cf, ce)))
button.grid(row=2, column=0, padx=5, pady=5, columnspan=3)
button.focus() # Focus to this widget
# Set callback functions
for x in (combo_format, combo_encoding, button):
x.bind("<Return>", lambda event, r=root, cf=combo_format, ce=combo_encoding: print_setting(r, cf, ce))
# Adjust window position
sw = root.winfo_screenwidth()
sh = root.winfo_screenheight()
root.update_idletasks() # Necessary to get width and height of the window
ww = root.winfo_width()
wh = root.winfo_height()
root.geometry('+%d+%d' % ((sw/2) - (ww/2), (sh/2) - (wh/2)))
root.mainloop()
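# Usage sketch (illustrative addition, not part of the original script): the
# dialog is normally launched by the FileInsight plugin host, e.g.
#   python unicode_format_dialog.py -e   # labels the encoding field "Input encoding"
#   python unicode_format_dialog.py -u   # labels the encoding field "Output encoding"
# The selected escape format and encoding are printed to stdout, tab-separated.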
|
.modules/.recon-ng/modules/recon/hosts-hosts/resolve.py
|
termux-one/EasY_HaCk
| 1,103 |
52314
|
<filename>.modules/.recon-ng/modules/recon/hosts-hosts/resolve.py
from recon.core.module import BaseModule
from recon.mixins.resolver import ResolverMixin
import dns.resolver
class Module(BaseModule, ResolverMixin):
meta = {
'name': 'Hostname Resolver',
'author': '<NAME> (@LaNMaSteR53)',
'description': 'Resolves the IP address for a host. Updates the \'hosts\' table with the results.',
'comments': (
'Note: Nameserver must be in IP form.',
),
'query': 'SELECT DISTINCT host FROM hosts WHERE host IS NOT NULL AND ip_address IS NULL',
}
def module_run(self, hosts):
q = self.get_resolver()
for host in hosts:
try:
answers = q.query(host)
except dns.resolver.NXDOMAIN:
self.verbose('%s => Unknown' % (host))
except dns.resolver.NoAnswer:
self.verbose('%s => No answer' % (host))
except (dns.resolver.NoNameservers, dns.resolver.Timeout):
self.verbose('%s => DNS Error' % (host))
else:
for i in range(0, len(answers)):
if i == 0:
self.query('UPDATE hosts SET ip_address=? WHERE host=?', (answers[i].address, host))
else:
data = {
'host': self.to_unicode(host),
'ip_address': self.to_unicode(answers[i].address)
}
self.insert('hosts', data, data.keys())
self.output('%s => %s' % (host, answers[i].address))
|
aztk/spark/models/plugins/spark_ui_proxy/configuration.py
|
Geims83/aztk
| 161 |
52316
|
import os
from aztk.models.plugins.plugin_configuration import PluginConfiguration, PluginPort, PluginTargetRole
from aztk.models.plugins.plugin_file import PluginFile
dir_path = os.path.dirname(os.path.realpath(__file__))
class SparkUIProxyPlugin(PluginConfiguration):
def __init__(self):
super().__init__(
name="spark_ui_proxy",
ports=[PluginPort(internal=9999, public=True)],
target_role=PluginTargetRole.Master,
execute="spark_ui_proxy.sh",
args=["localhost:8080", "9999"],
files=[
PluginFile("spark_ui_proxy.sh", os.path.join(dir_path, "spark_ui_proxy.sh")),
PluginFile("spark_ui_proxy.py", os.path.join(dir_path, "spark_ui_proxy.py")),
],
)
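# Usage sketch (illustrative addition, not part of the original module): the
# plugin class above would typically be appended to a cluster configuration's
# plugin list before cluster creation, e.g.
#   cluster_config.plugins.append(SparkUIProxyPlugin())
# The `cluster_config` object is an assumption here, not defined in this file.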
|
flaml/tune/__init__.py
|
wuchihsu/FLAML
| 1,747 |
52317
|
<filename>flaml/tune/__init__.py
try:
from ray import __version__ as ray_version
assert ray_version >= '1.0.0'
from ray.tune import (uniform, quniform, choice, randint, qrandint, randn,
qrandn, loguniform, qloguniform, lograndint, qlograndint)
except (ImportError, AssertionError):
from .sample import (uniform, quniform, choice, randint, qrandint, randn,
qrandn, loguniform, qloguniform, lograndint, qlograndint)
from .tune import run, report
from .sample import polynomial_expansion_set
from .sample import PolynomialExpansionSet, Categorical, Float
from .trial import Trial
|
cflearn/models/cv/gan/protocol.py
|
carefree0910/carefree-learn
| 400 |
52328
|
import torch
import random
import torch.nn as nn
from abc import abstractmethod
from abc import ABCMeta
from torch import Tensor
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Optional
from .losses import GANLoss
from .losses import GANTarget
from .discriminators import DiscriminatorBase
from ..protocol import GaussianGeneratorMixin
from ....data import CVLoader
from ....types import tensor_dict_type
from ....protocol import StepOutputs
from ....protocol import TrainerState
from ....protocol import MetricsOutputs
from ....protocol import ModelWithCustomSteps
from ....constants import LOSS_KEY
from ....constants import INPUT_KEY
from ....constants import LABEL_KEY
from ....constants import PREDICTIONS_KEY
from ....misc.toolkit import to_device
from ....misc.toolkit import mode_context
from ....misc.toolkit import toggle_optimizer
class GANMixin(ModelWithCustomSteps, GaussianGeneratorMixin, metaclass=ABCMeta):
def __init__(
self,
*,
num_classes: Optional[int] = None,
gan_mode: str = "vanilla",
gan_loss_config: Optional[Dict[str, Any]] = None,
):
super().__init__()
self.num_classes = num_classes
self.gan_mode = gan_mode
self.gan_loss = GANLoss(gan_mode)
if gan_loss_config is None:
gan_loss_config = {}
self.lambda_gp = gan_loss_config.get("lambda_gp", 10.0)
@property
@abstractmethod
def g_parameters(self) -> List[nn.Parameter]:
pass
@property
@abstractmethod
def d_parameters(self) -> List[nn.Parameter]:
pass
@abstractmethod
def _g_losses(
self,
batch: tensor_dict_type,
forward_kwargs: Dict[str, Any],
) -> Tuple[tensor_dict_type, tensor_dict_type, Optional[Tensor]]:
# g_losses, sampled, labels
pass
@abstractmethod
def _d_losses(
self,
batch: tensor_dict_type,
sampled: tensor_dict_type,
labels: Optional[Tensor],
) -> tensor_dict_type:
# d_losses
pass
# utilities
@property
def can_reconstruct(self) -> bool:
return False
def forward(
self,
batch_idx: int,
batch: tensor_dict_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> tensor_dict_type:
z = torch.randn(len(batch[INPUT_KEY]), self.latent_dim, device=self.device)
return {PREDICTIONS_KEY: self.decode(z, labels=batch[LABEL_KEY], **kwargs)}
def summary_forward(self, batch_idx: int, batch: tensor_dict_type) -> None:
self._g_losses(batch, {})
class OneStageGANMixin(GANMixin, metaclass=ABCMeta):
def train_step(
self,
batch_idx: int,
batch: tensor_dict_type,
trainer: Any,
forward_kwargs: Dict[str, Any],
loss_kwargs: Dict[str, Any],
) -> StepOutputs:
opt_g = trainer.optimizers["g_parameters"]
opt_d = trainer.optimizers["d_parameters"]
# generator step
toggle_optimizer(self, opt_g)
with torch.cuda.amp.autocast(enabled=trainer.use_amp):
g_losses, sampled, labels = self._g_losses(batch, forward_kwargs)
g_loss = g_losses.pop(LOSS_KEY)
trainer.grad_scaler.scale(g_loss).backward()
if trainer.clip_norm > 0.0:
trainer._clip_norm_step()
trainer.grad_scaler.step(opt_g)
trainer.grad_scaler.update()
opt_g.zero_grad()
# discriminator step
toggle_optimizer(self, opt_d)
with torch.no_grad():
sampled = {k: v.detach().clone() for k, v in sampled.items()}
with torch.cuda.amp.autocast(enabled=trainer.use_amp):
d_losses = self._d_losses(batch, sampled, labels)
d_loss = d_losses.pop(LOSS_KEY)
trainer.grad_scaler.scale(d_loss).backward()
if trainer.clip_norm > 0.0:
trainer._clip_norm_step()
trainer.grad_scaler.step(opt_d)
trainer.grad_scaler.update()
opt_d.zero_grad()
# finalize
trainer._scheduler_step()
forward_results = {PREDICTIONS_KEY: sampled}
loss_dict = {"g": g_loss.item(), "d": d_loss.item()}
loss_dict.update({k: v.item() for k, v in g_losses.items()})
loss_dict.update({k: v.item() for k, v in d_losses.items()})
return StepOutputs(forward_results, loss_dict)
def evaluate_step( # type: ignore
self,
loader: CVLoader,
portion: float,
trainer: Any,
) -> MetricsOutputs:
loss_items: Dict[str, List[float]] = {}
for i, batch in enumerate(loader):
if i / len(loader) >= portion:
break
batch = to_device(batch, self.device)
g_losses, sampled, labels = self._g_losses(batch, {})
d_losses = self._d_losses(batch, sampled, labels)
g_loss = g_losses.pop(LOSS_KEY)
d_loss = d_losses.pop(LOSS_KEY)
loss_dict = {"g": g_loss.item(), "d": d_loss.item()}
loss_dict.update({k: v.item() for k, v in g_losses.items()})
loss_dict.update({k: v.item() for k, v in d_losses.items()})
for k, v in loss_dict.items():
loss_items.setdefault(k, []).append(v)
# gather
mean_loss_items = {k: sum(v) / len(v) for k, v in loss_items.items()}
mean_loss_items[LOSS_KEY] = sum(mean_loss_items.values())
score = trainer._weighted_loss_score(mean_loss_items)
return MetricsOutputs(score, mean_loss_items)
class VanillaGANMixin(OneStageGANMixin, metaclass=ABCMeta):
def __init__(
self,
in_channels: int,
*,
discriminator: str = "basic",
discriminator_config: Optional[Dict[str, Any]] = None,
num_classes: Optional[int] = None,
gan_mode: str = "vanilla",
gan_loss_config: Optional[Dict[str, Any]] = None,
):
super().__init__(
num_classes=num_classes,
gan_mode=gan_mode,
gan_loss_config=gan_loss_config,
)
if discriminator_config is None:
discriminator_config = {}
discriminator_config["in_channels"] = in_channels
discriminator_config["num_classes"] = num_classes
self.discriminator = DiscriminatorBase.make(
discriminator,
config=discriminator_config,
)
@property
def d_parameters(self) -> List[nn.Parameter]:
return list(self.discriminator.parameters())
def _g_losses(
self,
batch: tensor_dict_type,
forward_kwargs: Dict[str, Any],
) -> Tuple[tensor_dict_type, tensor_dict_type, Optional[Tensor]]:
labels = batch.get(LABEL_KEY)
if labels is not None:
labels = labels.view(-1)
sampled = self.sample(len(batch[INPUT_KEY]), labels=labels, **forward_kwargs)
pred_fake = self.discriminator(sampled)
loss_g = self.gan_loss(pred_fake, GANTarget(True, labels))
return {LOSS_KEY: loss_g}, {"sampled": sampled}, labels
def _d_losses(
self,
batch: tensor_dict_type,
sampled: tensor_dict_type,
labels: Optional[Tensor],
) -> tensor_dict_type:
net = batch[INPUT_KEY]
sampled_tensor = sampled["sampled"]
pred_real = self.discriminator(net)
loss_d_real = self.gan_loss(pred_real, GANTarget(True, labels))
pred_fake = self.discriminator(sampled_tensor)
loss_d_fake = self.gan_loss(pred_fake, GANTarget(False, labels))
d_loss = 0.5 * (loss_d_fake + loss_d_real)
losses = {"d_fake": loss_d_fake, "d_real": loss_d_real}
if self.gan_mode == "wgangp":
eps = random.random()
merged = eps * net + (1.0 - eps) * sampled_tensor
with mode_context(self.discriminator, to_train=None, use_grad=True):
pred_merged = self.discriminator(merged.requires_grad_(True)).output # type: ignore
loss_gp = self.gan_loss.loss(merged, pred_merged)
d_loss = d_loss + self.lambda_gp * loss_gp
losses["d_gp"] = loss_gp
losses[LOSS_KEY] = d_loss
return losses
__all__ = [
"GANMixin",
"OneStageGANMixin",
"VanillaGANMixin",
]
|
scratchai/attacks/attacks/semantic.py
|
iArunava/scratchai
| 101 |
52354
|
"""
Semantic adversarial Examples
"""
__all__ = ['semantic', 'Semantic']
def semantic(x, center:bool=True, max_val:float=1.):
"""
Semantic adversarial examples.
https://arxiv.org/abs/1703.06857
Note: data must either be centered (so that the negative image can be
made by simple negation) or must be in the interval of [-1, 1]
Arguments
---------
  x : torch.Tensor
      The input image(s) on which to perform the attack.
center : bool
If true, assumes data has 0 mean so the negative image is just negation.
If false, assumes data is in interval [0, max_val]
max_val : float
Maximum value allowed in the input data.
"""
if center:
return x*-1
return max_val - x
################################################################
###### Class to initialize this attack
###### mainly for the use with torchvision.transforms
class Semantic():
def __init__(self, net=None, **kwargs):
self.kwargs = kwargs
def __call__(self, x):
return semantic(x, **self.kwargs)
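# Usage sketch (illustrative addition, not part of the original module):
# applies the attack both functionally and in the transform style above.
# Assumes torch is installed; the tensors below are placeholders.
if __name__ == '__main__':
  import torch
  x_centered = torch.randn(1, 3, 32, 32)              # zero-mean data
  x_neg = semantic(x_centered)                        # negation attack
  x_unit = semantic(torch.rand(1, 3, 32, 32), center=False, max_val=1.)
  transform = Semantic(center=True)                   # torchvision-transform style
  assert transform(x_centered).shape == x_centered.shape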
|
blogger_cli/converter/md_to_html.py
|
Himanshu-singhal-creator/blogger-cli
| 427 |
52381
|
import os
from shutil import SameFileError, copyfile
from urllib.request import Request, urlopen
import markdown
from bs4 import BeautifulSoup as BS
from blogger_cli.converter.extractor import (
extract_and_write_static,
extract_main_and_meta_from_md,
get_summary_limit,
extract_topic,
replace_ext,
)
def convert_and_copy_to_blog(ctx, md_file):
md_file_path = os.path.abspath(os.path.expanduser(md_file))
html_body, meta = convert(ctx, md_file_path)
html_filename_meta = write_html_and_md(ctx, html_body, md_file_path, meta)
return html_filename_meta
def convert(ctx, md_file_path):
with open(md_file_path, "r", encoding="utf8") as rf:
md_data = rf.read()
ctx.vlog(":: Extracting meta info")
main_md, metadata = extract_main_and_meta_from_md(ctx, md_data)
extensions = ["extra", "smarty"]
html = markdown.markdown(main_md, extensions=extensions, output_format="html5")
char_limit = get_summary_limit(ctx, file_type="md")
metadata["_summary_"] = main_md[:char_limit]
ctx.vlog(":: Extracted summary")
return html, metadata
def write_html_and_md(ctx, html_body, md_file_path, meta):
md_filename = os.path.basename(md_file_path)
destination_dir = ctx.conversion["destination_dir"]
topic = extract_topic(ctx, meta)
md_filename = os.path.join(topic, md_filename)
html_filename = replace_ext(md_filename, ".html")
html_file_path = os.path.join(destination_dir, html_filename)
new_md_file_path = os.path.join(destination_dir, md_filename)
new_blog_post_dir = os.path.dirname(html_file_path)
ctx.vlog(":: New blog_posts_dir finalized", new_blog_post_dir)
if not os.path.exists(new_blog_post_dir):
os.mkdir(new_blog_post_dir)
extract_static = ctx.conversion["extract_static"]
if extract_static:
html_body = extract_and_write_static(
ctx, html_body, new_blog_post_dir, md_filename
)
with open(html_file_path, "w", encoding="utf8") as wf:
wf.write(html_body)
ctx.log(":: Converted basic html to", html_file_path)
# skip copying md file if converting to and from same folder.
if md_file_path != new_md_file_path:
try:
copyfile(md_file_path, new_md_file_path)
ctx.log(":: Copied md file to", new_md_file_path)
except Exception as E:
os.remove(new_md_file_path)
copyfile(md_file_path, new_md_file_path)
ctx.log(":: ERROR", E, "Overwriting md file", new_md_file_path)
return (html_filename, meta)
|
main.py
|
kindlehl/Py3NES
| 128 |
52450
|
<reponame>kindlehl/Py3NES
import argparse
from cpu import CPU
from graphics.graphics import Window
from nes_test import NesTestLog
from ram import RAM
from apu import APU
from ppu import PPU
from rom import ROM
class Nes:
def __init__(self, rom_bytes, testing):
self.rom = ROM(rom_bytes)
# create ram
self.ram = RAM()
# create ppu and apu
self.ppu = PPU()
self.apu = APU()
# create cpu
self.cpu = CPU(self.ram, self.ppu, self.apu)
# create ppu window
self.window = Window()
self.testing = testing
self.nes_test_log = None
def load(self):
self.cpu.start_up()
self.cpu.load_rom(self.rom, self.testing)
if self.testing:
# load in the nes_test.log
with open('nes_test.log', 'r') as nes_test_file:
self.nes_test_log = NesTestLog(nes_test_file.readlines())
def run(self):
        # main emulation loop
while True:
self.update()
self.draw()
def update(self):
self.cpu.identify()
if self.testing:
self.nes_test_log.compare(self.cpu)
self.cpu.execute()
self.window.update()
def draw(self):
self.window.draw()
def main():
# set up command line argument parser
parser = argparse.ArgumentParser(description='NES Emulator.')
parser.add_argument('rom_path',
metavar='R',
type=str,
help='path to nes rom')
parser.add_argument('--test')
args = parser.parse_args()
# load rom
with open(args.rom_path, 'rb') as file:
rom_bytes = file.read()
nes = Nes(rom_bytes, args.test)
nes.load()
nes.run()
if __name__ == '__main__':
main()
|
tests/Handlers/test_DictionaryDeserializer.py
|
TheBoringBakery/Riot-Watcher
| 489 |
52451
|
<gh_stars>100-1000
import json
import pytest
from riotwatcher.Handlers import DictionaryDeserializer
@pytest.mark.unit
class TestDictionaryDeserializer:
def test_basic_json(self):
deserializer = DictionaryDeserializer()
expected = {
"test": {"object": "type", "int": 1},
"bool": True,
"list": ["string", "item"],
}
actual = deserializer.deserialize("", "", json.dumps(expected))
assert expected == actual
def test_empty_string(self):
deserializer = DictionaryDeserializer()
actual = deserializer.deserialize("", "", "")
assert actual == {}
|
setup.py
|
leomauro/pysptk
| 348 |
52462
|
<filename>setup.py
import os
import subprocess
from distutils.version import LooseVersion
from glob import glob
from os.path import join
import setuptools.command.build_py
import setuptools.command.develop
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext as _build_ext
version = '0.1.19'
# Adapted from https://github.com/pytorch/pytorch
cwd = os.path.dirname(os.path.abspath(__file__))
if os.getenv('PYSPTK_BUILD_VERSION'):
version = os.getenv('PYSPTK_BUILD_VERSION')
else:
try:
sha = subprocess.check_output(
['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
version += '+' + sha[:7]
except subprocess.CalledProcessError:
pass
except IOError: # FileNotFoundError for python 3
pass
class build_ext(_build_ext):
# https://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
class build_py(setuptools.command.build_py.build_py):
def run(self):
self.create_version_file()
setuptools.command.build_py.build_py.run(self)
@staticmethod
def create_version_file():
global version, cwd
print('-- Building version ' + version)
version_path = os.path.join(cwd, 'pysptk', 'version.py')
with open(version_path, 'w') as f:
f.write("__version__ = '{}'\n".format(version))
class develop(setuptools.command.develop.develop):
def run(self):
build_py.create_version_file()
setuptools.command.develop.develop.run(self)
cmdclass = {"build_py": build_py, "develop": develop}
min_cython_ver = '0.28.0'
try:
import Cython
ver = Cython.__version__
_CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver)
except ImportError:
_CYTHON_INSTALLED = False
try:
if not _CYTHON_INSTALLED:
raise ImportError('No supported version of Cython installed.')
from Cython.Distutils import build_ext
cython = True
except ImportError:
cython = False
include_dirs = [join(os.getcwd(), "lib", "SPTK", "include")]
cmdclass['build_ext'] = build_ext
if cython:
ext = '.pyx'
import numpy as np
include_dirs.insert(0, np.get_include())
else:
ext = '.c'
if not os.path.exists(join("pysptk", "_sptk" + ext)):
raise RuntimeError("Cython is required to generate C code.")
# SPTK sources
src_top = join("lib", "SPTK")
src_bin_top = join(src_top, "bin")
swipe_src = [
join(src_bin_top, "pitch", "swipe", "swipe.c"),
join(src_bin_top, "pitch", "swipe", "vector.c"),
]
rapt_src = [
join(src_bin_top, "pitch", "snack", "jkGetF0.c"),
join(src_bin_top, "pitch", "snack", "sigproc.c"),
]
sptklib_src = glob(join(src_top, "lib", "*.c"))
sptk_src = glob(join(src_bin_top, "*", "_*.c"))
# collect all sources
sptk_all_src = sptk_src + sptklib_src + swipe_src + rapt_src
# Filter ignore list
ignore_bin_list = [join(src_bin_top, "wavjoin"), join(src_bin_top, "wavsplit"),
join(src_bin_top, "vc")]
for ignore in ignore_bin_list:
sptk_all_src = list(
filter(lambda s: not s.startswith(ignore), sptk_all_src))
# define core cython module
ext_modules = [Extension(
name="pysptk._sptk",
sources=[join("pysptk", "_sptk" + ext)] + sptk_all_src,
include_dirs=include_dirs,
language="c",
extra_compile_args=['-std=c99']
)]
with open("README.md", "r") as fh:
LONG_DESC = fh.read()
setup(
name='pysptk',
version=version,
description='A python wrapper for Speech Signal Processing Toolkit (SPTK)',
long_description=LONG_DESC,
long_description_content_type="text/markdown",
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/r9y9/pysptk',
license='MIT',
packages=find_packages(exclude=["tests", "examples"]),
package_data={'': ['example_audio_data/*']},
ext_modules=ext_modules,
cmdclass=cmdclass,
setup_requires=["numpy >= 1.8.0"],
install_requires=[
'scipy',
'six',
'decorator',
'cython >= ' + min_cython_ver,
],
tests_require=['nose', 'coverage'],
extras_require={
'docs': ['numpydoc', 'sphinx_rtd_theme', 'seaborn'],
'test': ['nose', 'coverage', "flake8"],
},
classifiers=[
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
"Programming Language :: Cython",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
],
keywords=["SPTK"]
)
|
tests/unit/tuner/dataset/test_class_sampler.py
|
jina-ai/finetuner
| 270 |
52472
|
<filename>tests/unit/tuner/dataset/test_class_sampler.py
from collections import Counter
import pytest
from finetuner.tuner.dataset.samplers import ClassSampler
@pytest.mark.parametrize("batch_size", [-1, 0])
def test_wrong_batch_size(batch_size: int):
with pytest.raises(ValueError, match="batch_size"):
ClassSampler([0, 1], batch_size, 1)
@pytest.mark.parametrize("num_items_per_class", [-1, 0])
def test_wrong_num_items_per_class(num_items_per_class: int):
with pytest.raises(ValueError, match="num_items_per_class"):
ClassSampler([0, 1], 1, num_items_per_class)
def test_normal_case():
labels = [1, 1, 2, 2, 3, 3, 4, 4]
sampler = ClassSampler(labels, 4, 2)
assert len(sampler) == 2
all_inds = []
for i, batch in enumerate(sampler):
all_inds += batch
assert len(batch) == 4
assert i + 1 == 2
assert set(all_inds) == set(range(8))
def test_classes_in_batch():
labels = []
for i in range(50):
labels += [i] * 20
for i in range(50, 100):
labels += [i] * 19 # Mini repeating test as well
class_to_label = {}
for idx, label in enumerate(labels):
class_to_label[idx] = label
sampler = ClassSampler(labels, 20, 5)
assert len(sampler) >= 98
for i, batch in enumerate(sampler):
c = Counter([class_to_label[element] for element in batch])
assert len(c) == 4
for val in c.values():
assert val == 5
assert i + 1 >= 98 # Best we can hope for
def test_almost_full_coverage():
"""Check that almost all items get covered in one epoch"""
labels = []
for i in range(100):
labels += [i] * 20
sampler = ClassSampler(labels, 20, 5)
assert len(sampler) >= 98
c = Counter()
for i, batch in enumerate(sampler):
c.update(batch)
assert i + 1 >= 98 # Best we can hope for
assert set(c).issubset(range(100 * 20))
assert c.most_common(1)[0][1] == 1
def test_label_repetition1():
"""Test that elements from class get repeated to fill the batch"""
labels = [1, 1, 1, 2, 2]
sampler = ClassSampler(labels, 6, 3)
assert len(sampler) == 1
all_inds = []
for batch in sampler:
all_inds += batch
assert len(batch) == 6
c = Counter(all_inds)
assert c[3] >= 1
assert c[4] >= 1
assert c[3] + c[4] == 3
@pytest.mark.parametrize('num_items_per_class', [4, 2])
def test_label_repetition2(num_items_per_class):
labels = [1, 1, 1, 1, 2, 2, 2]
sampler = ClassSampler(labels, 4, num_items_per_class)
assert len(sampler) == 2
all_inds = []
for i, batch in enumerate(sampler):
all_inds += batch
assert len(batch) == 4
assert i + 1 == 2
c = Counter(all_inds)
assert c[4] >= 1
assert c[5] >= 1
assert c[6] >= 1
assert c[6] + c[5] + c[4] == 4
def test_cutoff1():
"""Cutoff due to last batch being < batch_size"""
labels = [1, 1, 1, 1, 2, 2]
sampler = ClassSampler(labels, 4, 2)
assert len(sampler) == 1
all_inds = []
for i, batch in enumerate(sampler):
all_inds += batch
assert i + 1 == 1
# Make sure the first class got cut off
c = Counter(all_inds)
assert c[0] + c[1] + c[2] + c[3] == 2
def test_cutoff2():
"""Cutoff due to last batch only containing one class"""
labels = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2]
class_to_label = {}
for idx, label in enumerate(labels):
class_to_label[idx] = label
sampler = ClassSampler(labels, 4, 2)
assert len(sampler) == 2
all_inds = []
for i, batch in enumerate(sampler):
all_inds += batch
assert i + 1 == 2
# Make sure that most common items are cut off
c = Counter([class_to_label[label] for label in all_inds])
assert c[1] == 4
assert c[2] == 4
|
PyFunceble/utils/profile.py
|
Centaurioun/PyFunceble
| 213 |
52476
|
<reponame>Centaurioun/PyFunceble
# pylint: disable=invalid-name
"""
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides some global utilities.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import contextlib
import cProfile
import io
import pstats
@contextlib.contextmanager
def profile_it(*, sort_stats: str = "cumulative", show_callers: bool = False):
"""
    Provides a context manager which activates the profiling of our
    source code.
    :param sort_stats:
        The column to sort the profiling statistics by.
:param show_callers:
Authorizes the output of the callers.
"""
profiler = cProfile.Profile()
profiler.enable()
yield
profiler.disable()
our_stream = io.StringIO()
    profiler_stats = pstats.Stats(profiler, stream=our_stream)
    if sort_stats:
        profiler_stats.sort_stats(sort_stats)
    profiler_stats.print_stats()
    if show_callers:
        profiler_stats.print_callees()
print(our_stream.getvalue())
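# Usage sketch (illustrative addition, not part of the original module):
# profiles an arbitrary block of code with the context manager above.
if __name__ == "__main__":
    with profile_it(sort_stats="cumulative", show_callers=False):
        sum(i * i for i in range(100_000))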
|
pygithub3/requests/repos/hooks.py
|
teamorchard/python-github3
| 107 |
52580
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from . import Request
from pygithub3.resources.repos import Hook
class List(Request):
uri = 'repos/{user}/{repo}/hooks'
resource = Hook
class Get(Request):
uri = 'repos/{user}/{repo}/hooks/{id}'
resource = Hook
class Create(Request):
uri = 'repos/{user}/{repo}/hooks'
resource = Hook
body_schema = {
'schema': ('name', 'config', 'events', 'active'),
'required': ('name', 'config'),
}
class Update(Request):
uri = 'repos/{user}/{repo}/hooks/{id}'
resource = Hook
body_schema = {
'schema': ('name', 'config', 'events', 'add_events', 'remove_events',
'active'),
'required': (),
}
class Test(Request):
uri = 'repos/{user}/{repo}/hooks/{id}/test'
class Delete(Request):
uri = 'repos/{user}/{repo}/hooks/{id}'
|
pydis_site/constants.py
|
hannah-m-moore/site
| 700 |
52603
|
<filename>pydis_site/constants.py
import os
GIT_SHA = os.environ.get("GIT_SHA", "development")
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
# How long to wait for synchronous requests before timing out
TIMEOUT_PERIOD = int(os.environ.get("TIMEOUT_PERIOD", 5))
|
sciencebeam/pipeline_runners/beam_pipeline_runner.py
|
elifesciences/sciencebeam
| 272 |
52608
|
<filename>sciencebeam/pipeline_runners/beam_pipeline_runner.py
from __future__ import absolute_import
import argparse
import logging
import mimetypes
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions, SetupOptions
from apache_beam.metrics.metric import Metrics
from sciencebeam_utils.utils.collection import (
extend_dict
)
from sciencebeam_utils.beam_utils.utils import (
TransformAndCount,
TransformAndLog,
MapOrLog,
PreventFusion
)
from sciencebeam_utils.beam_utils.io import (
read_all_from_path,
save_file_content
)
from sciencebeam_utils.beam_utils.main import (
add_cloud_args,
process_cloud_args
)
from sciencebeam.config.app_config import get_app_config
from sciencebeam.utils.logging import configure_logging
from sciencebeam.pipelines import (
get_pipeline_for_configuration_and_args,
add_pipeline_args
)
from sciencebeam.pipeline_runners.pipeline_runner_utils import (
add_batch_args,
process_batch_args,
encode_if_text_type,
get_output_file_for_source_file_fn,
get_remaining_file_list_for_args,
DataProps
)
LOGGER = logging.getLogger(__name__)
def get_logger():
return logging.getLogger(__name__)
class MetricCounters:
FILES = 'files'
def ReadFileContent():
return "ReadFileContent" >> TransformAndCount(
beam.Map(lambda file_url: {
DataProps.SOURCE_FILENAME: file_url,
DataProps.FILENAME: file_url,
DataProps.CONTENT: read_all_from_path(file_url)
}),
MetricCounters.FILES
)
def get_step_error_counter(step):
return 'error_%s' % step
def get_step_ignored_counter(step):
return 'ignored_%s' % step
def get_step_processed_counter(step):
return 'processed_%s' % step
def execute_or_skip_step(step):
supported_types = step.get_supported_types()
processed_counter = Metrics.counter(
'PipelineStep', get_step_processed_counter(step)
)
ignored_counter = Metrics.counter(
'PipelineStep', get_step_ignored_counter(step)
)
def wrapper(x):
data_type = x['type']
if data_type in supported_types:
            get_logger().debug('executing step %s: %s (%s)', step, x.keys(), data_type)
result = extend_dict(x, step(x))
get_logger().debug(
'result of step %s: %s (%s)',
step, result.keys(), result.get('type')
)
processed_counter.inc()
return result
get_logger().debug(
'skipping step %s, %s not in supported types (%s)', step, data_type, supported_types
)
ignored_counter.inc()
return x
return wrapper
def get_step_transform(step):
step_name = str(step)
return step_name >> MapOrLog(
execute_or_skip_step(step),
log_fn=lambda e, v: (
get_logger().warning(
'caught exception (ignoring item): %s, source file: %s, step: %s',
e, v[DataProps.SOURCE_FILENAME], step_name, exc_info=e
)
), error_count=get_step_error_counter(step)
)
def configure_pipeline(p, opt, pipeline, config):
get_default_output_file_for_source_file = get_output_file_for_source_file_fn(opt)
file_list = get_remaining_file_list_for_args(opt)
LOGGER.debug('file_list: %s', file_list)
if not file_list:
LOGGER.info('no files to process')
return
steps = pipeline.get_steps(config, opt)
LOGGER.info('steps: %s', steps)
input_urls = (
p |
beam.Create(file_list) |
PreventFusion()
)
input_data = (
input_urls |
ReadFileContent() |
"Determine Type" >> beam.Map(lambda d: extend_dict(d, {
DataProps.TYPE: mimetypes.guess_type(d[DataProps.SOURCE_FILENAME])[0]
}))
)
result = input_data
for step in steps:
LOGGER.debug('step: %s', step)
result |= get_step_transform(step)
_ = ( # noqa: F841
result |
"WriteOutput" >> TransformAndLog(
beam.Map(lambda v: save_file_content(
get_default_output_file_for_source_file(
v[DataProps.SOURCE_FILENAME]
),
encode_if_text_type(v[DataProps.CONTENT])
)),
log_fn=lambda x: get_logger().info('saved output to: %s', x)
)
)
def parse_args(pipeline, config, argv=None):
parser = argparse.ArgumentParser()
add_pipeline_args(parser)
add_batch_args(parser)
add_cloud_args(parser)
pipeline.add_arguments(parser, config, argv)
args = parser.parse_args(argv)
if args.debug:
logging.getLogger().setLevel('DEBUG')
process_batch_args(args)
process_cloud_args(
args, args.output_path,
name='sciencebeam-convert'
)
get_logger().info('args: %s', args)
return args
def run(args, config, pipeline, save_main_session):
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions.from_dictionary(vars(args))
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
with beam.Pipeline(args.runner, options=pipeline_options) as p:
configure_pipeline(p, args, pipeline, config)
# Execute the pipeline and wait until it is completed.
def main(argv=None, save_main_session=True):
config = get_app_config()
pipeline = get_pipeline_for_configuration_and_args(config, argv=argv)
args = parse_args(pipeline, config, argv)
run(args, config=config, pipeline=pipeline, save_main_session=save_main_session)
if __name__ == '__main__':
configure_logging()
main()
|
tests/test_semantic_faster.py
|
flying-sheep/goatools
| 477 |
52613
|
#!/usr/bin/env python
"""Test faster version of sematic similarity"""
from __future__ import print_function
# Computing basic semantic similarities between GO terms
# Adapted from book chapter written by _<NAME> and <NAME>_
# How to compute semantic similarity between GO terms.
# First we need to write a function that calculates the minimum number
# of branches connecting two GO terms.
import os
import timeit
from collections import Counter
## from goatools.base import get_godag
## from goatools.associations import dnld_assc
## from goatools.semantic import semantic_similarity
## from goatools.semantic import TermCounts
## from goatools.semantic import get_info_content
## from goatools.semantic import deepest_common_ancestor
## from goatools.semantic import resnik_sim
## from goatools.semantic import lin_sim
## from goatools.godag.consts import NS2GO
from goatools.anno.gpad_reader import GpadReader
from goatools.semantic import TermCounts
from tests.utils import get_godag
from tests.utils import get_anno_fullname
from tests.utils import prt_hms
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
def test_semantic_similarity():
"""Test faster version of sematic similarity"""
godag_r0 = get_godag('go-basic.obo')
## godag_r1 = get_godag('go-basic.obo', optional_attrs=['relationship'])
annoobj = GpadReader(get_anno_fullname('goa_human.gpad'), godag=godag_r0)
ns2assoc = annoobj.get_ns2assc()
assoc = annoobj.get_id2gos('all')
# Get TermCounts for each namespace and for all namespaces
ns2tcnt = {ns:TermCounts(godag_r0, ns2assoc[ns]) for ns in ['BP', 'MF', 'CC']}
tic = timeit.default_timer()
tcntobj = TermCounts(godag_r0, assoc)
prt_hms(tic, 'CUR ACTUAL {N:,} TermCounts initialized'.format(N=len(tcntobj.gocnts)))
# Compare various TermCount counts
for nspc in ['BP', 'MF', 'CC']:
for goid, cnt in ns2tcnt[nspc].gocnts.items():
assert tcntobj.gocnts[goid] == cnt
# Compare old and new count
tic = timeit.default_timer()
gocnts_old = _old_init_count_terms(godag_r0, assoc.values())
assert gocnts_old
prt_hms(tic, 'OLD EXPECTED {N:,} TermCounts initialized'.format(N=len(gocnts_old)))
for goid, cnt_old in gocnts_old.items():
assert cnt_old == tcntobj.gocnts[goid]
def _old_init_count_terms(go2obj, annots_values):
'''
Fills in the counts and overall aspect counts.
'''
gocnts = Counter()
gonotindag = set()
# Fill gocnts with GO IDs in annotations and their corresponding counts
for terms in annots_values: # key is 'gene'
        # Make a union of all the terms for a gene; if term parents are
        # propagated, they won't get double-counted for the gene
allterms = set()
for go_id in terms:
goobj = go2obj.get(go_id, None)
if goobj is not None:
allterms.add(go_id)
allterms |= goobj.get_all_parents()
else:
gonotindag.add(go_id)
# Add 1 for each GO annotated to this gene product
for parent in allterms:
gocnts[parent] += 1
if gonotindag:
print("{N} Assc. GO IDs not found in the GODag\n".format(N=len(gonotindag)))
return gocnts
if __name__ == '__main__':
test_semantic_similarity()
|
artemis/general/profile.py
|
peteroconnor-bc/artemis
| 235 |
52624
|
from tempfile import mkstemp
import cProfile
import pstats
from artemis.general.display import surround_with_header
import os
def what_are_we_waiting_for(command, sort_by ='time', max_len = 20, print_here = True):
"""
An easy way to show what is taking all the time when you run something.
Taken from docs: https://docs.python.org/2/library/profile.html#module-cProfile
:param command: A string python command
:param sort_by: How to sort results. {'time', 'cumtime', 'calls', ...}.
See https://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats
:param max_len: Maximum number of things to show in profile.
:param print_here: Print the results here (instead of returning them).
:return: A pstats.Stats object containing the profiling results.
"""
_, filepath = mkstemp()
try:
cProfile.run(command, filepath)
finally:
p = pstats.Stats(filepath)
os.remove(filepath)
p.strip_dirs()
p.sort_stats(sort_by)
if print_here:
print(surround_with_header('Profile for "{}"'.format(command), width=100, char='='))
p.print_stats(max_len)
print('='*100)
return p
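# Usage sketch (illustrative addition, not part of the original module):
# profiles a throwaway command string, as described in the docstring above.
if __name__ == '__main__':
    what_are_we_waiting_for("sorted(range(100000), key=lambda i: -i)", sort_by='time', max_len=10)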
|
research/carls/context.py
|
srihari-humbarwadi/neural-structured-learning
| 939 |
52638
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Global context for knowledge bank operations."""
import threading
from typing import Text
from research.carls import dynamic_embedding_config_pb2 as de_config_pb2
# A map from variable name to DynamicEmbeddingConfig.
_knowledge_bank_collections = {}
_lock = threading.Lock()
def add_to_collection(name: Text, config: de_config_pb2.DynamicEmbeddingConfig):
"""Adds given (name, config) pair to global collectionss.
Args:
name: A string denoting the variable name.
config: An instance of DynamicEmbeddingConfig.
Raises:
TypeError: Invalid input.
ValueError: Name is empty, or a different config is added for an existing
variable.
"""
if not name:
raise ValueError("Empty name.")
if not isinstance(config, de_config_pb2.DynamicEmbeddingConfig):
raise TypeError("Config is not an instance of DynamicEmbeddingConfig.")
if name in _knowledge_bank_collections.keys():
existing_config = _knowledge_bank_collections[name]
if config.SerializeToString() != existing_config.SerializeToString():
raise ValueError(
"Adding a new config for the same var name is not allowed, existing:"
" %r, new: %r." % (existing_config, config))
with _lock:
_knowledge_bank_collections[name] = de_config_pb2.DynamicEmbeddingConfig()
_knowledge_bank_collections[name].CopyFrom(config)
def get_all_collection():
"""Returns a list of all (name, config) pairs."""
with _lock:
return [(key, value) for key, value in _knowledge_bank_collections.items()]
def clear_all_collection():
"""Clears existing all (name, config) pairs."""
with _lock:
_knowledge_bank_collections.clear()
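# Usage sketch (illustrative addition, not part of the original module):
# registers a default-constructed config and reads the collection back.
# Assumes DynamicEmbeddingConfig builds with default (empty) fields.
if __name__ == "__main__":
  example_config = de_config_pb2.DynamicEmbeddingConfig()
  add_to_collection("embedding/example_var", example_config)
  print(get_all_collection())
  clear_all_collection()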
|
epf/src/pipelines/im_color_modifier.py
|
MLReef/mlreef
| 1,607 |
52653
|
<reponame>MLReef/mlreef
# MLReef-2020: Color modifications for data augmentation.
from PIL import Image, ImageEnhance
import argparse
import sys
import os
from pathlib import Path
class ColorModifier:
def __init__(self,params):
self.input_dir = params['input_path']
self.output_dir = params['output_path']
self.brightness = float(params['brightness'])
self.contrast = float(params['contrast'])
self.saturation = float(params['saturation'])
# create folder if does not exists
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
# Please add here the extensions that you need
self.ext = ['.jpeg', '.png', '.jpg']
def __execute__(self):
# Walk the directories to find images
for root, dirs, files in os.walk(self.input_dir):
for file in files:
if file.endswith(tuple(self.ext)):
image = os.path.join(root, file)
fullpath, extension = os.path.splitext(image)
im = Image.open(image)
enhancer = ImageEnhance.Brightness(im)
enhanced_im = enhancer.enhance(self.brightness)
enhancer = ImageEnhance.Contrast(enhanced_im)
enhanced_im = enhancer.enhance(self.contrast)
enhancer = ImageEnhance.Color(enhanced_im)
enhanced_im = enhancer.enhance(self.saturation)
relative_p = os.path.relpath(fullpath, self.input_dir)
folders = os.path.split(relative_p)[0]
Path(os.path.join(self.output_dir, folders)).mkdir(parents=True, exist_ok=True)
enhanced_im.save(os.path.join(self.output_dir, '{}_cm{}'.format(relative_p, extension)))
print("Color modifier done")
return 1
def process_arguments(args):
parser = argparse.ArgumentParser(description='Pipeline: Color modifier')
parser.add_argument('--input-path', action='store', default='.', help='path to directory of images or image file')
parser.add_argument('--output-path', action='store', default='.', help='output directory to save images')
parser.add_argument('--brightness', action='store', default=0.5, help='Brightness value')
parser.add_argument('--contrast', action='store', default=0.5, help='contrast value')
parser.add_argument('--saturation', action='store', default=2.0, help='saturation value')
params = vars(parser.parse_args(args))
    if params['input_path'] is None or params['output_path'] is None:
parser.error("Paths are required. You did not specify input path or output path.")
return params
if __name__ == "__main__":
print("Beginning execution of im_color_modifier.py script ......... \n")
params = process_arguments(sys.argv[1:])
op = ColorModifier(params)
print("input path:", op.input_dir)
print("output path:", op.output_dir)
print("Brightness",op.brightness)
print("Contrast",op.contrast)
print("Saturation",op.saturation)
op.__execute__()
|
tools/cp.py
|
onecoolx/picasso
| 269 |
52657
|
<filename>tools/cp.py<gh_stars>100-1000
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Copy a file.
This module works much like the cp posix command - it takes 2 arguments:
(src, dst) and copies the file with path |src| to |dst|.
"""
import shutil
import sys
import os
def Main(src, dst):
# Use copy instead of copyfile to ensure the executable bit is copied.
path = os.path.dirname(dst)
is_exit = os.path.exists(path)
if not is_exit:
os.makedirs(path)
if os.path.isdir(src):
if os.path.exists(dst):
shutil.rmtree(dst)
return shutil.copytree(src, dst)
else:
return shutil.copy(src, dst)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1], sys.argv[2]))
|
data_generation/nlp.py
|
haeseung81/PyTorchStepByStep
| 170 |
52667
|
import requests
import zipfile
import os
import errno
import nltk
from nltk.tokenize import sent_tokenize
ALICE_URL = 'https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1476/alice28-1476.txt'
WIZARD_URL = 'https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1740/wizoz10-1740.txt'
def download_text(url, localfolder='texts'):
localfile = os.path.split(url)[-1]
try:
os.mkdir(f'{localfolder}')
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
r = requests.get(url, allow_redirects=True)
open(os.path.join(localfolder, localfile), 'wb').write(r.content)
except Exception as e:
print(f'Error downloading file: {str(e)}')
def sentence_tokenize(source, quote_char='\\', sep_char=',',
include_header=True, include_source=True,
extensions=('txt'), **kwargs):
nltk.download('punkt')
# If source is a folder, goes through all files inside it
# that match the desired extensions ('txt' by default)
if os.path.isdir(source):
filenames = [f for f in os.listdir(source)
if os.path.isfile(os.path.join(source, f)) and
os.path.splitext(f)[1][1:] in extensions]
elif isinstance(source, str):
filenames = [source]
# If there is a configuration file, builds a dictionary with
# the corresponding start and end lines of each text file
config_file = os.path.join(source, 'lines.cfg')
config = {}
if os.path.exists(config_file):
with open(config_file, 'r') as f:
rows = f.readlines()
for r in rows[1:]:
fname, start, end = r.strip().split(',')
config.update({fname: (int(start), int(end))})
new_fnames = []
# For each file of text
for fname in filenames:
# If there's a start and end line for that file, use it
try:
start, end = config[fname]
except KeyError:
start = None
end = None
        # Opens the file, slices the configured lines (if any)
# cleans line breaks and uses the sentence tokenizer
with open(os.path.join(source, fname), 'r') as f:
contents = (''.join(f.readlines()[slice(start, end, None)])
.replace('\n', ' ').replace('\r', ''))
corpus = sent_tokenize(contents, **kwargs)
# Builds a CSV file containing tokenized sentences
base = os.path.splitext(fname)[0]
new_fname = f'{base}.sent.csv'
new_fname = os.path.join(source, new_fname)
with open(new_fname, 'w') as f:
# Header of the file
if include_header:
if include_source:
f.write('sentence,source\n')
else:
f.write('sentence\n')
# Writes one line for each sentence
for sentence in corpus:
if include_source:
f.write(f'{quote_char}{sentence}{quote_char}{sep_char}{fname}\n')
else:
f.write(f'{quote_char}{sentence}{quote_char}\n')
new_fnames.append(new_fname)
# Returns list of the newly generated CSV files
return sorted(new_fnames)
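# Usage sketch (illustrative addition, not part of the original module):
# downloads the two public-domain texts referenced above and tokenizes them
# into per-book CSV files. Requires network access and nltk's punkt data.
if __name__ == '__main__':
    download_text(ALICE_URL, localfolder='texts')
    download_text(WIZARD_URL, localfolder='texts')
    print(sentence_tokenize('texts', include_header=True, include_source=True))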
|
leonardo/module/web/models/__init__.py
|
timgates42/django-leonardo
| 102 |
52672
|
from leonardo.module.web.models.page import *
from leonardo.module.web.models.widget import *
from leonardo.module.web.widget.icon.models import IconWidget
from leonardo.module.web.widget.application.models import ApplicationWidget
from leonardo.module.web.widget.markuptext.models import MarkupTextWidget
from leonardo.module.web.widget.feedreader.models import FeedReaderWidget
from leonardo.module.web.widget.pagetitle.models import PageTitleWidget
from leonardo.module.web.widget.table.models import TableWidget
from leonardo.module.web.widget.siteheading.models import SiteHeadingWidget
from leonardo.module.web.widget.htmltext.models import HtmlTextWidget
|
tests/test_provider_MissionCriticalCloud_cosmic.py
|
mjuenema/python-terrascript
| 507 |
52675
|
# tests/test_provider_MissionCriticalCloud_cosmic.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:14:40 UTC)
def test_provider_import():
import terrascript.provider.MissionCriticalCloud.cosmic
def test_resource_import():
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_affinity_group
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_disk
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_instance
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_ipaddress
from terrascript.resource.MissionCriticalCloud.cosmic import (
cosmic_loadbalancer_rule,
)
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_network
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_network_acl
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_network_acl_rule
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_nic
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_port_forward
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_private_gateway
from terrascript.resource.MissionCriticalCloud.cosmic import (
cosmic_secondary_ipaddress,
)
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_ssh_keypair
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_static_nat
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_static_route
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_template
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_vpc
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_vpn_connection
from terrascript.resource.MissionCriticalCloud.cosmic import (
cosmic_vpn_customer_gateway,
)
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_vpn_gateway
def test_datasource_import():
from terrascript.data.MissionCriticalCloud.cosmic import cosmic_network_acl
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.MissionCriticalCloud.cosmic
#
# t = terrascript.provider.MissionCriticalCloud.cosmic.cosmic()
# s = str(t)
#
# assert 'https://github.com/MissionCriticalCloud/terraform-provider-cosmic' in s
# assert '0.5.0' in s
|
src/genie/libs/parser/ios/tests/ShowEthernetServiceInstanceStats/cli/equal/golden_output_expected.py
|
balmasea/genieparser
| 204 |
52694
|
<filename>src/genie/libs/parser/ios/tests/ShowEthernetServiceInstanceStats/cli/equal/golden_output_expected.py<gh_stars>100-1000
expected_output = {
"max_num_of_service_instances": 32768,
"service_instance": {
2051: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2052: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2053: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2054: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2055: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2056: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2057: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2058: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2059: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2060: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2061: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2062: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2063: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2064: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2065: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2066: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2067: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2068: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2069: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2070: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2071: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2072: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2073: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2074: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2075: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2076: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2077: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2078: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2079: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2080: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2081: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2082: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2083: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2084: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2085: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2086: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2087: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2088: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2089: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2090: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
2091: {
"pkts_out": 0,
"pkts_in": 0,
"interface": "GigabitEthernet0/0/5",
"bytes_in": 0,
"bytes_out": 0,
},
},
}
|
classification/tests/test_classifier.py
|
magesh-technovator/serverless-transformers-on-aws-lambda
| 103 |
52696
|
<gh_stars>100-1000
from src.classifier import Classifier
pipeline = Classifier()
def test_response(requests, response):
assert response == pipeline(requests)
|
tools/wptrunner/wptrunner/executors/executoropera.py
|
meyerweb/wpt
| 14,668 |
52699
|
<gh_stars>1000+
from ..webdriver_server import OperaDriverServer
from .base import WdspecExecutor, WdspecProtocol
class OperaDriverProtocol(WdspecProtocol):
server_cls = OperaDriverServer
class OperaDriverWdspecExecutor(WdspecExecutor):
protocol_cls = OperaDriverProtocol
|
util/test/tests/D3D11/D3D11_Untyped_Backbuffer_Descriptor.py
|
hbina/renderdoc
| 6,181 |
52716
|
<filename>util/test/tests/D3D11/D3D11_Untyped_Backbuffer_Descriptor.py
import renderdoc as rd
import rdtest
class D3D11_Untyped_Backbuffer_Descriptor(rdtest.TestCase):
demos_test_name = 'D3D11_Untyped_Backbuffer_Descriptor'
def check_capture(self):
# find the first action
action = self.find_action("Draw")
self.controller.SetFrameEvent(action.eventId, False)
pipe: rd.PipeState = self.controller.GetPipelineState()
self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 0.25, 0.5, [1.0, 1.0, 1.0, 1.0])
rdtest.log.success("Picked value for first action is as expected")
# find the second action
action = self.find_action("Draw", action.eventId+1)
self.controller.SetFrameEvent(action.eventId, False)
pipe: rd.PipeState = self.controller.GetPipelineState()
self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 0.75, 0.5, [1.0, 1.0, 1.0, 1.0])
rdtest.log.success("Picked value for second action is as expected")
|
t72pkl.py
|
kopetri/LayoutNetv2
| 166 |
52753
|
# load .t7 file and save as .pkl data
import torchfile
import cv2
import numpy as np
import scipy.io as sio
import pickle
import time
data_path = './data/test_PC/'
# panoContext
#img_tr = torchfile.load('./data/panoContext_img_train.t7')
#print(img_tr.shape)
#lne_tr = torchfile.load('./data/panoContext_line_train.t7')
#print(lne_tr.shape)
#edg_tr = torchfile.load('./data/panoContext_edge_train.t7')
#print(edg_tr.shape)
#junc_tr = torchfile.load('./data/panoContext_cor_train.t7')
#print(junc_tr.shape)
#print('done')
#img_tr = torchfile.load('./data/panoContext_img_val.t7')
#print(img_tr.shape)
#lne_tr = torchfile.load('./data/panoContext_line_val.t7')
#print(lne_tr.shape)
#edg_tr = torchfile.load('./data/panoContext_edge_val.t7')
#print(edg_tr.shape)
#junc_tr = torchfile.load('./data/panoContext_cor_val.t7')
#print(junc_tr.shape)
#print('done')
img_tr = torchfile.load('./data/panoContext_img_test.t7')
print(img_tr.shape)
lne_tr = torchfile.load('./data/panoContext_line_test.t7')
print(lne_tr.shape)
edg_tr = torchfile.load('./data/panoContext_edge_test.t7')
print(edg_tr.shape)
junc_tr = torchfile.load('./data/panoContext_cor_test.t7')
print(junc_tr.shape)
print('done')
# stanford
#img_tr = torchfile.load('./data/stanford2d-3d_img_area_5.t7')
#print(img_tr.shape)
#lne_tr = torchfile.load('./data/stanford2d-3d_line_area_5.t7')
#print(lne_tr.shape)
#edg_tr = torchfile.load('./data/stanford2d-3d_edge_area_5.t7')
#print(edg_tr.shape)
#junc_tr = torchfile.load('./data/stanford2d-3d_cor_area_5.t7')
#print(junc_tr.shape)
#print('done')
gt_txt_path = './data/panoContext_testmap.txt'
gt_path = './data/layoutnet_dataset/test/label_cor/'
# Load data
namelist = []
id_num = []
with open(gt_txt_path, 'r') as f:
while(True):
line = f.readline().strip()
if not line:
break
id_num0 = line.split()
id_num0 = int(id_num0[1])
id_num.append(id_num0)
namelist.append(line)
id_num = np.array(id_num)
cnt = 0
for num in range(img_tr.shape[0]):
print(num)
image = img_tr[num]
image = np.transpose(image, (1,2,0))#*255.0
line = lne_tr[num]
line = np.transpose(line, (1,2,0))
edge = edg_tr[num]
edge = np.transpose(edge, (1,2,0))
junc = junc_tr[num]
junc = np.transpose(junc, (1,2,0))
# corner gt
idn = np.where(id_num == num)
idn = idn[0][0]
filename = namelist[idn]
filename = filename.split()
filename = gt_path+filename[0][:-4]+'.txt'#'.mat'
cnt+=1
cor = np.loadtxt(filename)
cor_sum = 0
for cor_num in range(cor.shape[0]):
cor_sum+=junc[int(cor[cor_num,1]),int(cor[cor_num,0]),0]
#print(cor_sum)
#time.sleep(0.5)
# pickle.dump({'image':image, 'line':line, 'edge':edge, 'junc':junc, 'cor':cor, 'filename':filename[:-4]}, open(data_path+'PC_'+"{:04d}".format(num)+'.pkl', "wb" ) )
pickle.dump({'image':image, 'line':line, 'edge':edge, 'junc':junc, 'cor':cor, 'filename':filename[:-4]}, open(data_path+'PCts_'+"{:04d}".format(num)+'.pkl', "wb" ) )
# pickle.dump({'image':image, 'line':line, 'edge':edge, 'junc':junc, 'cor':cor, 'filename':filename[:-4]}, open(data_path+'PCval_'+"{:04d}".format(num)+'.pkl', "wb" ) )
# pickle.dump({'image':image, 'line':line, 'edge':edge, 'junc':junc, 'cor':cor, 'filename':filename[:-4]}, open(data_path+'area5_'+"{:04d}".format(num)+'.pkl', "wb" ) )
|
nodes/2.x/python/ElevationMarker.Views.py
|
andydandy74/ClockworkForDynamo
| 147 |
52766
|
<gh_stars>100-1000
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
def GetElevationMarkerView(item):
    val = []
    if hasattr(item, "HasElevations"):
        if item.HasElevations():
            for i in range(item.MaximumViewCount):
                view = item.Document.GetElement(item.GetViewId(i))
                if view: val.append(view)
    return val
items = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetElevationMarkerView(x) for x in items]
else: OUT = GetElevationMarkerView(items)
|
www/tests/test_print.py
|
raspberrypieman/brython
| 5,926 |
52771
|
<gh_stars>1000+
funcs = [
"abs", "all", "any", "ascii", "bin", "callable", "chr", "compile",
"delattr", "dir", "divmod", "eval", "exec", "exit", "format", "getattr",
"globals", "hasattr", "hash", "help", "hex", "id", "input", "isinstance",
"issubclass", "iter", "len", "locals", "max", "min", "next", "oct",
"open", "ord", "pow", "print", "quit", "repr", "round", "setattr",
"sorted", "sum", "vars"
]
classes = [
"bool", "bytearray", "bytes", "classmethod", "complex", "dict", "enumerate",
"filter", "float", "frozenset", "int", "list", "map", "memoryview",
"object", "property", "range", "reversed", "set", "slice", "staticmethod",
"str", "super", "tuple", "type", "zip"
]
special_cases = "exit", "quit", "help"
for func in funcs:
    if func in special_cases:
        continue
    assert str(getattr(__builtins__, func)) == f"<built-in function {func}>"
for kl in classes:
    obj = getattr(__builtins__, kl)
    assert str(obj) == f"<class '{kl}'>", f"error for {kl} : {obj}"
|
L1Trigger/GlobalCaloTrigger/test/testElectrons_cfg.py
|
ckamtsikis/cmssw
| 852 |
52802
|
<reponame>ckamtsikis/cmssw<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
process = cms.Process("TestGct")
process.load("L1Trigger.GlobalCaloTrigger.test.gctTest_cff")
process.load("L1Trigger.GlobalCaloTrigger.test.gctConfig_cff")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.p1 = cms.Path(process.gctemu)
process.gctemu.doElectrons = True
process.gctemu.inputFile = 'data/testEmDummy_'
|
tests/test_provider_vultr_vultr.py
|
mjuenema/python-terrascript
| 507 |
52819
|
<filename>tests/test_provider_vultr_vultr.py<gh_stars>100-1000
# tests/test_provider_vultr_vultr.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:31:06 UTC)
def test_provider_import():
import terrascript.provider.vultr.vultr
def test_resource_import():
from terrascript.resource.vultr.vultr import vultr_bare_metal_server
from terrascript.resource.vultr.vultr import vultr_block_storage
from terrascript.resource.vultr.vultr import vultr_dns_domain
from terrascript.resource.vultr.vultr import vultr_dns_record
from terrascript.resource.vultr.vultr import vultr_firewall_group
from terrascript.resource.vultr.vultr import vultr_firewall_rule
from terrascript.resource.vultr.vultr import vultr_instance
from terrascript.resource.vultr.vultr import vultr_instance_ipv4
from terrascript.resource.vultr.vultr import vultr_iso_private
from terrascript.resource.vultr.vultr import vultr_load_balancer
from terrascript.resource.vultr.vultr import vultr_object_storage
from terrascript.resource.vultr.vultr import vultr_private_network
from terrascript.resource.vultr.vultr import vultr_reserved_ip
from terrascript.resource.vultr.vultr import vultr_reverse_ipv4
from terrascript.resource.vultr.vultr import vultr_reverse_ipv6
from terrascript.resource.vultr.vultr import vultr_snapshot
from terrascript.resource.vultr.vultr import vultr_snapshot_from_url
from terrascript.resource.vultr.vultr import vultr_ssh_key
from terrascript.resource.vultr.vultr import vultr_startup_script
from terrascript.resource.vultr.vultr import vultr_user
def test_datasource_import():
from terrascript.data.vultr.vultr import vultr_account
from terrascript.data.vultr.vultr import vultr_application
from terrascript.data.vultr.vultr import vultr_backup
from terrascript.data.vultr.vultr import vultr_bare_metal_plan
from terrascript.data.vultr.vultr import vultr_bare_metal_server
from terrascript.data.vultr.vultr import vultr_block_storage
from terrascript.data.vultr.vultr import vultr_dns_domain
from terrascript.data.vultr.vultr import vultr_firewall_group
from terrascript.data.vultr.vultr import vultr_instance
from terrascript.data.vultr.vultr import vultr_instance_ipv4
from terrascript.data.vultr.vultr import vultr_iso_private
from terrascript.data.vultr.vultr import vultr_iso_public
from terrascript.data.vultr.vultr import vultr_load_balancer
from terrascript.data.vultr.vultr import vultr_object_storage
from terrascript.data.vultr.vultr import vultr_os
from terrascript.data.vultr.vultr import vultr_plan
from terrascript.data.vultr.vultr import vultr_private_network
from terrascript.data.vultr.vultr import vultr_region
from terrascript.data.vultr.vultr import vultr_reserved_ip
from terrascript.data.vultr.vultr import vultr_reverse_ipv4
from terrascript.data.vultr.vultr import vultr_reverse_ipv6
from terrascript.data.vultr.vultr import vultr_snapshot
from terrascript.data.vultr.vultr import vultr_ssh_key
from terrascript.data.vultr.vultr import vultr_startup_script
from terrascript.data.vultr.vultr import vultr_user
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.vultr.vultr
#
# t = terrascript.provider.vultr.vultr.vultr()
# s = str(t)
#
# assert 'https://github.com/vultr/terraform-provider-vultr' in s
# assert '2.4.2' in s
|
stanza/pipeline/constituency_processor.py
|
asears/stanza
| 3,633 |
52825
|
<gh_stars>1000+
"""Processor that attaches a constituency tree to a sentence
The model used is generally one trained on a constituency treebank
such as the Penn Treebank.  When run, this processor attaches a
constituency parse tree to each sentence in the document.
TODO: a possible way to generalize this would be to make it a
ClassifierProcessor and have "sentiment" be an option.
"""
import stanza.models.constituency.trainer as trainer
from stanza.models.common import doc
from stanza.models.common.pretrain import Pretrain
from stanza.pipeline._constants import *
from stanza.pipeline.processor import UDProcessor, register_processor
@register_processor(CONSTITUENCY)
class ConstituencyProcessor(UDProcessor):
# set of processor requirements this processor fulfills
PROVIDES_DEFAULT = set([CONSTITUENCY])
# set of processor requirements for this processor
REQUIRES_DEFAULT = set([TOKENIZE, POS])
# default batch size, measured in sentences
DEFAULT_BATCH_SIZE = 50
def _set_up_model(self, config, use_gpu):
# get pretrained word vectors
pretrain_path = config.get('pretrain_path', None)
self._pretrain = Pretrain(pretrain_path) if pretrain_path else None
# set up model
charlm_forward_file = config.get('forward_charlm_path', None)
charlm_backward_file = config.get('backward_charlm_path', None)
self._model = trainer.Trainer.load(filename=config['model_path'],
pt=self._pretrain,
forward_charlm=trainer.load_charlm(charlm_forward_file),
backward_charlm=trainer.load_charlm(charlm_backward_file),
use_gpu=use_gpu)
# batch size counted as sentences
self._batch_size = config.get('batch_size', ConstituencyProcessor.DEFAULT_BATCH_SIZE)
def process(self, document):
sentences = document.sentences
# TODO: perhaps MWT should be relevant here?
# certainly parsing across an MWT boundary is an error
# TODO: maybe some constituency models are trained on UPOS not XPOS
words = [[(w.text, w.xpos) for w in s.words] for s in sentences]
trees = trainer.parse_tagged_words(self._model.model, words, self._batch_size)
document.set(CONSTITUENCY, trees, to_sentence=True)
return document
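# Minimal usage sketch: this processor is normally reached through a stanza Pipeline.
# The language code, download step and sample text below are illustrative assumptions,
# not taken from this module.
if __name__ == '__main__':
    import stanza
    stanza.download('en')
    nlp = stanza.Pipeline('en', processors='tokenize,pos,constituency')
    doc = nlp('This is a test sentence.')
    # ConstituencyProcessor.process stores the tree on each sentence via document.set
    print(doc.sentences[0].constituency)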
|
python/test/eager_mode/annotate_args.py
|
rdadolf/torch-mlir
| 213 |
52827
|
<reponame>rdadolf/torch-mlir
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
# RUN: %PYTHON %s | FileCheck %s
import torch
from framework import run_test
from torch_mlir.eager_mode.torch_mlir_dispatch import (
annotate_args_kwargs,
normalize_args_kwargs,
build_script_function,
)
# CHECK: Torch Tensor (shape=(1, 3, 32, 32), dtype=torch.float32)
# CHECK: Torch Tensor (shape=(1, 3, 32, 32), dtype=torch.float32)
# CHECK: Torch Tensor (shape=(1, 3, 32, 32), dtype=torch.float32)
# -----
# CHECK: PASS - simple
@run_test
def simple():
target = torch.ops.aten.addmm.default
A = torch.randn(1, 3, 32, 32)
B = torch.randn(1, 3, 32, 32)
C = torch.randn(1, 3, 32, 32)
args = (A, B, C)
kwargs = dict(beta=1, alpha=1)
new_args, new_kwargs = normalize_args_kwargs(target.overloadpacket, args, kwargs)
script_fun = build_script_function(target._schema, new_args, new_kwargs)
annotations, *_ = annotate_args_kwargs(script_fun, new_args, new_kwargs)
for annot in annotations:
print(annot)
# CHECK: Torch Tensor (shape=(-1, 3, 32, 32), dtype=torch.float32)
# CHECK: Torch Tensor (shape=(-1, 3, 32, 32), dtype=torch.float32)
# CHECK: Torch Tensor (shape=(-1, 3, 32, 32), dtype=torch.float32)
# -----
# CHECK: PASS - handle_zero_dim
@run_test
def handle_zero_dim():
target = torch.ops.aten.addmm.default
A = torch.randn(0, 3, 32, 32)
B = torch.randn(0, 3, 32, 32)
C = torch.randn(0, 3, 32, 32)
args = (A, B, C)
kwargs = dict(beta=1, alpha=1)
new_args, new_kwargs = normalize_args_kwargs(target.overloadpacket, args, kwargs)
script_fun = build_script_function(target._schema, new_args, new_kwargs)
annotations, *_ = annotate_args_kwargs(script_fun, new_args, new_kwargs)
for annot in annotations:
print(annot)
# CHECK: Torch Tensor (shape=(2, 5, 2, 3), dtype=torch.float32)
# CHECK: Torch Tensor (shape=(5,), dtype=torch.float32)
# CHECK: Torch Tensor (shape=(5,), dtype=torch.float32)
# CHECK: Torch Tensor (shape=(5,), dtype=torch.float32)
# CHECK: Torch Tensor (shape=(5,), dtype=torch.float32)
# CHECK: Torch Tensor (shape=(2, 5, 2, 3), dtype=torch.float32)
# CHECK: Torch Tensor (shape=(5,), dtype=torch.float32)
# CHECK: Torch Tensor (shape=(5,), dtype=torch.float32)
# -----
# CHECK: PASS - correctly_order_kwargs
@run_test
def correctly_order_kwargs():
target = torch.ops.aten.native_batch_norm.out
input = torch.randn(2, 5, 2, 3)
weight = torch.randn(5)
bias = torch.randn(5)
running_mean = torch.randn(5)
running_var = torch.randn(5)
args = (input, weight, bias, running_mean, running_var)
out = torch.empty_like(input)
save_mean = torch.empty_like(running_mean)
save_invstd = torch.empty_like(running_var)
kwargs = dict(
training=False,
momentum=0.1,
eps=0.0001,
out=out,
save_mean=save_mean,
save_invstd=save_invstd,
)
new_args, new_kwargs = normalize_args_kwargs(target.overloadpacket, args, kwargs)
script_fun = build_script_function(target._schema, new_args, new_kwargs)
annotations, *_ = annotate_args_kwargs(script_fun, new_args, new_kwargs)
for annot in annotations:
print(annot)
|
tests/providers/amazon/aws/operators/test_emr_system.py
|
ChaseKnowlden/airflow
| 15,947 |
52841
|
<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tests.test_utils.amazon_system_helpers import AWS_DAG_FOLDER, AmazonSystemTest
class EmrSystemTest(AmazonSystemTest):
"""
System tests for AWS EMR operators
"""
@classmethod
def setup_class(cls):
cls.create_emr_default_roles()
def test_run_example_dag_emr_automatic_steps(self):
self.run_dag('emr_job_flow_automatic_steps_dag', AWS_DAG_FOLDER)
def test_run_example_dag_emr_manual_steps(self):
self.run_dag('emr_job_flow_manual_steps_dag', AWS_DAG_FOLDER)
|
park/envs/tf_placement_sim/tf_placement_sim.py
|
utkarsh5k/park
| 180 |
52898
|
<gh_stars>100-1000
import os
import numpy as np
import math
from itertools import permutations
import wget
import pickle
import networkx as nx
import park
from park import core, spaces, logger
from park.utils.misc import create_folder_if_not_exists
from park.spaces import Tuple, Box, Discrete, Graph, Null
from park.param import config
from park.utils import seeding
from park.utils.directed_graph import DirectedGraph
from park.envs.tf_placement_sim.tf_pl_simulator import ImportantOpsSimulator
dropbox_links = {
'inception': 'https://www.dropbox.com/s/1r5n4e2g3hulsge/inception.pkl?dl=1',
'nasnet': 'https://www.dropbox.com/s/ufm72htk1zeuccm/nasnet.pkl?dl=1',
'nmt': 'https://www.dropbox.com/s/9rsmmv6pm11h3i8/nmt-attention-seq-30.pkl?dl=1',
}
pkl_names = {
'inception': 'inception.pkl',
'nasnet': 'nasnet.pkl',
'nmt': 'nmt-attention-seq-30.pkl',
}
class TFPlacementSimEnv(core.Env):
"""
Assign a placement to each operation group of a
computational graph of deep-learning models.
The goal is to minimize runtime of the computational graph.
* STATE *
Directed Graph with node feature being a list of the following:
(1) Cost: Op group execution time
(2) Mem: Op group's memory requirement when running
(3) Curr Placement: device id of the node based on its
current placement in the episode
(4) is_curr_node: Is this the node that is currently being placed
* ACTIONS *
[0, 1, ..., n-1] where n is the number of devices. The index
corresponding to the device id.
* REWARD *
Improvement in the runtime of the placement because of the current action
* REFERENCE *
https://arxiv.org/pdf/1706.04972.pdf
"""
def __init__(self):
# observation and action space
self.setup_env()
self.setup_space()
# random seed
self.seed(config.seed)
def possibly_download_pkl_file(self):
graph_dir = park.__path__[0] + '/envs/tf_placement_sim/graphs/'
trace_file = graph_dir + '/' + pkl_names[config.pl_graph]
create_folder_if_not_exists(graph_dir)
if not os.path.exists(trace_file):
wget.download(dropbox_links[config.pl_graph],
out=graph_dir)
return trace_file
def setup_env(self):
device_names = ['/device:GPU:%d' % i for i in range(config.pl_n_devs)]
gpu_devs = filter(lambda dev: 'GPU' in dev, device_names)
gpu_devs = list(sorted(gpu_devs))
        if config.pl_graph not in pkl_names:
            raise Exception('Requested model "%s" does not exist in the repo.\n'
                            'Please choose from one of the following: %s' %
                            (config.pl_graph, ' '.join(pkl_names.keys())))
pickled_inp_file = self.possibly_download_pkl_file()
with open(pickled_inp_file, 'rb') as f:
j = pickle.load(f)
mg, G, ungroup_map = j['optim_mg'], j['G'], j['ungrouped_mapping']
op_perf, step_stats = j['op_perf'], j['step_stats']
self.mg = mg
self.ungroup_map = ungroup_map
self.n_devs = config.pl_n_devs
self.gpu_devs = gpu_devs
self.devs = self.gpu_devs
self.device_names = device_names
self.G = G
self.sim = ImportantOpsSimulator(mg, op_perf, step_stats, device_names)
self.node_order = list(nx.topological_sort(G))
self.cost_d = self.sim.cost_d
self.out_d = {k: sum(v) for k, v in self.sim.out_d.items()}
def reset(self):
node_features = {}
edge_features = {}
cur_pl = {}
for node in self.G.nodes():
# checkout step function for this order as well
node_features[node] = [self.cost_d[node],\
self.out_d[node],\
0,\
0]
cur_pl[node] = node_features[node][2]
for neigh in self.G.neighbors(node):
# dummy edge feature for now
# TODO: Allow for no edge feature possibility
edge_features[(node, neigh)] = -1
node_features[self.node_order[0]][-1] = 1
self.s = DirectedGraph(node_features, edge_features)
self.cur_node_idx = 0
self.cur_pl = cur_pl
self.prev_rt = self.get_rt(self.cur_pl)
return self.s
def setup_space(self):
# cost (e.g., execution delay estimation in micro-seconds),
# mem (e.g., op group memory requirements on GPU in bytes),
# current placement(e.g., GPU 1),
# one-hot-bit (i.e., currently working on this node)
node_space = Box(low=0, high=10 * (1e9), shape=(len(self.G), 4), dtype=np.float32)
dummy_edge_space = Box(low=-1, high=-1, shape=(self.G.number_of_edges(),), dtype=np.int8)
self.observation_space = Graph(node_space, dummy_edge_space)
self.action_space = Discrete(self.n_devs)
def ungroup_pl(self, pl):
ungroup_map = self.ungroup_map
ungrouped_pl = {}
for op in self.mg.graph_def.node:
name = op.name
grp_ctr = ungroup_map[name]
ungrouped_pl[name] = pl[grp_ctr]
return ungrouped_pl
# takes op-group placement and
# returns runtime of the placement in seconds
def get_rt(self, pl):
pl = self.ungroup_pl(pl)
rt = self.sim.simulate(pl)
return rt / 1e6
def step(self, action):
assert self.action_space.contains(action)
action = int(action)
node_order = self.node_order
cur_node_idx = self.cur_node_idx
cur_node = node_order[cur_node_idx]
next_node = node_order[cur_node_idx + 1]
self.cur_pl[cur_node] = action
rt = self.get_rt(self.cur_pl)
reward = rt - self.prev_rt
self.s.update_nodes({cur_node:\
[self.cost_d[cur_node],\
self.out_d[cur_node],\
int(action),\
0], \
next_node:\
[self.cost_d[next_node],\
self.out_d[next_node],\
self.cur_pl[next_node],\
1]
})
self.cur_node_idx += 1
self.prev_rt = rt
if 1 + self.cur_node_idx == len(self.node_order):
done = True
else:
done = False
assert self.observation_space.contains(self.s)
return self.s, reward, done, {}
def seed(self, seed):
self.np_random = seeding.np_random(seed)
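# Minimal rollout sketch: drives the environment described in the class docstring with
# a uniformly random device choice per op group. Instantiating the class directly
# (rather than going through park.make) and the random policy are illustrative assumptions.
if __name__ == '__main__':
    env = TFPlacementSimEnv()
    obs = env.reset()
    done = False
    total_reward = 0.0
    while not done:
        # pick a device id in [0, pl_n_devs) for the node currently being placed
        act = np.random.randint(config.pl_n_devs)
        obs, reward, done, _ = env.step(act)
        total_reward += reward
    print('cumulative runtime change (seconds):', total_reward)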
|
docs/examples/required_note.py
|
JonathanGrant/marbles
| 109 |
52915
|
import marbles.core
class ComplexTestCase(marbles.core.AnnotatedTestCase):
    def test_for_edge_case(self):
        self.assertTrue(False)
if __name__ == '__main__':
    marbles.core.main()
|
plyer/platforms/android/vibrator.py
|
EdwardCoventry/plyer
| 1,184 |
52921
|
"""Implementation Vibrator for Android."""
from jnius import autoclass, cast
from plyer.facades import Vibrator
from plyer.platforms.android import activity
from plyer.platforms.android import SDK_INT
Context = autoclass("android.content.Context")
vibrator_service = activity.getSystemService(Context.VIBRATOR_SERVICE)
vibrator = cast("android.os.Vibrator", vibrator_service)
if SDK_INT >= 26:
VibrationEffect = autoclass("android.os.VibrationEffect")
class AndroidVibrator(Vibrator):
"""Android Vibrator class.
Supported features:
* vibrate for some period of time.
* vibrate from given pattern.
* cancel vibration.
* check whether Vibrator exists.
"""
def _vibrate(self, time=None, **kwargs):
if vibrator:
if SDK_INT >= 26:
vibrator.vibrate(
VibrationEffect.createOneShot(
int(1000 * time), VibrationEffect.DEFAULT_AMPLITUDE
)
)
else:
vibrator.vibrate(int(1000 * time))
def _pattern(self, pattern=None, repeat=None, **kwargs):
pattern = [int(1000 * time) for time in pattern]
if vibrator:
if SDK_INT >= 26:
vibrator.vibrate(
VibrationEffect.createWaveform(pattern, repeat)
)
else:
vibrator.vibrate(pattern, repeat)
def _exists(self, **kwargs):
if SDK_INT >= 11:
return vibrator.hasVibrator()
elif vibrator_service is None:
raise NotImplementedError()
return True
def _cancel(self, **kwargs):
vibrator.cancel()
def instance():
"""Returns Vibrator with android features.
:return: instance of class AndroidVibrator
"""
return AndroidVibrator()
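# Minimal usage sketch: applications normally go through the plyer facade
# (`from plyer import vibrator; vibrator.vibrate(0.5)`); the direct calls below use this
# module's instance() factory, and the durations are illustrative assumptions.
# vib = instance()
# vib.vibrate(0.5)                   # vibrate for half a second
# vib.pattern([0, 0.2, 0.1, 0.2])    # wait/vibrate pattern, in seconds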
|
tests/test_config_pumpkin_proxy.py
|
oza6ut0ne/wifipumpkin3
| 911 |
52949
|
<gh_stars>100-1000
import unittest
from wifipumpkin3.core.common.platforms import Linux
import wifipumpkin3.core.utility.constants as C
from wifipumpkin3.core.utility.collection import SettingsINI
class TestConfigPumpkinProxy(unittest.TestCase):
def test_config_key_set(self):
self.config = SettingsINI(C.CONFIG_PP_INI)
self.result = "http://example.com/foo.js"
self.value = self.config.get("set_js_inject", "url")
self.assertEqual(self.result, self.value)
def test_get_all_configkey_list(self):
self.config = SettingsINI(C.CONFIG_PP_INI)
self.result = ["url"]
self.value = self.config.get_all_childname("set_js_inject")
self.assertEqual(self.result, self.value)
if __name__ == "__main__":
unittest.main()
|
f5/bigip/tm/auth/test/unit/test_ldap.py
|
nghia-tran/f5-common-python
| 272 |
52967
|
<gh_stars>100-1000
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.bigip import ManagementRoot
from f5.bigip.tm.auth.ldap import Ldap
from f5.sdk_exception import InvalidName
from f5.sdk_exception import MissingRequiredCreationParameter
import mock
import pytest
@pytest.fixture
def FakeLdap():
fake_ldap = mock.MagicMock()
fake_ldapobj = Ldap(fake_ldap)
return fake_ldapobj
class TestCreate(object):
def test_create_two(self, fakeicontrolsession):
b = ManagementRoot('localhost', 'admin', 'admin')
l1 = b.tm.auth.ldaps.ldap
l2 = b.tm.auth.ldaps.ldap
assert l1 is not l2
def test_create_no_args(self, FakeLdap):
with pytest.raises(MissingRequiredCreationParameter):
FakeLdap.create()
def test_create_bad_name(self, FakeLdap):
with pytest.raises(InvalidName):
FakeLdap.create(name='testauth')
|
crowd_anki/anki/adapters/anki_deck.py
|
ll-in-anki/CrowdAnki
| 391 |
53048
|
<filename>crowd_anki/anki/adapters/anki_deck.py
from cached_property import cached_property
from dataclasses import dataclass
from typing import Callable
@dataclass
class AnkiDeck:
_data: dict
deck_name_separator = '::'
@property
def data(self):
return self._data
@property
def is_dynamic(self):
return bool(self.data['dyn'])
@property
def name(self):
return self.data['name']
class LazyDeck(AnkiDeck):
def __init__(self, deck_initializer: Callable[[], dict]):
self.deck_initializer = deck_initializer
@cached_property
def data(self):
return self.deck_initializer()
class NamedLazyDeck(LazyDeck):
def __init__(self, name: str, name_initializer: Callable[[str], dict]):
super().__init__(lambda: name_initializer(name))
self._name = name
@property
def name(self):
return self._name
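# Minimal usage sketch: the adapters wrap the raw deck dicts Anki stores; the literal
# dict and deck name below are illustrative assumptions covering only the keys read above.
if __name__ == "__main__":
    deck = AnkiDeck({"name": "Japanese::JLPT N5", "dyn": 0})
    assert deck.name == "Japanese::JLPT N5" and not deck.is_dynamic
    lazy = NamedLazyDeck("Japanese::JLPT N5", lambda name: {"name": name, "dyn": 0})
    assert lazy.name == "Japanese::JLPT N5"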
|
StockAnalysisSystem/porting/vnpy_chart/__init__.py
|
lifg2000/StockAnalysisSystem
| 138 |
53082
|
<gh_stars>100-1000
from .widget import ChartWidget
from .item import CandleItem, VolumeItem, ChartItem, MemoItem
from .data import BarData
from .constant import *
|
dev/Gems/CloudGemPlayerAccount/AWS/resource-manager-code/command.py
|
brianherrera/lumberyard
| 1,738 |
53095
|
<filename>dev/Gems/CloudGemPlayerAccount/AWS/resource-manager-code/command.py
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #1 $
import resource_manager.cli
import pa_service_api
def add_cli_commands(hook, subparsers, add_common_args, **kwargs):
subparser = subparsers.add_parser("player-account", help="Commands to manage the CloudGemPlayerAccount gem")
subparser.register('action', 'parsers', resource_manager.cli.AliasedSubParsersAction)
player_account_subparsers = subparser.add_subparsers(dest='subparser_name', metavar='COMMAND')
subparser = player_account_subparsers.add_parser('add-player', help='Add a new player')
subparser.add_argument('--username', type=str, required=True, help='The cognito username of the account to create')
subparser.add_argument('--email', type=str, required=True, help='The email address for the player')
subparser.add_argument('--playername', type=str, required=False, help='The name of the player in the game.')
    subparser.add_argument('--givenname', type=str, required=False, help="The player's given name.")
    subparser.add_argument('--familyname', type=str, required=False, help="The player's family name.")
    subparser.add_argument('--nickname', type=str, required=False, help="The player's nickname.")
    subparser.add_argument('--gender', type=str, required=False, choices=pa_service_api.GENDER_CHOICES, help="The player's gender.")
    subparser.add_argument('--locale', type=str, required=False, help="The player's locale.")
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_add_player)
subparser = player_account_subparsers.add_parser('ban-player', help='Ban a player. See remove_player_ban to restore player')
subparser.add_argument('--account-id', type=str, required=True, help='The account id to ban')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_ban_player)
subparser = player_account_subparsers.add_parser('confirm-player', help='Force confirm a player')
subparser.add_argument('--username', type=str, required=True, help='The cognito username of the account to confirm')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_confirm_player)
subparser = player_account_subparsers.add_parser('edit-player', help='Edit a players settings')
subparser.add_argument('--account-id', type=str, required=True, help='The account id to edit')
subparser.add_argument('--playername', type=str, required=False, help='The name of the player in the game.')
    subparser.add_argument('--givenname', type=str, required=False, help="The player's given name.")
    subparser.add_argument('--familyname', type=str, required=False, help="The player's family name.")
    subparser.add_argument('--nickname', type=str, required=False, help="The player's nickname.")
    subparser.add_argument('--gender', type=str, required=False, choices=pa_service_api.GENDER_CHOICES, help="The player's gender.")
    subparser.add_argument('--locale', type=str, required=False, help="The player's locale.")
subparser.add_argument('--email', type=str, required=False, help='The email address for the player')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_edit_player)
subparser = player_account_subparsers.add_parser('remove-player-ban', help='Remove a player ban')
subparser.add_argument('--account-id', type=str, required=True, help='The account id to restore')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_remove_player_ban)
subparser = player_account_subparsers.add_parser('reset-player-password', help='Reset a player password')
subparser.add_argument('--username', type=str, required=True, help='The cognito username of the account to target')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_reset_player_password)
subparser = player_account_subparsers.add_parser('show-banned-players', help='List banned players in the Gem')
subparser.add_argument('--page-token', type=str, required=False, default=None, help='The pagination token to get the next page.')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_list_banned_players)
subparser = player_account_subparsers.add_parser('show-players', help='List registered players in the Gem')
subparser.add_argument('--filter-type', type=str, required=False, choices=pa_service_api.SEARCH_FILTER_CHOICES, help='The type of filter to apply')
subparser.add_argument('--filter-value', type=str, required=False, help='The value for the filter as a string. '
'For example the email address for the CognitoEmail filter.')
subparser.add_argument('--page-token', type=str, required=False, default=None, help='The pagination token to get the next page.')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_list_players)
subparser = player_account_subparsers.add_parser('show-player-details', help='Show details about a player')
subparser.add_argument('--account-id', type=str, required=True, help='The account id to show details for')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_list_player_details)
subparser = player_account_subparsers.add_parser('show-logs', help='Show recent log events for ServiceLambda')
subparser.add_argument('--minutes', type=int, required=False, help='How far back from now to attempt to display. Default is 10mins')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_show_log_events)
|
source/vsm/vsm/db/sqlalchemy/migrate_repo/versions/026_remove_foreign_key.py
|
ramkrsna/virtual-storage-manager
| 172 |
53113
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import Integer, MetaData, String
from sqlalchemy import Table, Index, ForeignKey
from sqlalchemy.engine.base import Engine
from migrate.changeset.constraint import ForeignKeyConstraint
from sqlalchemy.engine import reflection
from sqlalchemy import create_engine
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
if migrate_engine.name == 'sqlite':
return
storage_pools = 'storage_pools'
storage_groups = 'storage_groups'
col = ''
insp = reflection.Inspector.from_engine(migrate_engine)
foreign_keys = insp.get_foreign_keys(storage_pools)
for key in foreign_keys:
if storage_groups == key['referred_table']:
sql_str = "ALTER TABLE %s DROP FOREIGN KEY %s;" % (storage_pools, key['name'])
ret = migrate_engine.execute(sql_str)
def downgrade(migrate_engine):
if migrate_engine.name == 'sqlite':
return
#meta = MetaData()
#meta.bind = migrate_engine
#storage_group = Table('storage_groups',
# meta,
# autoload=True)
#column_status = Column('status', String(255), default="OUT", nullable=False)
try:
#storage_group.drop_column(column_status)
pass
except Exception:
raise
|
homeassistant/components/trafikverket_train/util.py
|
MrDelik/core
| 30,023 |
53136
|
<gh_stars>1000+
"""Utils for trafikverket_train."""
from __future__ import annotations
from datetime import time
def create_unique_id(
    from_station: str, to_station: str, depart_time: time | str | None, weekdays: list
) -> str:
    """Create unique id."""
    timestr = str(depart_time) if depart_time else ""
    return (
        f"{from_station.casefold().replace(' ', '')}-{to_station.casefold().replace(' ', '')}"
        f"-{timestr.casefold().replace(' ', '')}-{str(weekdays)}"
    )
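# Worked example: with the arguments below the helper returns
# "stockholmc-uppsalac-07:30:00-[1, 2]". Station names and the weekday list are
# illustrative assumptions.
if __name__ == "__main__":
    print(create_unique_id("Stockholm C", "Uppsala C", time(7, 30), [1, 2]))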
|
examples/cross_origin/web.py
|
benthomasson/gevent-socketio
| 625 |
53138
|
import os
from bottle import Bottle, static_file, run
HERE = os.path.abspath(os.path.dirname(__file__))
STATIC = os.path.join(HERE, 'static')
app = Bottle()
@app.route('/')
@app.route('/<filename:path>')
def serve(filename='index.html'):
    return static_file(filename, root=STATIC)
if __name__ == '__main__':
    run(app=app, host='localhost', port=8080)
|
docs/tutorial/python/sanic/users_if.py
|
mrpotes/go-raml
| 142 |
53369
|
<reponame>mrpotes/go-raml
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from sanic import Blueprint
from sanic.views import HTTPMethodView
from sanic.response import text
from . import users_api
from .oauth2_itsyouonline import oauth2_itsyouonline
users_if = Blueprint('users_if')
class usersView(HTTPMethodView):
async def get(self, request):
if not await oauth2_itsyouonline([]).check_token(request):
return text('', 401)
return await users_api.users_get(request)
async def post(self, request):
if not await oauth2_itsyouonline(["user:memberof:goraml-admin"]).check_token(request):
return text('', 401)
return await users_api.users_post(request)
users_if.add_route(usersView.as_view(), '/users')
class users_byusernameView(HTTPMethodView):
async def get(self, request, username):
if not await oauth2_itsyouonline(["user:memberof:goraml"]).check_token(request):
return text('', 401)
return await users_api.users_byUsername_get(request, username)
users_if.add_route(users_byusernameView.as_view(), '/users/<username>')
|
aser/conceptualize/utils.py
|
ZfSangkuan/ASER
| 256 |
53377
|
from collections import defaultdict
from copy import copy, deepcopy
from tqdm import tqdm
from ..eventuality import Eventuality
from ..relation import Relation
def conceptualize_eventualities(aser_conceptualizer, eventualities):
""" Conceptualize eventualities by an ASER conceptualizer
:param aser_conceptualizer: an ASER conceptualizer
:type aser_conceptualizer: aser.conceptualize.aser_conceptualizer.BaseASERConceptualizer
:param eventualities: a list of eventualities
:type eventualities: List[aser.eventuality.Eventuality]
:return: a dictionary from cid to concept, a list of concept-instance pairs, a dictionary from cid to weights
:rtype: Dict[str, aser.concept.ASERConcept], List[aser.concept.ASERConcept, aser.eventuality.Eventuality, float], Dict[str, float]
"""
cid2concept = dict()
concept_instance_pairs = []
cid2score = dict()
for eventuality in tqdm(eventualities):
results = aser_conceptualizer.conceptualize(eventuality)
for concept, score in results:
if concept.cid not in cid2concept:
cid2concept[concept.cid] = deepcopy(concept)
concept = cid2concept[concept.cid]
if (eventuality.eid, eventuality.pattern, score) not in concept.instances:
concept.instances.append(((eventuality.eid, eventuality.pattern, score)))
if concept.cid not in cid2score:
cid2score[concept.cid] = 0.0
cid2score[concept.cid] += score * eventuality.frequency
concept_instance_pairs.append((concept, eventuality, score))
return cid2concept, concept_instance_pairs, cid2score
def build_concept_relations(concept_conn, relations):
""" Build relations between conceptualized eventualities from the given relations between eventualities
:param concept_conn: ASER concept KG connection
:type concept_conn: aser.database.kg_connection.ASERConceptConnection
:param relations: relations between eventualities
:type relations: List[aser.relation.Relations]
:return: a dictionary from rid to relations between conceptualized eventualities
:rtype: Dict[str, aser.relation.Relation]
"""
rid2relation = dict()
hid2related_events = defaultdict(list)
for relation in tqdm(relations):
hid2related_events[relation.hid].append((relation.tid, relation))
for h_cid in tqdm(concept_conn.cids):
instances = concept_conn.get_eventualities_given_concept(h_cid)
for h_eid, pattern, instance_score in instances:
# eid -> event -> related eids -> related events, relations -> related concepts, relations
related_events = hid2related_events[h_eid]
for t_eid, relation in related_events:
concept_score_pairs = concept_conn.get_concepts_given_eventuality(t_eid)
for t_concept, score in concept_score_pairs:
t_cid = t_concept.cid
if h_cid == t_cid:
continue
rid = Relation.generate_rid(h_cid, t_cid)
if rid not in rid2relation:
rid2relation[rid] = Relation(h_cid, t_cid)
rid2relation[rid].update({k: v * instance_score * score for k, v in relation.relations.items()})
return rid2relation
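# Minimal wiring sketch: how the two helpers are typically combined. The
# conceptualizer, KG connection, eventuality list and relation list all come from
# other ASER components; the names below are placeholders/assumptions, so this is
# left as a commented outline rather than runnable code.
# cid2concept, concept_instance_pairs, cid2score = conceptualize_eventualities(
#     aser_conceptualizer, eventualities)
# rid2relation = build_concept_relations(concept_conn, relations)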
|
api/utils/input/__init__.py
|
mmangione/alcali
| 306 |
53422
|
from shlex import split
import json
class RawCommand:
def __init__(self, command, client="local", posix=True, inline=False):
# TODO: check shlex.quote, raw string, etc..
if inline:
self.command = split(command, posix=posix)
else:
self.command = split(command, posix=posix)[1:]
self.options = {"expr_form": "glob"}
self.client = client
def parse(self):
args = self.command
if args[0].startswith("--client"):
self.client = args[0].split("=")[1]
args.pop(0)
low = {"client": self.client}
if self.client.startswith("local"):
if len(args) < 2:
return "Command or target not specified"
# Batch option
low["batch"] = None
if self.client == "local_batch":
batch_index = None
for index, arg in enumerate(args):
if arg in ["-b", "--batch", "--batch-size"]:
low["batch"] = args[index + 1]
batch_index = index
if batch_index:
args.pop(batch_index)
args.pop(batch_index)
# Timeout option
timeout_index = None
for index, arg in enumerate(args):
if arg in ["-t", "--timeout"]:
low["timeout"] = int(args[index + 1])
timeout_index = index
if timeout_index:
args.pop(timeout_index)
args.pop(timeout_index)
# take care of targeting.
target_dict = {
"pcre": ["-E", "--pcre"],
"list": ["-L", "--list"],
"grain": ["-G", "--grain"],
"grain_pcre": ["--grain-pcre"],
"pillar": ["-I", "--pillar"],
"pillar_pcre": ["--pillar-pcre"],
"range": ["-R", "--range"],
"compound": ["-C", "--compound"],
"nodegroup": ["-N", "--nodegroup"],
}
for key, value in target_dict.items():
if args[0] in value:
self.options["expr_form"] = key
args.pop(0)
low["tgt_type"] = self.options["expr_form"]
low["tgt"] = args.pop(0)
low["fun"] = args.pop(0)
low["arg"] = args
elif self.client.startswith("runner") or self.client.startswith("wheel"):
low["fun"] = args.pop(0)
for arg in args:
if "=" in arg:
key, value = arg.split("=", 1)
try:
low[key] = json.loads(value)
except json.JSONDecodeError:
low[key] = value
else:
low.setdefault("arg", []).append(arg)
else:
# This should never happen
return "Client not implemented: {0}".format(self.client)
return [low]
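# Minimal usage sketch: parsing a salt-style command line into the lowstate dict the
# API consumes. The command string is an illustrative assumption.
if __name__ == "__main__":
    cmd = RawCommand("salt -G 'os:Ubuntu' test.ping")
    # -> [{'client': 'local', 'batch': None, 'tgt_type': 'grain',
    #      'tgt': 'os:Ubuntu', 'fun': 'test.ping', 'arg': []}]
    print(cmd.parse())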
|
tests/test_ns.py
|
TheCuriousNerd/happy-transformer
| 277 |
53426
|
from happytransformer import HappyNextSentence
def test_sp_true():
    happy_ns = HappyNextSentence()
    result = happy_ns.predict_next_sentence(
        "Hi nice to meet you. How old are you?",
        "I am 21 years old."
    )
    assert result > 0.5
def test_sp_false():
    happy_ns = HappyNextSentence()
    result = happy_ns.predict_next_sentence(
        "How old are you?",
        "The Eiffel Tower is in Paris."
    )
    assert result < 0.5
def test_sp_save():
    happy = HappyNextSentence()
    happy.save("model/")
    result_before = happy.predict_next_sentence(
        "How old are you?",
        "The Eiffel Tower is in Paris."
    )
    happy = HappyNextSentence(load_path="model/")
    result_after = happy.predict_next_sentence(
        "How old are you?",
        "The Eiffel Tower is in Paris."
    )
    assert result_before == result_after
|
bagpy/__init__.py
|
jmscslgroup/rosbagpy
| 107 |
53435
|
<reponame>jmscslgroup/rosbagpy
# Initial Date: March 2, 2020
# Author: <NAME>
# Copyright (c) <NAME>, Arizona Board of Regents
# All rights reserved.
from .bagreader import bagreader
from .bagreader import animate_timeseries
from .bagreader import create_fig
|
robot-server/robot_server/robot/calibration/pipette_offset/constants.py
|
anuwrag/opentrons
| 235 |
53512
|
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING
from robot_server.robot.calibration.constants import STATE_WILDCARD
if TYPE_CHECKING:
from typing_extensions import Final
class PipetteOffsetCalibrationState(str, Enum):
sessionStarted = "sessionStarted"
labwareLoaded = "labwareLoaded"
preparingPipette = "preparingPipette"
inspectingTip = "inspectingTip"
joggingToDeck = "joggingToDeck"
savingPointOne = "savingPointOne"
calibrationComplete = "calibrationComplete"
sessionExited = "sessionExited"
WILDCARD = STATE_WILDCARD
class PipetteOffsetWithTipLengthCalibrationState(str, Enum):
sessionStarted = "sessionStarted"
labwareLoaded = "labwareLoaded"
measuringNozzleOffset = "measuringNozzleOffset"
preparingPipette = "preparingPipette"
inspectingTip = "inspectingTip"
measuringTipOffset = "measuringTipOffset"
joggingToDeck = "joggingToDeck"
savingPointOne = "savingPointOne"
calibrationComplete = "calibrationComplete"
sessionExited = "sessionExited"
tipLengthComplete = "tipLengthComplete"
WILDCARD = STATE_WILDCARD
TIP_RACK_SLOT: Final = "8"
|
python/paddle/fluid/tests/unittests/test_psroi_pool_op.py
|
zmxdream/Paddle
| 17,085 |
53535
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import math
import numpy as np
import unittest
from op_test import OpTest
def calc_psroi_pool(x, rois, rois_num_per_img, output_channels, spatial_scale,
pooled_height, pooled_width):
"""
Psroi_pool implemented by Numpy.
x: 4-D as (N, C, H, W),
rois: 2-D as [[x1, y1, x2, y2], ...],
rois_num_per_img: 1-D as [nums_of_batch_0, nums_of_batch_1, ...]
"""
output_shape = (len(rois), output_channels, pooled_height, pooled_width)
out_data = np.zeros(output_shape)
batch_id = 0
rois_num_id = 0
rois_num_left = rois_num_per_img[rois_num_id]
for i in range(len(rois)):
roi = rois[i]
roi_batch_id = batch_id
rois_num_left -= 1
if rois_num_left == 0:
rois_num_id += 1
if rois_num_id < len(rois_num_per_img):
rois_num_left = rois_num_per_img[rois_num_id]
batch_id += 1
roi_start_w = round(roi[0]) * spatial_scale
roi_start_h = round(roi[1]) * spatial_scale
roi_end_w = (round(roi[2]) + 1.) * spatial_scale
roi_end_h = (round(roi[3]) + 1.) * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / float(pooled_height)
bin_size_w = roi_width / float(pooled_width)
x_i = x[roi_batch_id]
for c in range(output_channels):
for ph in range(pooled_height):
for pw in range(pooled_width):
hstart = int(
math.floor(float(ph) * bin_size_h + roi_start_h))
wstart = int(
math.floor(float(pw) * bin_size_w + roi_start_w))
hend = int(
math.ceil(float(ph + 1) * bin_size_h + roi_start_h))
wend = int(
math.ceil(float(pw + 1) * bin_size_w + roi_start_w))
hstart = min(max(hstart, 0), x.shape[2])
hend = min(max(hend, 0), x.shape[2])
wstart = min(max(wstart, 0), x.shape[3])
wend = min(max(wend, 0), x.shape[3])
c_in = (c * pooled_height + ph) * pooled_width + pw
is_empty = (hend <= hstart) or (wend <= wstart)
out_sum = 0.
for ih in range(hstart, hend):
for iw in range(wstart, wend):
out_sum += x_i[c_in, ih, iw]
bin_area = (hend - hstart) * (wend - wstart)
out_data[i, c, ph, pw] = 0. if is_empty else (
out_sum / float(bin_area))
return out_data
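# Shape sanity-check sketch for the reference implementation above: with an input of
# shape (1, 8, 6, 4), two ROIs from the single image, 2 output channels and a 2x2 bin
# grid, the result has shape (len(rois), output_channels, pooled_height, pooled_width)
# = (2, 2, 2, 2). The literal ROI coordinates are illustrative assumptions.
# x = np.random.random((1, 8, 6, 4))
# rois = [[0, 0, 8, 8], [4, 4, 12, 12]]
# out = calc_psroi_pool(x, rois, [2], 2, 0.25, 2, 2)
# assert out.shape == (2, 2, 2, 2)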
class TestPSROIPoolOp(OpTest):
def set_data(self):
paddle.enable_static()
self.init_test_case()
self.make_rois()
self.outs = calc_psroi_pool(self.x, self.boxes, self.boxes_num,
self.output_channels, self.spatial_scale,
self.pooled_height,
self.pooled_width).astype('float64')
self.inputs = {
'X': self.x,
'ROIs': (self.rois_with_batch_id[:, 1:5], self.rois_lod)
}
self.attrs = {
'output_channels': self.output_channels,
'spatial_scale': self.spatial_scale,
'pooled_height': self.pooled_height,
'pooled_width': self.pooled_width
}
self.outputs = {'Out': self.outs}
def init_test_case(self):
self.batch_size = 3
self.channels = 3 * 2 * 2
self.height = 6
self.width = 4
self.x_dim = [self.batch_size, self.channels, self.height, self.width]
self.spatial_scale = 1.0 / 4.0
self.output_channels = 3
self.pooled_height = 2
self.pooled_width = 2
self.x = np.random.random(self.x_dim).astype('float64')
def make_rois(self):
rois = []
self.rois_lod = [[]]
for bno in range(self.batch_size):
self.rois_lod[0].append(bno + 1)
for i in range(bno + 1):
x1 = np.random.random_integers(
0, self.width // self.spatial_scale - self.pooled_width)
y1 = np.random.random_integers(
0, self.height // self.spatial_scale - self.pooled_height)
x2 = np.random.random_integers(x1 + self.pooled_width,
self.width // self.spatial_scale)
y2 = np.random.random_integers(
y1 + self.pooled_height, self.height // self.spatial_scale)
roi = [bno, x1, y1, x2, y2]
rois.append(roi)
self.rois_num = len(rois)
self.rois_with_batch_id = np.array(rois).astype('float64')
self.boxes = self.rois_with_batch_id[:, 1:]
self.boxes_num = np.array(
[bno + 1 for bno in range(self.batch_size)]).astype('int32')
def setUp(self):
self.op_type = 'psroi_pool'
self.set_data()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase):
def setUp(self):
self.x = np.random.random([2, 490, 28, 28]).astype(np.float32)
self.boxes = np.array(
[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]).astype(np.float32)
self.boxes_num = np.array([1, 2]).astype(np.int32)
def test_output_size(self):
def test_output_size_is_int():
output_size = 7
out = paddle.vision.ops.psroi_pool(
paddle.to_tensor(self.x),
paddle.to_tensor(self.boxes),
paddle.to_tensor(self.boxes_num), output_size).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
1.0, 7, 7)
self.assertTrue(np.allclose(out, expect_out))
def test_output_size_is_tuple():
output_size = (7, 7)
out = paddle.vision.ops.psroi_pool(
paddle.to_tensor(self.x),
paddle.to_tensor(self.boxes),
paddle.to_tensor(self.boxes_num), output_size).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
1.0, 7, 7)
self.assertTrue(np.allclose(out, expect_out))
def test_dytype_is_float64():
output_size = (7, 7)
out = paddle.vision.ops.psroi_pool(
paddle.to_tensor(self.x, 'float64'),
paddle.to_tensor(self.boxes, 'float64'),
paddle.to_tensor(self.boxes_num, 'int32'), output_size).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
1.0, 7, 7)
self.assertTrue(np.allclose(out, expect_out))
places = ['cpu']
if paddle.fluid.core.is_compiled_with_cuda():
places.append('gpu')
for place in places:
paddle.set_device(place)
test_output_size_is_int()
test_output_size_is_tuple()
test_dytype_is_float64()
class TestPSROIPoolDynamicClassAPI(unittest.TestCase):
def setUp(self):
self.x = np.random.random([2, 128, 32, 32]).astype(np.float32)
self.boxes = np.array([[3, 5, 6, 13], [7, 4, 22, 18], [4, 5, 7, 10],
[5, 3, 25, 21]]).astype(np.float32)
self.boxes_num = np.array([2, 2]).astype(np.int32)
def test_output_size(self):
def test_output_size_is_int():
psroi_module = paddle.vision.ops.PSRoIPool(8, 1.1)
out = psroi_module(
paddle.to_tensor(self.x),
paddle.to_tensor(self.boxes),
paddle.to_tensor(self.boxes_num)).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2,
1.1, 8, 8)
self.assertTrue(np.allclose(out, expect_out))
def test_output_size_is_tuple():
psroi_pool_module = paddle.vision.ops.PSRoIPool(8, 1.1)
out = psroi_pool_module(
paddle.to_tensor(self.x),
paddle.to_tensor(self.boxes),
paddle.to_tensor(self.boxes_num)).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2,
1.1, 8, 8)
self.assertTrue(np.allclose(out, expect_out))
def test_dytype_is_float64():
psroi_pool_module = paddle.vision.ops.PSRoIPool(8, 1.1)
out = psroi_pool_module(
paddle.to_tensor(self.x, 'float64'),
paddle.to_tensor(self.boxes, 'float64'),
paddle.to_tensor(self.boxes_num, 'int32')).numpy()
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2,
1.1, 8, 8)
self.assertTrue(np.allclose(out, expect_out))
paddle.disable_static()
places = ['cpu']
if paddle.fluid.core.is_compiled_with_cuda():
places.append('gpu')
for place in places:
paddle.set_device(place)
test_output_size_is_int()
test_output_size_is_tuple()
test_dytype_is_float64()
class TestPSROIPoolBoxesNumError(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.x = paddle.uniform([2, 490, 28, 28], dtype='float32')
self.boxes = paddle.to_tensor(
[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32')
def test_errors(self):
def test_boxes_num_nums_error():
boxes_num = paddle.to_tensor([1, 5], 'int32')
out = paddle.vision.ops.psroi_pool(
self.x, self.boxes, boxes_num, output_size=7)
self.assertRaises(ValueError, test_boxes_num_nums_error)
def test_boxes_num_length_error():
boxes_num = paddle.to_tensor([1, 1, 1], 'int32')
out = paddle.vision.ops.psroi_pool(
self.x, self.boxes, boxes_num, output_size=7)
self.assertRaises(ValueError, test_boxes_num_length_error)
class TestPSROIPoolChannelError(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.x = paddle.uniform([2, 490, 28, 28], dtype='float32')
self.boxes = paddle.to_tensor(
[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], 'float32')
self.output_size = 4
def test_errors(self):
def test_channel_error():
boxes_num = paddle.to_tensor([2, 1], 'int32')
out = paddle.vision.ops.psroi_pool(self.x, self.boxes, boxes_num,
self.output_size)
self.assertRaises(ValueError, test_channel_error)
class TestPSROIPoolStaticAPI(unittest.TestCase):
def setUp(self):
paddle.enable_static()
self.x_placeholder = paddle.static.data(
name='x', shape=[2, 490, 28, 28])
self.x = np.random.random([2, 490, 28, 28]).astype(np.float32)
self.boxes_placeholder = paddle.static.data(
name='boxes', shape=[3, 4], lod_level=1)
self.boxes = np.array(
[[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]]).astype(np.float32)
self.boxes_num = np.array([1, 2]).astype(np.int32)
def test_function_in_static(self):
output_size = 7
out = paddle.vision.ops.psroi_pool(self.x_placeholder,
self.boxes_placeholder,
self.boxes_num, output_size)
expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10,
1.0, 7, 7)
places = [paddle.CPUPlace()]
if paddle.fluid.core.is_compiled_with_cuda():
places.append(paddle.CUDAPlace(0))
for place in places:
exe = paddle.static.Executor(place)
boxes_lod_data = paddle.fluid.create_lod_tensor(self.boxes,
[[1, 2]], place)
out_res = exe.run(paddle.static.default_main_program(),
feed={'x': self.x,
'boxes': boxes_lod_data},
fetch_list=[out.name])
self.assertTrue(np.allclose(out_res, expect_out))
if __name__ == '__main__':
unittest.main()
|
yolo/config.py
|
chemetc/maskcam
| 179 |
53561
|
################################################################################
# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
import yaml
class Config:
def __init__(self, config_file_path):
# Load config file
with open(config_file_path, "r") as stream:
self._config = yaml.load(stream, Loader=yaml.FullLoader)
# Define colors to be used internally through the app, and also externally if wanted
self.colors = {
"green": (0, 128, 0),
"white": (255, 255, 255),
"olive": (0, 128, 128),
"black": (0, 0, 0),
"navy": (128, 0, 0),
"red": (0, 0, 255),
"pink": (128, 128, 255),
"maroon": (0, 0, 128),
"grey": (128, 128, 128),
"purple": (128, 0, 128),
"yellow": (0, 255, 255),
"lime": (0, 255, 0),
"fuchsia": (255, 0, 255),
"aqua": (255, 255, 0),
"blue": (255, 0, 0),
"teal": (128, 128, 0),
"silver": (192, 192, 192),
}
def __getitem__(self, name):
return self._config[name]
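# Minimal usage sketch: the object indexes into the parsed YAML mapping; the file name
# and the YAML key below are illustrative assumptions, not taken from the repository.
if __name__ == "__main__":
    config = Config("maskcam_config.yml")
    print(config.colors["green"])      # colors are defined by this class itself
    print(config["some_section"])      # any top-level YAML key; the name is an assumption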
|
source/remediation_runbooks/scripts/CreateAccessLoggingBucket_createloggingbucket.py
|
j-erickson/aws-security-hub-automated-response-and-remediation
| 129 |
53562
|
#!/usr/bin/python
###############################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not #
# use this file except in compliance with the License. A copy of the License #
# is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0/ #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express #
# or implied. See the License for the specific language governing permis- #
# sions and limitations under the License. #
###############################################################################
import boto3
from botocore.exceptions import ClientError
from botocore.config import Config
def connect_to_s3(boto_config):
return boto3.client('s3', config=boto_config)
def create_logging_bucket(event, context):
boto_config = Config(
retries ={
'mode': 'standard'
}
)
s3 = connect_to_s3(boto_config)
try:
kwargs = {
'Bucket': event['BucketName'],
'GrantWrite': 'uri=http://acs.amazonaws.com/groups/s3/LogDelivery',
'GrantReadACP': 'uri=http://acs.amazonaws.com/groups/s3/LogDelivery'
}
if event['AWS_REGION'] != 'us-east-1':
kwargs['CreateBucketConfiguration'] = {
'LocationConstraint': event['AWS_REGION']
}
s3.create_bucket(**kwargs)
s3.put_bucket_encryption(
Bucket=event['BucketName'],
ServerSideEncryptionConfiguration={
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
}
]
}
)
return {
"output": {
"Message": f'Bucket {event["BucketName"]} created'
}
}
except ClientError as error:
if error.response['Error']['Code'] != 'BucketAlreadyExists' and \
error.response['Error']['Code'] != 'BucketAlreadyOwnedByYou':
exit(str(error))
else:
return {
"output": {
"Message": f'Bucket {event["BucketName"]} already exists'
}
}
except Exception as e:
print(e)
exit(str(e))
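# Minimal invocation sketch: the runbook supplies the target bucket name and region
# through the event dict; the literal values below are illustrative assumptions, and
# running this performs a real S3 call with the ambient AWS credentials.
if __name__ == '__main__':
    example_event = {'BucketName': 'my-access-logging-bucket', 'AWS_REGION': 'us-east-1'}
    print(create_logging_bucket(example_event, None))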
|
talk/src/_1_decorators_bad.py
|
zangyuchen2008/Clean-Code-in-Python-Second-Edition
| 133 |
53580
|
<reponame>zangyuchen2008/Clean-Code-in-Python-Second-Edition
"""
Examples of the application of Python decorators in order to
reduce code duplication.
It presents first the naïve approach, with duplicated code,
and then the improved solution using decorators.
"""
from base import logger
def decorator(original_function):
def inner(*args, **kwargs):
# modify original function, or add extra logic
return original_function(*args, **kwargs)
return inner
# 1. Repeated
def update_db_indexes(cursor):
commands = (
"""REINDEX DATABASE transactional""",
)
try:
for command in commands:
cursor.execute(command)
except Exception as e:
logger.exception("Error in update_db_indexes: %s", e)
return -1
else:
logger.info("update_db_indexes run successfully")
return 0
def move_data_archives(cursor):
commands = (
"""INSERT INTO archive_orders SELECT * from orders
WHERE order_date < '2016-01-01' """,
"""DELETE from orders WHERE order_date < '2016-01-01' """,)
try:
for command in commands:
cursor.execute(command)
except Exception as e:
logger.exception("Error in move_data_archives: %s", e)
return -1
else:
logger.info("move_data_archives run successfully")
return 0
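# --- Illustrative refactor sketch (added for clarity; not part of the original file) ---
# The duplication above could be factored out with a decorator along these lines. It
# follows the `decorator` skeleton defined earlier, but it is an assumption about what
# the "improved" version looks like, not the author's actual follow-up module.
def run_commands(original_function):
    def inner(cursor):
        commands = original_function(cursor)
        try:
            for command in commands:
                cursor.execute(command)
        except Exception as e:
            logger.exception("Error in %s: %s", original_function.__name__, e)
            return -1
        else:
            logger.info("%s run successfully", original_function.__name__)
            return 0
    return inner
@run_commands
def update_db_indexes_decorated(cursor):
    return ("""REINDEX DATABASE transactional""",)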
|
media/transcoder/create_job_template.py
|
BaljitSingh919/Project360
| 5,938 |
53594
|
<filename>media/transcoder/create_job_template.py
#!/usr/bin/env python
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Transcoder sample for creating a job template.
Example usage:
python create_job_template.py --project_id <project-id> [--location <location>] [--template_id <template-id>]
"""
# [START transcoder_create_job_template]
import argparse
from google.cloud.video import transcoder_v1
from google.cloud.video.transcoder_v1.services.transcoder_service import (
TranscoderServiceClient,
)
def create_job_template(project_id, location, template_id):
"""Creates a job template.
Args:
project_id: The GCP project ID.
location: The location to store this template in.
template_id: The user-defined template ID."""
client = TranscoderServiceClient()
parent = f"projects/{project_id}/locations/{location}"
job_template = transcoder_v1.types.JobTemplate()
job_template.name = (
f"projects/{project_id}/locations/{location}/jobTemplates/{template_id}"
)
job_template.config = transcoder_v1.types.JobConfig(
elementary_streams=[
transcoder_v1.types.ElementaryStream(
key="video-stream0",
video_stream=transcoder_v1.types.VideoStream(
h264=transcoder_v1.types.VideoStream.H264CodecSettings(
height_pixels=360,
width_pixels=640,
bitrate_bps=550000,
frame_rate=60,
),
),
),
transcoder_v1.types.ElementaryStream(
key="video-stream1",
video_stream=transcoder_v1.types.VideoStream(
h264=transcoder_v1.types.VideoStream.H264CodecSettings(
height_pixels=720,
width_pixels=1280,
bitrate_bps=2500000,
frame_rate=60,
),
),
),
transcoder_v1.types.ElementaryStream(
key="audio-stream0",
audio_stream=transcoder_v1.types.AudioStream(
codec="aac", bitrate_bps=64000
),
),
],
mux_streams=[
transcoder_v1.types.MuxStream(
key="sd",
container="mp4",
elementary_streams=["video-stream0", "audio-stream0"],
),
transcoder_v1.types.MuxStream(
key="hd",
container="mp4",
elementary_streams=["video-stream1", "audio-stream0"],
),
],
)
response = client.create_job_template(
parent=parent, job_template=job_template, job_template_id=template_id
)
print(f"Job template: {response.name}")
return response
# [END transcoder_create_job_template]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--project_id", help="Your Cloud project ID.", required=True)
parser.add_argument(
"--location",
help="The location to store this template in.",
default="us-central1",
)
parser.add_argument(
"--template_id", help="The job template ID.", default="my-job-template"
)
args = parser.parse_args()
create_job_template(args.project_id, args.location, args.template_id)
|
leet/stack/isValid.py
|
monishshah18/python-cp-cheatsheet
| 140 |
53610
|
<filename>leet/stack/isValid.py
class Solution:
def isValid(self, s: str) -> bool:
while '[]' in s or '()' in s or '{}' in s:
s = s.replace('[]','').replace('()','').replace('{}','')
return len(s) == 0
"""
time: 10 min
time: O(n)
space: O(n)
errors:
lower case values/keys
    Have to use a stack because there are 3 kinds of opening/closing characters
"""
class Solution:
def isValid(self, s: str) -> bool:
stk = []
mp = {")":"(", "}":"{", "]":"["}
for c in s:
if c in mp.values():
stk.append(c)
elif c in mp.keys():
test = stk.pop() if stk else '#'
if mp[c] != test:
return False
return len(stk) == 0
class Solution:
def isValid(self, s) -> bool:
stk = []
for c in s:
if c == '(':
stk.append(')')
elif c == '[':
stk.append(']')
elif c == '{':
stk.append('}')
elif not stk or stk.pop() != c:
return False
return not stk
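# --- Illustrative check (added for clarity; not part of the original file) ---
# Quick sanity test exercising the last Solution defined above; the cases are
# hypothetical examples, not taken from the original notes.
if __name__ == "__main__":
    s = Solution()
    assert s.isValid("()[]{}")       # balanced pairs
    assert s.isValid("{[()]}")       # nested pairs
    assert not s.isValid("(]")       # mismatched closer
    assert not s.isValid("(((")      # unclosed openers
    print("all isValid checks passed")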
|
scripts/datasets/somethingsomethingv2.py
|
Kh4L/gluon-cv
| 5,447 |
53635
|
<filename>scripts/datasets/somethingsomethingv2.py<gh_stars>1000+
"""This script is for preprocessing something-something-v2 dataset.
The code is largely borrowed from https://github.com/MIT-HAN-LAB/temporal-shift-module
and https://github.com/metalbubble/TRN-pytorch/blob/master/process_dataset.py
"""
import os
import sys
import threading
import argparse
import json
def parse_args():
parser = argparse.ArgumentParser(description='prepare something-something-v2 dataset')
parser.add_argument('--video_root', type=str, default='~/.mxnet/datasets/somethingsomethingv2/20bn-something-something-v2')
parser.add_argument('--frame_root', type=str, default='~/.mxnet/datasets/somethingsomethingv2/20bn-something-something-v2-frames')
parser.add_argument('--anno_root', type=str, default='~/.mxnet/datasets/somethingsomethingv2/annotations')
parser.add_argument('--num_threads', type=int, default=100)
parser.add_argument('--decode_video', action='store_true', default=True)
parser.add_argument('--build_file_list', action='store_true', default=True)
args = parser.parse_args()
args.video_root = os.path.expanduser(args.video_root)
args.frame_root = os.path.expanduser(args.frame_root)
args.anno_root = os.path.expanduser(args.anno_root)
return args
def split_func(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def extract(video, tmpl='%06d.jpg'):
    # Decode one video into JPEG frames; frame filenames follow the tmpl pattern.
    cmd = 'ffmpeg -i \"{}/{}\" -threads 1 -vf scale=-1:256 -q:v 0 \"{}/{}/{}\"'.format(
        args.video_root, video, args.frame_root, video[:-5], tmpl)
    os.system(cmd)
def target(video_list):
for video in video_list:
os.makedirs(os.path.join(args.frame_root, video[:-5]))
extract(video)
def decode_video(args):
print(args.video_root)
if not os.path.exists(args.video_root):
raise ValueError('Please download videos and set video_root variable.')
if not os.path.exists(args.frame_root):
os.makedirs(args.frame_root)
video_list = os.listdir(args.video_root)
splits = list(split_func(video_list, args.num_threads))
threads = []
for i, split in enumerate(splits):
thread = threading.Thread(target=target, args=(split,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def build_file_list(args):
if not os.path.exists(args.anno_root):
raise ValueError('Please download annotations and set anno_root variable.')
dataset_name = 'something-something-v2'
with open(os.path.join(args.anno_root, '%s-labels.json' % dataset_name)) as f:
data = json.load(f)
categories = []
for i, (cat, idx) in enumerate(data.items()):
assert i == int(idx) # make sure the rank is right
categories.append(cat)
with open('category.txt', 'w') as f:
f.write('\n'.join(categories))
dict_categories = {}
for i, category in enumerate(categories):
dict_categories[category] = i
files_input = [os.path.join(args.anno_root, '%s-validation.json' % dataset_name),
os.path.join(args.anno_root, '%s-train.json' % dataset_name),
os.path.join(args.anno_root, '%s-test.json' % dataset_name)]
files_output = [os.path.join(args.anno_root, 'val_videofolder.txt'),
os.path.join(args.anno_root, 'train_videofolder.txt'),
os.path.join(args.anno_root, 'test_videofolder.txt')]
for (filename_input, filename_output) in zip(files_input, files_output):
with open(filename_input) as f:
data = json.load(f)
folders = []
idx_categories = []
for item in data:
folders.append(item['id'])
if 'test' not in filename_input:
idx_categories.append(dict_categories[item['template'].replace('[', '').replace(']', '')])
else:
idx_categories.append(0)
output = []
for i in range(len(folders)):
curFolder = folders[i]
curIDX = idx_categories[i]
# counting the number of frames in each video folders
dir_files = os.listdir(os.path.join(args.frame_root, curFolder))
if len(dir_files) == 0:
                print('video decoding fails at %s' % curFolder)
sys.exit()
output.append('%s %d %d' % (curFolder, len(dir_files), curIDX))
print('%d/%d' % (i, len(folders)))
with open(filename_output, 'w') as f:
f.write('\n'.join(output))
if __name__ == '__main__':
global args
args = parse_args()
if args.decode_video:
print('Decoding videos to frames.')
decode_video(args)
if args.build_file_list:
print('Generating training files.')
build_file_list(args)
|
tests/benchmarks/tools/kmt.py
|
leroyjvargis/workflows
| 558 |
53671
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2021 Micron Technology, Inc. All rights reserved.
from typing import List
from tools import config
from tools.base import BaseTest
from tools.helpers import shlex_join
class KmtTest(BaseTest):
def __init__(self, name: str, args: List[str]):
super().__init__(name, "kmt")
self.args = self.__fix_args(args)
self.kmt_out_path = None
self.report["kmt"] = {
"args": self.args,
"cmdline": shlex_join(self.args),
}
@staticmethod
def __fix_args(args: List):
new_args = ["kmt"] + list(args)
if not any([arg.startswith("-L") for arg in args]):
new_args.append("-L")
if not any([arg.startswith("-s") for arg in args]):
new_args.append("-s1")
new_args.append(config.KVDB_HOME)
new_args.append(config.KVS_NAME)
return new_args
def execute(self):
super()._execute_init()
completed_info = super()._run_command(self.args)
self.kmt_out_path = completed_info.out_path
self._postprocess()
self._print_and_save_summary()
super()._save_report()
def _postprocess(self):
init_phase = {
"name": "init",
"operations": [],
}
test_phase = {
"name": "test",
"operations": [],
}
with open(self.kmt_out_path) as fd:
for line in fd:
if line.startswith("iclose"):
record = line.split()
total_puts = int(record[6])
run_time_ms = int(record[15])
puts_per_second = int(total_puts / (run_time_ms / 1000.0))
init_phase["run_time_ms"] = run_time_ms
init_put_operation = {
"name": "put",
"throughput": puts_per_second,
}
init_phase["operations"].append(init_put_operation)
elif line.startswith("tclose"):
record = line.split()
total_gets, total_puts = int(record[5]), int(record[6])
run_time_ms = int(record[15])
puts_per_second = int(total_puts / (run_time_ms / 1000.0))
gets_per_second = int(total_gets / (run_time_ms / 1000.0))
test_phase["run_time_ms"] = run_time_ms
test_put_operation = {
"name": "put",
"throughput": puts_per_second,
}
test_get_operation = {
"name": "get",
"throughput": gets_per_second,
}
test_phase["operations"].extend(
[test_put_operation, test_get_operation]
)
elif line.startswith("slatency"):
record = line.split()
phase = record[1]
op = record[2]
(
lat_min,
lat_max,
lat_avg,
lat_p90,
lat_p95,
lat_p99,
lat_p99_9,
lat_p99_99,
) = [int(x) for x in record[5:13]]
if phase == "init":
assert op == "put"
operation_dict = init_put_operation
elif phase == "test":
assert op in ["get", "put"]
if op == "put":
operation_dict = test_put_operation
elif op == "get":
operation_dict = test_get_operation
else:
assert False
else:
assert False
operation_dict["latency_us"] = {
"avg": lat_avg,
"max": lat_max,
"min": lat_min,
"percentiles": [
[90, lat_p90],
[95, lat_p95],
[99, lat_p99],
[99.9, lat_p99_9],
[99.99, lat_p99_99],
],
}
self.report["phases"] = [
init_phase,
test_phase,
]
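# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A KmtTest is built from a test name plus raw kmt arguments and then run with
# execute(); config.KVDB_HOME and config.KVS_NAME are appended automatically.
# The flags below are hypothetical placeholders, not a documented kmt invocation.
#
#     test = KmtTest("kmt-smoke", ["-t60", "-j32"])
#     test.execute()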
|