max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
RecoTauTag/RecoTau/python/PFRecoTauProducer_cfi.py
|
ckamtsikis/cmssw
| 852 |
92663
|
from RecoTauTag.RecoTau.pfRecoTauProducerDef_cfi import pfRecoTauProducerDef
pfRecoTauProducer = pfRecoTauProducerDef.clone()
|
test/test_c.py
|
EraYaN/transpyle
| 107 |
92668
|
<gh_stars>100-1000
"""Tests of C language support."""
import logging
import unittest
import timing
import typed_astunparse
from transpyle.general.code_reader import CodeReader
from transpyle.c.parser import C99Parser
from transpyle.c.ast_generalizer import CAstGeneralizer
from .common import basic_check_c_ast, basic_check_python_ast, execute_on_language_examples
_LOG = logging.getLogger(__name__)
_TIME = timing.get_timing_group(__name__)
class ParserTests(unittest.TestCase):
@execute_on_language_examples('c11')
def test_parse_examples(self, input_path):
code_reader = CodeReader()
code = code_reader.read_file(input_path)
parser = C99Parser()
with _TIME.measure('parse.{}'.format(input_path.name.replace('.', '_'))) as timer:
c_ast = parser.parse(code, input_path)
basic_check_c_ast(self, input_path, c_ast)
_LOG.info('parsed "%s" in %fs', input_path, timer.elapsed)
class AstGeneralizerTests(unittest.TestCase):
@execute_on_language_examples('c11')
def test_generalize_examples(self, input_path):
code_reader = CodeReader()
code = code_reader.read_file(input_path)
parser = C99Parser()
c_ast = parser.parse(code, input_path)
basic_check_c_ast(self, input_path, c_ast)
ast_generalizer = CAstGeneralizer()
with _TIME.measure('generalize.{}'.format(input_path.name.replace('.', '_'))) as timer:
syntax = ast_generalizer.generalize(c_ast)
basic_check_python_ast(self, input_path, syntax)
_LOG.info('generalized "%s" in %fs', input_path, timer.elapsed)
_LOG.debug('%s', typed_astunparse.dump(syntax))
_LOG.debug('%s', typed_astunparse.unparse(syntax))
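# Hedged usage sketch (not part of the original test suite): parse a single C file
# directly, outside the unittest harness. The path below is hypothetical; any C11
# source file accepted by C99Parser should work.
if __name__ == '__main__':
    from pathlib import Path
    example_path = Path('test/examples/c11/example.c')  # hypothetical path
    example_code = CodeReader().read_file(example_path)
    example_c_ast = C99Parser().parse(example_code, example_path)
    example_syntax = CAstGeneralizer().generalize(example_c_ast)
    # Dump the generalized (Python-like) AST for inspection.
    print(typed_astunparse.dump(example_syntax))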
|
loafer/runners.py
|
hartungstenio/loafer
| 111 |
92693
|
<filename>loafer/runners.py
import asyncio
import logging
import signal
from concurrent.futures import CancelledError, ThreadPoolExecutor
from contextlib import suppress
logger = logging.getLogger(__name__)
class LoaferRunner:
def __init__(self, max_workers=None, on_stop_callback=None):
self._on_stop_callback = on_stop_callback
# XXX: See https://github.com/python/asyncio/issues/258
# The minimum value depends on the number of cores in the machine
# See https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor
self._executor = ThreadPoolExecutor(max_workers)
self.loop.set_default_executor(self._executor)
@property
def loop(self):
return asyncio.get_event_loop()
def start(self, debug=False):
if debug:
self.loop.set_debug(enabled=debug)
self.loop.add_signal_handler(signal.SIGINT, self.prepare_stop)
self.loop.add_signal_handler(signal.SIGTERM, self.prepare_stop)
try:
self.loop.run_forever()
finally:
self.stop()
self.loop.close()
logger.debug('loop.is_running={}'.format(self.loop.is_running()))
logger.debug('loop.is_closed={}'.format(self.loop.is_closed()))
def prepare_stop(self, *args):
if self.loop.is_running():
# signals loop.run_forever to exit in the next iteration
self.loop.stop()
def stop(self, *args, **kwargs):
logger.info('stopping Loafer ...')
if callable(self._on_stop_callback):
self._on_stop_callback()
logger.info('cancel scheduled operations ...')
for task in asyncio.Task.all_tasks(self.loop):
task.cancel()
if task.cancelled() or task.done():
continue
with suppress(CancelledError):
self.loop.run_until_complete(task)
self._executor.shutdown(wait=True)
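# Hedged usage sketch (not in the original module): run the event loop until SIGINT or
# SIGTERM arrives, invoking a callback on shutdown. The callback body is hypothetical.
if __name__ == '__main__':
    def _on_stop():
        logger.info('custom cleanup goes here')

    runner = LoaferRunner(max_workers=4, on_stop_callback=_on_stop)
    runner.start(debug=False)  # blocks until the loop is stopped by a signal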
|
tests/tck/utils/mmh2.py
|
tom-chensf/nebula-graph
| 816 |
92740
|
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
def __bytes2ul(b):
return int.from_bytes(b, byteorder='little', signed=False)
def mmh2(bstr, seed=0xc70f6907, signed=True):
MASK = 2 ** 64 - 1
size = len(bstr)
m = 0xc6a4a7935bd1e995
r = 47
h = seed ^ (size * m & MASK)
end = size & (0xfffffff8)
for pos in range(0, end, 8):
k = __bytes2ul(bstr[pos:pos+8])
k = k * m & MASK
k = k ^ (k >> r)
k = k * m & MASK
h = h ^ k
h = h * m & MASK
left = size & 0x7
if left >= 7:
h = h ^ (bstr[end+6] << 48)
if left >= 6:
h = h ^ (bstr[end+5] << 40)
if left >= 5:
h = h ^ (bstr[end+4] << 32)
if left >= 4:
h = h ^ (bstr[end+3] << 24)
if left >= 3:
h = h ^ (bstr[end+2] << 16)
if left >= 2:
h = h ^ (bstr[end+1] << 8)
if left >= 1:
h = h ^ bstr[end+0]
h = h * m & MASK
h = h ^ (h >> r)
h = h * m & MASK
h = h ^ (h >> r)
if signed:
h = h | (-(h & 0x8000000000000000))
return h
if __name__ == '__main__':
assert mmh2(b'hello') == 2762169579135187400
assert mmh2(b'World') == -295471233978816215
assert mmh2(b'Hello World') == 2146989006636459346
assert mmh2(b'Hello Wo') == -821961639117166431
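    # Hedged usage notes (appended example, not in the original file): the seed and
    # signedness can be overridden; with signed=False the raw unsigned 64-bit value is
    # returned instead of the two's-complement interpretation. Values are not asserted.
    print(mmh2(b'hello', signed=False))              # unsigned 64-bit hash
    print(mmh2(b'hello', seed=0x1234, signed=True))  # custom seed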
|
tests/cpydiff/modules_struct_fewargs.py
|
sebastien-riou/micropython
| 13,648 |
92747
|
"""
categories: Modules,struct
description: Struct pack with too few args, not checked by uPy
cause: Unknown
workaround: Unknown
"""
import struct
try:
print(struct.pack("bb", 1))
print("Should not get here")
except:
print("struct.error")
|
model/gac.py
|
hdliu21/openISP
| 384 |
92753
|
#!/usr/bin/python
import numpy as np
class GC:
'Gamma Correction'
def __init__(self, img, lut, mode):
self.img = img
self.lut = lut
self.mode = mode
def execute(self):
img_h = self.img.shape[0]
img_w = self.img.shape[1]
img_c = self.img.shape[2]
gc_img = np.empty((img_h, img_w, img_c), np.uint16)
for y in range(self.img.shape[0]):
for x in range(self.img.shape[1]):
if self.mode == 'rgb':
gc_img[y, x, 0] = self.lut[self.img[y, x, 0]]
gc_img[y, x, 1] = self.lut[self.img[y, x, 1]]
gc_img[y, x, 2] = self.lut[self.img[y, x, 2]]
gc_img[y, x, :] = gc_img[y, x, :] / 4
elif self.mode == 'yuv':
gc_img[y, x, 0] = self.lut[0][self.img[y, x, 0]]
gc_img[y, x, 1] = self.lut[1][self.img[y, x, 1]]
gc_img[y, x, 2] = self.lut[1][self.img[y, x, 2]]
self.img = gc_img
return self.img
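# Hedged usage sketch (not in the original module): apply a gamma LUT to a small random
# RGB image. The identity LUT and the 10-bit LUT depth assumed here are hypothetical;
# openISP normally builds its LUTs from the gamma curve configured in the pipeline.
if __name__ == '__main__':
    test_img = np.random.randint(0, 256, (4, 6, 3), dtype=np.uint16)
    identity_lut = np.arange(1024, dtype=np.uint16)  # assumed identity mapping
    gc = GC(test_img, identity_lut, 'rgb')
    print(gc.execute().shape)  # (4, 6, 3)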
|
configs/__init__.py
|
light1726/VAENAR-TTS
| 125 |
92767
|
from .hparams import *
from .logger import Logger
|
Stock/Data/Gateway/DyStockDataWind.py
|
AdrianGuo/DevilYuan
| 135 |
92772
|
<filename>Stock/Data/Gateway/DyStockDataWind.py
from time import sleep
import pandas as pd
from collections import OrderedDict
try:
from WindPy import *
except ImportError:
pass
from DyCommon.DyCommon import *
from ...Common.DyStockCommon import *
class DyStockDataWind(object):
""" Wind数据接口 """
sectorCodeWindMap = {DyStockCommon.sz50Index: 'a00103010b000000',
DyStockCommon.hs300Index: 'a001030201000000',
DyStockCommon.zz500Index: 'a001030208000000'
}
def __init__(self, info):
self._info = info
self._gateway = w
def getDays(self, code, startDate, endDate, fields, name=None):
"""
@return: df['datetime', indicators]
None - errors
[] - no data
"""
if not fields:
self._info.print('No indicators specified to fetch', DyLogData.error)
return None
# Append 'volume' so that suspended (zero-volume) days can be detected
fields_ = ','.join(fields) if 'volume' in fields else ','.join(fields + ['volume'])
for _ in range(3):
windData = self._gateway.wsd(code, fields_, startDate, endDate)
if windData.ErrorCode != 0:
errorStr = "从Wind获取{0}:{1}, [{2}, {3}]WSD错误: {4}".format(code, name, startDate, endDate, windData.Data[0][0])
if 'Timeout' in errorStr:
sleep(1)
continue
break
if windData.ErrorCode != 0:
self._info.print(errorStr, DyLogData.error)
return None
try:
df = pd.DataFrame(windData.Data,
index=[x.lower() for x in windData.Fields],
columns=windData.Times)
df = df.T
df = df.dropna(axis=1, how='all') # drop columns that are all NaN, e.g. index data has no 'mf_vol'
df = df.ix[df['volume'] > 0, :] # drop suspended (zero-volume) rows
if 'volume' not in fields:
del df['volume']
df.reset_index(inplace=True) # turn the time index into a column
df.rename(columns={'index': 'datetime'}, inplace=True)
# normalize the date's HH:MM:SS part to 00:00:00
df['datetime'] = df['datetime'].map(lambda x: x.strftime('%Y-%m-%d'))
df['datetime'] = pd.to_datetime(df['datetime'], format='%Y-%m-%d')
df = df[['datetime'] + fields]
except:
df = pd.DataFrame(columns=['datetime'] + fields)
return df
def _login(self):
if not self._gateway.isconnected():
self._info.print("登录Wind...")
data = self._gateway.start()
if data.ErrorCode != 0:
self._info.print("登录Wind失败", DyLogData.error)
return False
self._info.print("登录Wind成功")
return True
def getTradeDays(self, startDate, endDate):
if not self._login():
return None
self._info.print("开始从Wind获取交易日数据[{}, {}]...".format(startDate, endDate))
data = w.tdayscount(startDate, endDate)
if data.ErrorCode == 0:
if data.Data[0][0] == 0:
return [] # no trade days between startDate and endDate
data = self._gateway.tdays(startDate, endDate)
if data.ErrorCode == 0:
return [x.strftime('%Y-%m-%d') for x in data.Data[0]]
self._info.print("从Wind获取交易日数据失败[{0}, {1}]: {2}".format(startDate, endDate, data.Data[0][0]), DyLogData.error)
return None
def getStockCodes(self):
if not self._login():
return None
self._info.print("开始从Wind获取股票代码表...")
date = datetime.today()
date = date.strftime("%Y%m%d")
data = w.wset("SectorConstituent", "date={0};sectorId=a001010100000000".format(date))
if data.ErrorCode != 0:
self._info.print("从Wind获取股票代码表失败: {0}!".format(data.Data[0][0]), DyLogData.error)
return None
codes = {}
for code, name in zip(data.Data[1], data.Data[2]):
codes[code] = name
return codes
def getSectorStockCodes(self, sectorCode, startDate, endDate):
if not self._login():
return None
self._info.print("开始从Wind获取[{0}]股票代码表[{1}, {2}]...".format(DyStockCommon.sectors[sectorCode], startDate, endDate))
dates = DyTime.getDates(startDate, endDate)
progress = DyProgress(self._info)
progress.init(len(dates))
codesDict = OrderedDict() # {date: {code: name}}
for date_ in dates:
date = date_.strftime("%Y%m%d")
date_ = date_.strftime("%Y-%m-%d")
data = w.wset("SectorConstituent", "date={0};sectorId={1}".format(date, self.sectorCodeWindMap[sectorCode]))
if data.ErrorCode != 0:
self._info.print("从Wind获取[{0}]股票代码表[{1}]失败: {2}!".format(DyStockCommon.sectors[sectorCode], date_, data.Data[0][0]), DyLogData.error)
return None
codes = {}
if data.Data:
for code, name in zip(data.Data[1], data.Data[2]):
codes[code] = name
codesDict[date_] = codes
progress.update()
self._info.print("从Wind获取[{0}]股票代码表[{1}, {2}]完成".format(DyStockCommon.sectors[sectorCode], startDate, endDate))
return codesDict
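# Hedged usage sketch (not in the original module), assuming a local Wind terminal and
# WindPy are available; the _Info stub below stands in for DevilYuan's info/logger object
# and is hypothetical, as are the stock code and date range.
if __name__ == '__main__':
    class _Info(object):
        def print(self, msg, level=None):
            print(msg)

    wind = DyStockDataWind(_Info())
    if wind._login():  # requires a running Wind terminal session
        df = wind.getDays('600036.SH', '2018-01-01', '2018-01-31', ['open', 'close'], name='CMB')
        print(df)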
|
talkgenerator/sources/text_generator.py
|
korymath/talk-generator
| 110 |
92788
|
""" This module helps out with generating text using templates """
import json
import random
import re
from functools import lru_cache
import tracery
from tracery.modifiers import base_english
from talkgenerator.sources import conceptnet
from talkgenerator.sources import phrasefinder
from talkgenerator.sources import wikihow
from talkgenerator.util import language_util
from talkgenerator.util import os_util
from talkgenerator.util import random_util
known_functions = {
"title": str.title,
"lower": str.lower,
"upper": str.upper,
"dashes": lambda words: words.replace(" ", "-"),
"first_letter": lambda words: words[0],
"last_letter_is_vowel": lambda word: word
if language_util.is_vowel(word[-1])
else None,
"last_letter_is_consonant": lambda word: word
if language_util.is_consonant(word[-1])
else None,
"a": lambda word: language_util.add_article(word),
"ing": language_util.to_present_participle,
"plural": language_util.to_plural,
"singular": language_util.to_singular,
# "synonym": generator_util.FromListGenerator(language_util.get_synonyms),
"2_to_1_pronouns": language_util.second_to_first_pronouns,
"wikihow_action": lambda seed: random_util.choice_optional(
wikihow.get_related_wikihow_actions(seed)
),
"get_last_noun_and_article": language_util.get_last_noun_and_article,
# Conceptnet
"conceptnet_location": conceptnet.weighted_location_generator,
"conceptnet_related": conceptnet.weighted_related_word_generator,
"conceptnet_related_single_word": lambda word: phrasefinder.get_rarest_word(
conceptnet.weighted_related_word_generator(word)
),
# Checkers
"is_noun": lambda word: word if language_util.is_noun(word) else None,
"is_verb": lambda word: word if language_util.is_verb(word) else None,
# Unique: To make a variable not be the same as something else with the same parameters
"unique": lambda x: x,
}
class AbstractTextGenerator(object):
def generate(self, variables_dictionary):
raise NotImplementedError()
def generate_with_seed(self, seed):
return self.generate({"seed": seed})
class TemplatedTextGenerator(AbstractTextGenerator):
def __init__(self, template_file=None, templates_list=None):
templates = []
if template_file:
templates.extend(read_lines(template_file))
if templates_list:
templates.extend(templates_list)
# Create a tuple so no templates can accidentally be deleted from the generator
self._templates = tuple(templates)
def generate(self, variables_dictionary=None):
""" Generates a text from the templates using the given variables dictionary"""
# Set empty dictionary if none is given
if not bool(variables_dictionary):
variables_dictionary = {}
# Create a mutable copy of the templates list
possible_templates = list(self._templates)
for i in range(len(possible_templates)):
template = random.choice(possible_templates)
if can_format_with(template, variables_dictionary):
result = apply_variables_to_template(template, variables_dictionary)
if result:
return result
# Remove the template from the possible templates list, so that it won't be tried again
possible_templates.remove(template)
class TraceryTextGenerator(AbstractTextGenerator):
def __init__(self, tracery_json, variable="origin"):
with open(os_util.to_actual_file(tracery_json)) as grammar_file:
grammar = get_tracery_grammar(grammar_file)
grammar.add_modifiers(base_english)
self._grammar = grammar
self._variable = variable
def generate(self, variables_dictionary=None):
""" Generates a text from internal tracery grammar using the given variables dictionary"""
# Set empty dictionary if none is given
if not bool(variables_dictionary):
variables_dictionary = {}
# Generate
for i in range(100): # TODO prune the grammar instead of retrying
template = self._grammar.flatten("#" + self._variable + "#")
if can_format_with(template, variables_dictionary):
result = apply_variables_to_template(template, variables_dictionary)
if result:
return result
@lru_cache(maxsize=20)
def get_tracery_grammar(grammar_file):
return tracery.Grammar(json.load(grammar_file))
def can_format_with(template, variables_dictionary):
""" Checks if the template can be fully formatted by the given variable dictionary without errors"""
format_variables = get_format_variables(template)
return (len(format_variables) == 0 and len(variables_dictionary) == 0) or set(
format_variables
) <= set(variables_dictionary.keys())
def get_format_variables(template):
""" Finds all the names of the variables used in the template """
return {x[0] for x in get_format_variables_and_functions(template)}
def get_format_variables_and_functions(template):
""" Finds all the names of the variables used in the template with their functions in a large tuple"""
matches = re.findall(r"{(\w+)((?:[.]\w+)*)}", template)
return set(matches)
def apply_variables_to_template(template, variables_dictionary):
variables_and_functions = get_format_variables_and_functions(template)
applied = apply_functions_to_variables(
template, variables_dictionary, variables_and_functions
)
if applied:
(template, variables_dictionary) = applied
return template.format(**variables_dictionary)
def apply_functions(variable, functions):
""" Applies a list of functions to a variable """
result = variable
for func in functions:
# Check if it transformed the result into None
if result is None:
return None
if func in known_functions:
result = known_functions[func](result)
# Check if it is a dictionary, as is allowed in real str.format
elif isinstance(result, dict) and func in result:
result = result[func]
# Unique identifier to make similar functions on a variable have different effects
elif func.isdigit():
result = result
else:
raise ValueError("Unknown function:", func)
return result
def apply_functions_to_variables(
template, variables_dictionary, variables_and_functions
):
""" Applies the functions of the variables_and_functions tuple and stores them in the variable dictionary and
updates the template """
variables_and_functions = list(variables_and_functions)
variables_and_functions.sort(key=lambda a: len(a), reverse=True)
for var_func in variables_and_functions:
# Check if it has functions to apply
if len(var_func) > 1 and len(var_func[1]) > 0:
old_var_name = var_func[0] + var_func[1]
functions = var_func[1][1:].split(".")
variable_name = var_func[0]
variable = variables_dictionary[variable_name]
applied_functions = apply_functions(variable, functions)
if applied_functions is not None:
applied_var_name = old_var_name.replace(".", "_")
# Replace all occurrences with the dot to the underscore notation
template = template.replace(old_var_name, applied_var_name)
# Store in dictionary
variables_dictionary[applied_var_name] = applied_functions
else:
return None
return template, variables_dictionary
def read_lines(filename):
""" Reads all the string lines from a file """
return os_util.read_lines(filename)
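# Hedged usage sketch (not in the original module): a templated generator built from an
# in-memory template list; the template strings and seed below are hypothetical. The
# "{seed.title}" form exercises the known_functions lookup defined above.
if __name__ == '__main__':
    generator = TemplatedTextGenerator(
        templates_list=["A short talk about {seed.title}", "Why {seed} matters"])
    print(generator.generate_with_seed("neural networks"))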
|
server/libs/notification_enum.py
|
teemosauce/rpi-cube
| 195 |
92807
|
<reponame>teemosauce/rpi-cube<gh_stars>100-1000
from enum import Enum
class NotificationEnum(Enum):
config_refresh = 1
config_refresh_finished = 2
config_refresh_failed = 3
process_stop = 4
process_pause = 5
process_continue = 6
|
mobly/controllers/android_device_lib/services/logcat.py
|
booneng/mobly
| 532 |
92861
|
<reponame>booneng/mobly<filename>mobly/controllers/android_device_lib/services/logcat.py
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import logging
import os
import time
from mobly import logger as mobly_logger
from mobly import utils
from mobly.controllers.android_device_lib import adb
from mobly.controllers.android_device_lib import errors
from mobly.controllers.android_device_lib.services import base_service
CREATE_LOGCAT_FILE_TIMEOUT_SEC = 5
class Error(errors.ServiceError):
"""Root error type for logcat service."""
SERVICE_TYPE = 'Logcat'
class Config:
"""Config object for logcat service.
Attributes:
clear_log: bool, clears the logcat before collection if True.
logcat_params: string, extra params to be added to logcat command.
output_file_path: string, the path on the host to write the log file
to, including the actual filename. The service will automatically
generate one if not specified.
"""
def __init__(self, logcat_params=None, clear_log=True, output_file_path=None):
self.clear_log = clear_log
self.logcat_params = logcat_params if logcat_params else ''
self.output_file_path = output_file_path
class Logcat(base_service.BaseService):
"""Android logcat service for Mobly's AndroidDevice controller.
Attributes:
adb_logcat_file_path: string, path to the file that the service writes
adb logcat to by default.
"""
OUTPUT_FILE_TYPE = 'logcat'
def __init__(self, android_device, configs=None):
super().__init__(android_device, configs)
self._ad = android_device
self._adb_logcat_process = None
self._adb_logcat_file_obj = None
self.adb_logcat_file_path = None
# Logcat service uses a single config obj, using singular internal
# name: `_config`.
self._config = configs if configs else Config()
def _enable_logpersist(self):
"""Attempts to enable logpersist daemon to persist logs."""
# Logpersist is only allowed on rootable devices because of excessive
# reads/writes for persisting logs.
if not self._ad.is_rootable:
return
logpersist_warning = ('%s encountered an error enabling persistent'
' logs, logs may not get saved.')
# Android L and older versions do not have logpersist installed,
# so check that the logpersist scripts exists before trying to use
# them.
if not self._ad.adb.has_shell_command('logpersist.start'):
logging.warning(logpersist_warning, self)
return
try:
# Disable adb log spam filter for rootable devices. Have to stop
# and clear settings first because 'start' doesn't support --clear
# option before Android N.
self._ad.adb.shell('logpersist.stop --clear')
self._ad.adb.shell('logpersist.start')
except adb.AdbError:
logging.warning(logpersist_warning, self)
def _is_timestamp_in_range(self, target, begin_time, end_time):
low = mobly_logger.logline_timestamp_comparator(begin_time, target) <= 0
high = mobly_logger.logline_timestamp_comparator(end_time, target) >= 0
return low and high
def create_output_excerpts(self, test_info):
"""Convenient method for creating excerpts of adb logcat.
This copies logcat lines from self.adb_logcat_file_path to an excerpt
file, starting from the location where the previous excerpt ended.
Call this method at the end of: `setup_class`, `teardown_test`, and
`teardown_class`.
Args:
test_info: `self.current_test_info` in a Mobly test.
Returns:
List of strings, the absolute paths to excerpt files.
"""
dest_path = test_info.output_path
utils.create_dir(dest_path)
filename = self._ad.generate_filename(self.OUTPUT_FILE_TYPE, test_info,
'txt')
excerpt_file_path = os.path.join(dest_path, filename)
with io.open(excerpt_file_path, 'w', encoding='utf-8',
errors='replace') as out:
# Devices may accidentally go offline during test,
# check not None before readline().
while self._adb_logcat_file_obj:
line = self._adb_logcat_file_obj.readline()
if not line:
break
out.write(line)
self._ad.log.debug('logcat excerpt created at: %s', excerpt_file_path)
return [excerpt_file_path]
@property
def is_alive(self):
return bool(self._adb_logcat_process)
def clear_adb_log(self):
"""Clears cached adb content."""
try:
self._ad.adb.logcat('-c')
except adb.AdbError as e:
# On Android O, the clear command fails due to a known bug.
# Catching this so we don't crash from this Android issue.
if b'failed to clear' in e.stderr:
self._ad.log.warning('Encountered a known Android error while clearing logcat.')
else:
raise
def _assert_not_running(self):
"""Asserts the logcat service is not running.
Raises:
Error, if the logcat service is running.
"""
if self.is_alive:
raise Error(
self._ad,
'Logcat thread is already running, cannot start another one.')
def update_config(self, new_config):
"""Updates the configuration for the service.
The service needs to be stopped before updating, and explicitly started
after the update.
This will reset the service. Previous output files may be orphaned if
output path is changed.
Args:
new_config: Config, the new config to use.
"""
self._assert_not_running()
self._ad.log.info('[LogcatService] Changing config from %s to %s',
self._config, new_config)
self._config = new_config
def _open_logcat_file(self):
"""Create a file object that points to the beginning of the logcat file.
Wait for the logcat file to be created by the subprocess if it doesn't
exist.
"""
if not self._adb_logcat_file_obj:
deadline = time.perf_counter() + CREATE_LOGCAT_FILE_TIMEOUT_SEC
while not os.path.exists(self.adb_logcat_file_path):
if time.perf_counter() > deadline:
raise Error(self._ad,
'Timeout while waiting for logcat file to be created.')
time.sleep(1)
self._adb_logcat_file_obj = io.open(self.adb_logcat_file_path,
'r',
encoding='utf-8',
errors='replace')
self._adb_logcat_file_obj.seek(0, os.SEEK_END)
def _close_logcat_file(self):
"""Closes and resets the logcat file object, if it exists."""
if self._adb_logcat_file_obj:
self._adb_logcat_file_obj.close()
self._adb_logcat_file_obj = None
def start(self):
"""Starts a standing adb logcat collection.
The collection runs in a separate subprocess and saves logs in a file.
"""
self._assert_not_running()
if self._config.clear_log:
self.clear_adb_log()
self._start()
self._open_logcat_file()
def _start(self):
"""The actual logic of starting logcat."""
self._enable_logpersist()
if self._config.output_file_path:
self._close_logcat_file()
self.adb_logcat_file_path = self._config.output_file_path
if not self.adb_logcat_file_path:
f_name = self._ad.generate_filename(self.OUTPUT_FILE_TYPE,
extension_name='txt')
logcat_file_path = os.path.join(self._ad.log_path, f_name)
self.adb_logcat_file_path = logcat_file_path
utils.create_dir(os.path.dirname(self.adb_logcat_file_path))
# In IntelliJ IDEA's debugging mode, "patch_args" removes the double
# quotes from an arg that starts and ends with them. Add spaces at the
# beginning and end of the command to work around this issue.
cmd = ' "%s" -s %s logcat -v threadtime -T 1 %s >> "%s" ' % (
adb.ADB, self._ad.serial, self._config.logcat_params,
self.adb_logcat_file_path)
process = utils.start_standing_subprocess(cmd, shell=True)
self._adb_logcat_process = process
def stop(self):
"""Stops the adb logcat service."""
self._close_logcat_file()
self._stop()
def _stop(self):
"""Stops the background process for logcat."""
if not self._adb_logcat_process:
return
try:
utils.stop_standing_subprocess(self._adb_logcat_process)
except Exception:
self._ad.log.exception('Failed to stop adb logcat.')
self._adb_logcat_process = None
def pause(self):
"""Pauses logcat.
Note: the service cannot collect logs while paused; if the device
generates more logs than its log buffer can hold, some logs will be
lost.
"""
self._stop()
def resume(self):
"""Resumes a paused logcat service."""
self._assert_not_running()
# Not clearing the log regardless of the config when resuming.
# Otherwise the logs during the paused time will be lost.
self._start()
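# Hedged example (not in the original module): build a custom Config. Attaching it to an
# AndroidDevice requires Mobly's service manager and a real device, so only the config
# construction is shown; the parameter values and output path are hypothetical.
if __name__ == '__main__':
    demo_config = Config(logcat_params='-b all', clear_log=False,
                         output_file_path='/tmp/demo_logcat.txt')
    print(demo_config.logcat_params, demo_config.clear_log, demo_config.output_file_path)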
|
release/stubs.min/System/ComponentModel/__init___parts/HandledEventArgs.py
|
htlcnn/ironpython-stubs
| 182 |
92883
|
<reponame>htlcnn/ironpython-stubs
class HandledEventArgs(EventArgs):
"""
Provides data for events that can be handled completely in an event handler.
HandledEventArgs()
HandledEventArgs(defaultHandledValue: bool)
"""
@staticmethod
def __new__(self,defaultHandledValue=None):
"""
__new__(cls: type)
__new__(cls: type,defaultHandledValue: bool)
"""
pass
Handled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates whether the event handler has completely handled the event or whether the system should continue its own processing.
Get: Handled(self: HandledEventArgs) -> bool
Set: Handled(self: HandledEventArgs)=value
"""
|
pytext/metric_reporters/intent_slot_detection_metric_reporter.py
|
Titousensei/pytext-1
| 6,199 |
92884
|
<reponame>Titousensei/pytext-1
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, List, Optional
from pytext.common.constants import DatasetFieldName, Stage
from pytext.data.data_structures.annotation import CLOSE, OPEN, escape_brackets
from pytext.metrics.intent_slot_metrics import (
FramePredictionPair,
Node,
Span,
compute_all_metrics,
)
from pytext.utils.data import (
byte_length,
get_substring_from_offsets,
merge_token_labels_to_slot,
parse_slot_string,
)
from .channel import Channel, ConsoleChannel, FileChannel
from .metric_reporter import MetricReporter
DOC_LABEL_NAMES = "doc_label_names"
def create_frame(text, intent_label, slot_names_str, byte_len):
frame = Node(
label=intent_label,
span=Span(0, byte_len),
children={
Node(label=slot.label, span=Span(slot.start, slot.end))
for slot in parse_slot_string(slot_names_str)
},
text=text,
)
return frame
def frame_to_str(frame: Node):
annotation_str = OPEN + escape_brackets(frame.label) + " "
cur_index = 0
for slot in sorted(frame.children, key=lambda slot: slot.span.start):
annotation_str += escape_brackets(
get_substring_from_offsets(frame.text, cur_index, slot.span.start)
)
annotation_str += (
OPEN
+ escape_brackets(slot.label)
+ " "
+ escape_brackets(
get_substring_from_offsets(frame.text, slot.span.start, slot.span.end)
)
+ " "
+ CLOSE
)
cur_index = slot.span.end
annotation_str += (
escape_brackets(get_substring_from_offsets(frame.text, cur_index, None))
+ " "
+ CLOSE
)
return annotation_str
class IntentSlotMetricReporter(MetricReporter):
__EXPANSIBLE__ = True
def __init__(
self,
doc_label_names: List[str],
word_label_names: List[str],
use_bio_labels: bool,
channels: List[Channel],
slot_column_name: str = "slots",
text_column_name: str = "text",
token_tensorizer_name: str = "tokens",
) -> None:
super().__init__(channels)
self.doc_label_names = doc_label_names
self.word_label_names = word_label_names
self.use_bio_labels = use_bio_labels
self.slot_column_name = slot_column_name
self.text_column_name = text_column_name
self.token_tensorizer_name = token_tensorizer_name
class Config(MetricReporter.Config):
pass
@classmethod
def from_config(cls, config, tensorizers: Optional[Dict] = None):
# TODO this part should be handled more elegantly
for name in ["text_feats", "tokens"]:
if name in tensorizers:
token_tensorizer_name = name
break
return cls(
tensorizers["doc_labels"].vocab,
tensorizers["word_labels"].vocab,
getattr(tensorizers["word_labels"], "use_bio_labels", False),
[ConsoleChannel(), FileChannel((Stage.TEST,), config.output_path)],
tensorizers["word_labels"].slot_column,
tensorizers[token_tensorizer_name].text_column,
token_tensorizer_name,
)
def aggregate_preds(self, batch_preds, batch_context):
intent_preds, word_preds = batch_preds
self.all_preds.extend(
[
create_frame(
text,
self.doc_label_names[intent_pred],
merge_token_labels_to_slot(
token_range[0:seq_len],
[self.word_label_names[p] for p in word_pred[0:seq_len]],
self.use_bio_labels,
),
byte_length(text),
)
for text, intent_pred, word_pred, seq_len, token_range in zip(
batch_context[self.text_column_name],
intent_preds,
word_preds,
batch_context[DatasetFieldName.SEQ_LENS],
batch_context[DatasetFieldName.TOKEN_RANGE],
)
]
)
def aggregate_targets(self, batch_targets, batch_context):
intent_targets = batch_targets[0]
self.all_targets.extend(
[
create_frame(
text,
self.doc_label_names[intent_target],
raw_slot_label,
byte_length(text),
)
for text, intent_target, raw_slot_label, seq_len in zip(
batch_context[self.text_column_name],
intent_targets,
batch_context[DatasetFieldName.RAW_WORD_LABEL],
batch_context[DatasetFieldName.SEQ_LENS],
)
]
)
def get_raw_slot_str(self, raw_data_row):
return ",".join([str(x) for x in raw_data_row[self.slot_column_name]])
def aggregate_scores(self, batch_scores):
intent_scores, slot_scores = batch_scores
self.all_scores.extend(
(intent_score, slot_score)
for intent_score, slot_score in zip(
intent_scores.tolist(), slot_scores.tolist()
)
)
def predictions_to_report(self):
"""
Generate human readable predictions
"""
return [frame_to_str(frame) for frame in self.all_preds]
def targets_to_report(self):
"""
Generate human readable targets
"""
return [frame_to_str(frame) for frame in self.all_targets]
def calculate_metric(self):
return compute_all_metrics(
[
FramePredictionPair(pred_frame, target_frame)
for pred_frame, target_frame in zip(self.all_preds, self.all_targets)
],
frame_accuracy=True,
)
def batch_context(self, raw_batch, batch):
context = super().batch_context(raw_batch, batch)
context[self.text_column_name] = [
row[self.text_column_name] for row in raw_batch
]
context[DatasetFieldName.SEQ_LENS] = batch[self.token_tensorizer_name][
1
].tolist()
context[DatasetFieldName.TOKEN_RANGE] = batch[self.token_tensorizer_name][
2
].tolist()
context[DatasetFieldName.RAW_WORD_LABEL] = [
self.get_raw_slot_str(raw_data_row) for raw_data_row in raw_batch
]
return context
def get_model_select_metric(self, metrics):
return metrics.frame_accuracy
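# Hedged usage sketch (not in the original module): build a tiny frame by hand and render
# it with frame_to_str. The labels and byte spans below are hypothetical; spans follow
# the Span(start, end) convention used in create_frame above.
if __name__ == '__main__':
    demo_text = "set an alarm"
    demo_frame = Node(
        label="IN:CREATE_ALARM",
        span=Span(0, byte_length(demo_text)),
        children={Node(label="SL:TODO", span=Span(7, 12))},
        text=demo_text,
    )
    print(frame_to_str(demo_frame))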
|
trainer/base.py
|
merlinarer/scrl
| 102 |
92898
|
import copy
import errno
import os
import logging
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
from .helper import TensorBoardWriter
from .linear_eval import iter_eval_epoch, linear_eval_online, linear_eval_offline
from data import get_loaders_for_trainer
from models import Backbone
from models.heads import SingleLayerLinearHead, TwoLayerLinearHead
from optim import get_optimizer_and_scheduler
import utils
log = logging.getLogger('main')
C = utils.Colorer.instance()
def _unwrap(wrapped_module):
if isinstance(wrapped_module, DistributedDataParallel):
module = wrapped_module.module
else:
module = wrapped_module
return module
def _regression_loss(x, y):
# eps = 1e-6 if torch.is_autocast_enabled() else 1e-12
x = F.normalize(x, p=2, dim=1) #, eps=eps)
y = F.normalize(y, p=2, dim=1) #, eps=eps)
return (2 - 2 * (x * y).sum(dim=1)).view(-1)
class BYOLBasedTrainer:
"""This trainer supports BYOL-like training framework that can be subclassed
by other task-specific trainer classes. To specify a detailed algorithm,
the user should implement Traniner.run().
"""
def __init__(self, cfg, online_network, target_network,
predictor=None, evaluator=None,
train_loader=None, eval_loader=None):
if cfg.train.enabled:
assert train_loader is not None
assert predictor is not None
if cfg.train.enabled and cfg.train.online_eval:
assert eval_loader is not None
assert evaluator is not None
self._modules = {}
self._saving_targets = {}
self.cfg = cfg
self.device = cfg.device
self.online_network = online_network
self.target_network = target_network
self.predictor = predictor
self.evaluator = evaluator
self.xent_loss = nn.CrossEntropyLoss()
self.train_loader = train_loader
self.eval_loader = eval_loader
self._setup_device_and_distributed_parallel(cfg.device)
self.cur_epoch = 0
self.max_epochs = 0
self.max_eval_score = 0.
self.max_eval_epoch = 0
if self.cfg.train.enabled:
self.m_base = self.m = cfg.train.m
self.max_epochs = cfg.train.max_epochs
self.total_global_step = len(train_loader) * cfg.train.max_epochs
self.optimizer, self.scheduler = get_optimizer_and_scheduler(
cfg=self.cfg, mode='train', modules=self._modules, loader=train_loader,
exclude_from_lars=True, module_black_list=['target_network'])
self.scaler = torch.cuda.amp.GradScaler() #init_scale=2**14)
# the default init_scale of 2**16 would yield an invalid gradient in the first iteration
self.tb_writer = TensorBoardWriter.init_for_train_from_config(cfg)
else:
self.optimizer, self.scheduler, self.scaler = None, None, None
def __setattr__(self, name, value):
if hasattr(value, 'state_dict') and callable(value.state_dict):
self._saving_targets[name] = value # including optimizers & schedulers
if isinstance(value, nn.Module):
self._modules[name] = value
object.__setattr__(self, name, value)
def run(self):
"""Main training algorithm should be implemented in this method."""
raise NotImplementedError()
@classmethod
def init_from_config(cls, cfg):
train_loader, eval_loader, num_classes = get_loaders_for_trainer(cfg)
online_network = Backbone.init_from_config(cfg)
target_network, predictor, evaluator = None, None, None
if cfg.train.enabled:
target_network = Backbone.init_from_config(cfg)
predictor = TwoLayerLinearHead.init_predictor_from_config(cfg)
evaluator = SingleLayerLinearHead.init_evaluator_from_config(
cfg, num_classes)
return cls(
cfg=cfg,
train_loader=train_loader,
eval_loader=eval_loader,
online_network=online_network,
target_network=target_network,
predictor=predictor,
evaluator=evaluator,
)
def _setup_device_and_distributed_parallel(self, device):
for name, module in self._modules.items():
module = module.to(device)
module = utils.wrap_if_distributed(module, device)
self._modules[name] = module
object.__setattr__(self, name, module)
@torch.no_grad()
def _update_target_network_parameters(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.online_network.parameters(),
self.target_network.parameters()):
param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)
def _decay_ema_momentum(self, step):
self.m = (1 - (1 - self.m_base) *
(math.cos(math.pi * step / self.total_global_step) + 1) / 2)
@staticmethod
def _criterion(p_online, p_target):
"""Regression loss used in BYOL."""
p_online_v1, p_online_v2 = p_online.chunk(2)
p_target_v1, p_target_v2 = p_target.chunk(2)
assert p_online_v1.size(0) == p_online_v2.size(0)
assert p_target_v1.size(0) == p_target_v2.size(0)
assert p_online_v1.size(0) == p_target_v1.size(0)
# symmetric loss
loss = _regression_loss(p_online_v1, p_target_v2)
loss += _regression_loss(p_online_v2, p_target_v1)
return loss.mean()
def _initialize_target_network(self, from_online):
# init momentum network as encoder net
for param_q, param_k in zip(self.online_network.parameters(),
self.target_network.parameters()):
if from_online:
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
def _save_checkpoint(self, tag):
save_path = f"{self.cfg.save_dir}/checkpoint_" + str(tag) + ".pth"
state_dict = {
'tag': str(tag),
'epoch': self.cur_epoch,
'max_eval_score': self.max_eval_score,
'max_eval_epoch': self.max_eval_epoch,
}
for key, target in self._saving_targets.items():
if self.cfg.fake_checkpoint:
target = "fake_state_dict"
else:
target = utils.unwrap_if_distributed(target)
target = target.state_dict()
state_dict[f"{key}_state_dict"] = target
torch.save(state_dict, save_path)
suffix = (C.debug(" (fake_checkpoint)")
if self.cfg.fake_checkpoint else "")
return save_path + suffix
def save_checkpoint(self, epoch):
save_path = self._save_checkpoint(str(epoch))
log.info(f"[Save] restore the model's checkpoint: {save_path}")
return save_path
def save_best_checkpoint(self):
save_path = self._save_checkpoint('best')
log.info(f"[Save] restore the best model's checkpoint: {save_path}")
return save_path
def symlink_checkpoint_with_tag(self, epoch, tag):
save_path = f"{self.cfg.save_dir}/checkpoint_{epoch}.pth"
symlink_path = f"{self.cfg.save_dir}/checkpoint_{tag}.pth"
if not os.path.exists(save_path):
self._save_checkpoint(epoch)
try:
os.symlink(os.path.abspath(save_path), symlink_path)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(symlink_path)
os.symlink(os.path.abspath(save_path), symlink_path)
else:
raise e
finally:
log.info(f"[Save] make a symlink of the current model: "
f"{symlink_path}")
return symlink_path
def load_checkpoint_if_available(self, tag='last'):
if self.cfg.overwrite:
assert not self.cfg.load_dir, \
"Mutually exclusive aruguements: overwrite, load_dir."
log.warning("Overwrite checkpoints in save_dir.")
return False
try:
load_dir = self.cfg.load_dir or self.cfg.save_dir
load_path = f"{load_dir}/checkpoint_{tag}.pth"
state_dict = torch.load(load_path)
except FileNotFoundError:
if self.cfg.load_dir:
raise FileNotFoundError(f"Can't find checkpoint at {load_dir}")
else:
log.warning(f'No checkpoint to resume from {load_dir}.')
return False
self.cur_epoch = state_dict['epoch']
self.max_eval_score = state_dict['max_eval_score']
self.max_eval_epoch = state_dict['max_eval_epoch']
state_dict = {k[:-len('_state_dict')]: v for k, v in state_dict.items()
if k.endswith('_state_dict')}
log.info(f"[Resume] Loaded chekpoint (epoch: {self.cur_epoch}) "
f"from: {load_path}")
missing_keys = set(self._saving_targets.keys()) - set(state_dict.keys())
unexpected_keys = set(state_dict.keys()) - set(self._saving_targets.keys())
assert len(missing_keys) == 0, "Missing keys!"
log.info("[Resume] Redundant keys: "
f"{list(unexpected_keys) if unexpected_keys else 'None'}")
for key, target in self._saving_targets.items():
if state_dict[key] == 'fake_state_dict':
log.info(f"[Resume] Loaded {key}: {C.debug('(fake_chekpoint)')}")
else:
kwargs = {'strict': False} if isinstance(target, nn.Module) else {}
loaded = _unwrap(target).load_state_dict(state_dict[key], **kwargs)
if isinstance(target, nn.Module):
assert len(loaded.missing_keys) == 0
if isinstance(target, Backbone):
# the projector is ignored in evaluation-only cases
assert all([key.startswith('projector.')
for key in loaded.unexpected_keys])
log.info(f"[Resume] Loaded {key}")
return True
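# Hedged usage sketch (not in the original module): the symmetric BYOL regression loss on
# random features. Shapes are hypothetical; each tensor stacks the two augmented views
# along the batch dimension, as assumed by _criterion's chunk(2).
if __name__ == '__main__':
    p_online = torch.randn(8, 128)   # 2 views x 4 samples, 128-d predictions
    p_target = torch.randn(8, 128)   # matching target projections
    loss = BYOLBasedTrainer._criterion(p_online, p_target)
    print(loss.item())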
|
common/parametric_distribution.py
|
Timothy102/seed_rl
| 733 |
92902
|
<filename>common/parametric_distribution.py<gh_stars>100-1000
# coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parametric distributions over action spaces."""
import abc
from typing import Callable
import dataclasses
import gym
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions import kullback_leibler
tfb = tfp.bijectors
tfd = tfp.distributions
class ParametricDistribution(abc.ABC):
"""Abstract class for parametric (action) distribution."""
def __init__(self,
param_size,
create_dist):
"""Abstract class for parametric (action) distribution.
Specifies how to transform distribution parameters (i.e. actor output)
into a distribution over actions.
Args:
param_size: Size of the parameters for the distribution
create_dist: Function from parameters to tf Distribution.
"""
self._param_size = param_size
self._create_dist = create_dist
@property
def create_dist(self):
return self._create_dist
def __call__(self, params):
return self.create_dist(params)
@property
def param_size(self):
return self._param_size
@property
def reparametrizable(self):
return self._create_dist(tf.zeros(
(self._param_size,
))).reparameterization_type == tfd.FULLY_REPARAMETERIZED
def sample(self, parameters):
return self._create_dist(parameters).sample()
def log_prob(self, parameters, actions):
return self._create_dist(parameters).log_prob(actions)
def entropy(self, parameters):
"""Return the entropy of the given distribution."""
return self._create_dist(parameters).entropy()
def kl_divergence(self, parameters_a, parameters_b):
"""Return KL divergence between the two distributions."""
dist_a = self._create_dist(parameters_a)
dist_b = self._create_dist(parameters_b)
return tfd.kl_divergence(dist_a, dist_b)
def categorical_distribution(n_actions, dtype):
"""Initialize the categorical distribution.
Args:
n_actions: the number of actions available.
dtype: dtype of actions, usually int32 or int64.
Returns:
A tuple (param size, fn(params) -> distribution)
"""
def create_dist(parameters):
return tfd.Categorical(logits=parameters, dtype=dtype)
return ParametricDistribution(n_actions, create_dist)
def multi_categorical_distribution(n_dimensions, n_actions_per_dim, dtype):
"""Initialize the categorical distribution.
Args:
n_dimensions: the dimensionality of actions.
n_actions_per_dim: number of actions available per dimension.
dtype: dtype of actions, usually int32 or int64.
Returns:
A tuple (param size, fn(params) -> distribution)
"""
def create_dist(parameters):
batch_shape = parameters.shape[:-1]
logits_shape = [n_dimensions, n_actions_per_dim]
logits = tf.reshape(parameters, batch_shape + logits_shape)
return tfd.Independent(
tfd.Categorical(logits=logits, dtype=dtype),
reinterpreted_batch_ndims=1)
return ParametricDistribution(n_dimensions * n_actions_per_dim, create_dist)
# NB: This distribution has no gradient w.r.t the action close to boundaries.
class TanhTransformedDistribution(tfd.TransformedDistribution):
"""Distribution followed by tanh."""
def __init__(self, distribution, threshold=.999, validate_args=False):
"""Initialize the distribution.
Args:
distribution: The distribution to transform.
threshold: Clipping value of the action when computing the logprob.
validate_args: Passed to super class.
"""
super().__init__(
distribution=distribution,
bijector=tfp.bijectors.Tanh(),
validate_args=validate_args)
# Computes the log of the average probability distribution outside the
# clipping range, i.e. on the interval [-inf, -atanh(threshold)] for
# log_prob_left and [atanh(threshold), inf] for log_prob_right.
self._threshold = threshold
inverse_threshold = self.bijector.inverse(threshold)
# Let epsilon = 1 - threshold
# average(pdf) on [threshold, 1] = probability([threshold, 1])/epsilon
# So log(average(pdf)) = log(probability) - log(epsilon)
log_epsilon = tf.math.log(1. - threshold)
# Those 2 values are differentiable w.r.t. model parameters, such that the
# gradient is defined everywhere.
# There won't be any gradient w.r.t the action though.
self._log_prob_left = self.distribution.log_cdf(
-inverse_threshold) - log_epsilon
self._log_prob_right = self.distribution.log_survival_function(
inverse_threshold) - log_epsilon
def log_prob(self, event):
# Without this clip there would be NaNs in the inner tf.where and that
# causes issues for some reasons.
event = tf.clip_by_value(event, -self._threshold, self._threshold)
# The inverse image of {threshold} is the interval [atanh(threshold), inf]
# which has a probability of "log_prob_right" under the given distribution.
return tf.where(
event <= -self._threshold, self._log_prob_left,
tf.where(event >= self._threshold, self._log_prob_right,
super().log_prob(event)))
def mode(self):
return self.bijector.forward(self.distribution.mode())
def mean(self):
return self.bijector.forward(self.distribution.mean())
def entropy(self, seed=None):
# We return an estimation using a single sample of the log_det_jacobian.
# We can still do some backpropagation with this estimate.
return self.distribution.entropy() + self.bijector.forward_log_det_jacobian(
self.distribution.sample(seed=seed), event_ndims=0)
@kullback_leibler.RegisterKL(TanhTransformedDistribution,
TanhTransformedDistribution)
def _kl_transformed(a, b, name='kl_transformed'):
return kullback_leibler.kl_divergence(
a.distribution, b.distribution, name=name)
def softplus_default_std_fn(scale):
return tf.nn.softplus(scale) + 1e-3
def normal_tanh_distribution(num_actions,
gaussian_std_fn=softplus_default_std_fn):
"""Normal distribution postprocessed by a tanh."""
def create_dist(parameters):
loc, scale = tf.split(parameters, 2, axis=-1)
scale = gaussian_std_fn(scale)
normal_dist = tfd.Normal(loc=loc, scale=scale)
return tfd.Independent(
TanhTransformedDistribution(normal_dist), reinterpreted_batch_ndims=1)
return ParametricDistribution(2 * num_actions, create_dist)
class ClippedIdentity(tfb.identity.Identity):
"""Compute Y = clip_by_value(X, -1, 1).
Note that we do not override `is_injective` despite this bijector not being
injective, to not disable Identity's `forward_log_det_jacobian`. See also
tensorflow_probability.bijectors.identity.Identity.
"""
def __init__(self, validate_args=False, name='clipped_identity'):
with tf.name_scope(name) as name:
super(ClippedIdentity, self).__init__(
validate_args=validate_args, name=name)
@classmethod
def _is_increasing(cls):
return False
def _forward(self, x):
return tf.clip_by_value(x, -1., 1.)
CLIPPED_IDENTITY = ClippedIdentity()
def normal_clipped_distribution(num_actions,
gaussian_std_fn=softplus_default_std_fn):
"""Normal distribution postprocessed by a clipped identity."""
def create_dist(parameters):
loc, scale = tf.split(parameters, 2, axis=-1)
scale = gaussian_std_fn(scale)
normal_dist = tfd.Normal(loc=loc, scale=scale)
return tfd.Independent(
CLIPPED_IDENTITY(normal_dist), reinterpreted_batch_ndims=1)
return ParametricDistribution(2 * num_actions, create_dist)
def deterministic_tanh_distribution(num_actions):
def create_dist(parameters):
return tfd.Independent(
TanhTransformedDistribution(tfd.Deterministic(loc=parameters)),
reinterpreted_batch_ndims=1)
return ParametricDistribution(num_actions, create_dist)
def joint_distribution(parametric_distributions,
dtype_override=tf.float32):
"""Initialize the distribution.
Args:
parametric_distributions: A list of ParametricDistributions.
dtype_override: The type to output the actions in.
Returns:
A tuple (param size, fn(params) -> distribution)
"""
param_sizes = [
dist.param_size for dist in parametric_distributions
]
def create_dist(parameters):
split_params = tf.split(parameters, param_sizes, axis=-1)
dists = [
dist(param)
for (dist, param) in zip(parametric_distributions, split_params)
]
return tfd.Blockwise(dists, dtype_override=dtype_override)
return ParametricDistribution(sum(param_sizes), create_dist)
def check_multi_discrete_space(space):
if min(space.nvec) != max(space.nvec):
raise ValueError('space nvec must be constant: {}'.format(space.nvec))
def check_box_space(space):
assert len(space.shape) == 1, space.shape
if any(l != -1 for l in space.low):
raise ValueError(
f'Learner only supports actions bounded to [-1,1]: {space.low}')
if any(h != 1 for h in space.high):
raise ValueError(
f'Learner only supports actions bounded to [-1,1]: {space.high}')
def get_parametric_distribution_for_action_space(action_space,
continuous_config=None):
"""Returns an action distribution parametrization based on the action space.
Args:
action_space: action space of the environment
continuous_config: Configuration for the continuous action distribution
(used when needed by the action space).
"""
if isinstance(action_space, gym.spaces.Discrete):
return categorical_distribution(action_space.n, dtype=action_space.dtype)
elif isinstance(action_space, gym.spaces.MultiDiscrete):
check_multi_discrete_space(action_space)
return multi_categorical_distribution(
n_dimensions=len(action_space.nvec),
n_actions_per_dim=action_space.nvec[0],
dtype=action_space.dtype)
elif isinstance(action_space, gym.spaces.Box): # continuous actions
check_box_space(action_space)
if continuous_config is None:
continuous_config = ContinuousDistributionConfig()
if continuous_config.postprocessor == 'Tanh':
return normal_tanh_distribution(
num_actions=action_space.shape[0],
gaussian_std_fn=continuous_config.gaussian_std_fn)
elif continuous_config.postprocessor == 'ClippedIdentity':
return normal_clipped_distribution(
num_actions=action_space.shape[0],
gaussian_std_fn=continuous_config.gaussian_std_fn)
else:
raise ValueError(
f'Postprocessor {continuous_config.postprocessor} not supported.')
elif isinstance(action_space, gym.spaces.Tuple): # mixed actions
return joint_distribution([
get_parametric_distribution_for_action_space(subspace,
continuous_config)
for subspace in action_space
])
else:
raise ValueError(f'Unsupported action space {action_space}')
@tf.custom_gradient
def safe_exp(x):
e = tf.exp(tf.clip_by_value(x, -15, 15))
def grad(dy):
return dy * e
return e, grad
def safe_exp_std_fn(std_for_zero_param: float, min_std):
std_shift = tf.math.log(std_for_zero_param - min_std)
fn = lambda scale: safe_exp(scale + std_shift) + min_std
assert abs(fn(0) - std_for_zero_param) < 1e-3
return fn
def softplus_std_fn(std_for_zero_param: float, min_std: float):
std_shift = tfp.math.softplus_inverse(std_for_zero_param - min_std)
fn = lambda scale: tf.nn.softplus(scale + std_shift) + min_std
assert abs(fn(0) - std_for_zero_param) < 1e-3
return fn
@dataclasses.dataclass
class ContinuousDistributionConfig(object):
"""Configuration for continuous distributions.
Currently, only NormalSquashedDistribution is supported. The default
configuration corresponds to a normal distribution (with standard deviation
computed from params using an unshifted softplus offset by 1e-3),
followed by tanh.
"""
# Transforms parameters into non-negative values for standard deviation of the
# gaussian.
gaussian_std_fn: Callable[[tf.Tensor], tf.Tensor] = softplus_default_std_fn
# The squashing postprocessor.
# Accepted values are Tanh and ClippedIdentity.
postprocessor: str = 'Tanh'
def continuous_action_config(
action_min_gaussian_std: float = 1e-3,
action_gaussian_std_fn: str = 'softplus',
action_std_for_zero_param: float = 1,
action_postprocessor: str = 'Tanh') -> ContinuousDistributionConfig:
"""Configures continuous distributions from numerical and string inputs.
Currently, only NormalSquashedDistribution is supported. The default
configuration corresponds to a normal distribution with standard deviation
computed from params using an unshifted softplus, followed by tanh.
Args:
action_min_gaussian_std: minimal standard deviation.
action_gaussian_std_fn: transform for standard deviation parameters.
action_std_for_zero_param: shifts the transform to get this std when
parameters are zero.
action_postprocessor: the non-linearity applied to the sample from the
gaussian.
Returns:
A continuous distribution setup, with the parameters transform
to get the standard deviation applied with a shift, as configured.
"""
config = ContinuousDistributionConfig()
config.min_gaussian_std = float(action_min_gaussian_std)
if action_gaussian_std_fn == 'safe_exp':
config.gaussian_std_fn = safe_exp_std_fn(action_std_for_zero_param,
config.min_gaussian_std)
elif action_gaussian_std_fn == 'softplus':
config.gaussian_std_fn = softplus_std_fn(action_std_for_zero_param,
config.min_gaussian_std)
else:
raise ValueError('Flag `action_gaussian_std_fn` only supports safe_exp and'
f' softplus, got: {action_gaussian_std_fn}')
config.postprocessor = action_postprocessor
return config
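# Hedged usage sketch (not in the original module): build distribution parametrizations
# for simple Gym action spaces and sample from zero parameters. The spaces and parameter
# shapes below are hypothetical illustrations of the param_size property above.
if __name__ == '__main__':
  discrete_dist = get_parametric_distribution_for_action_space(gym.spaces.Discrete(4))
  print(discrete_dist.param_size)                # 4 logits
  print(discrete_dist.sample(tf.zeros([1, 4])))  # one sampled action

  box = gym.spaces.Box(low=-1., high=1., shape=(3,))
  continuous_dist = get_parametric_distribution_for_action_space(box)
  print(continuous_dist.param_size)                # 2 * 3 (mean and std parameters)
  print(continuous_dist.sample(tf.zeros([1, 6])))  # one tanh-squashed action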
|
Notebooks/Target-lane.py
|
keuntaeklee/pytorch-PPUU
| 159 |
92912
|
<gh_stars>100-1000
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
from mpl_toolkits import mplot3d
from matplotlib.pylab import *
# %%
style.use(['dark_background', 'bmh'])
rc('axes', facecolor='k')
rc('figure', facecolor='k')
rc('figure', figsize=(10,5))
# %%
# Lanes from -1 to 3 --> 0 is target lane
y = r_[-1:3.1:.1]
# 2 lane widths in front and in back
x = r_[-2:2.1:.1]
# %%
# Target lane cost
# target_lane_cost = y ** 2 / 4
target_lane_cost = abs(y) * .5
# %%
# Color shorthands
r, g, b, p = 'C1', 'C3', 'C0', 'C2'
set_color = lambda c: dict(linefmt=c, basefmt=" ", markerfmt='o'+c)
# %%
# Target Lane 0, Ego Car Lane 2, Other Car Lane 1
figure()
y_proximity = maximum(1 - abs(1 - y), 0)
stem(y, target_lane_cost + y_proximity, **set_color(p), label='Total Cost')
stem(y, target_lane_cost, **set_color(b), label='Target Lane Cost')
stem(y, y_proximity, **set_color(g), label='Y Proximity Cost')
arrow_props = dict(width=1.5, facecolor='white')
# annotate('Ego Car', (2.0, 0.0), (2, -0.25), arrowprops=arrow_props)
annotate('Other Car', (1.0, 0.0), (1, -0.25), arrowprops=arrow_props)
annotate('Target Lane', (0.0, 0.0), (0, -0.25), arrowprops=arrow_props)
axis('equal')
title('Target Lane Cost + Proximity Cost')
legend()
savefig('car_1_left.png')
# Target Lane 0, Ego Car Lane 2, Other Car Lane 0
figure()
y_proximity = maximum(1 - abs(0 - y), 0)
stem(y, target_lane_cost + y_proximity, **set_color(p), label='Total Cost')
stem(y, target_lane_cost, **set_color(b), label='Target Lane Cost')
stem(y, y_proximity, **set_color(g), label='Y Proximity Cost')
# annotate('Ego Car', (2.0, 0.0), (2, -0.25), arrowprops=arrow_props)
annotate('Other Car', (0.0, 0.0), (0.8, -0.25), arrowprops=arrow_props)
annotate('Target Lane', (0.0, 0.0), (0, -0.25), arrowprops=arrow_props)
axis('equal')
title('Target Lane Cost + Proximity Cost')
legend()
savefig('car_2_left.png')
# Target Lane 0, Ego Car Lane 1, Lane Cost accounted for
figure()
lane_cost = (maximum(0.5 - abs(0.5 - y), 0) + maximum(0.5 - abs(1.5 - y), 0)) * 0.4
stem(y, target_lane_cost + lane_cost, **set_color(p), label='Total Cost')
stem(y, target_lane_cost, **set_color(b), label='Target Lane Cost')
stem(y, lane_cost, **set_color(r), label='Lane Cost')
annotate('Ego Car', (1.0, 0.0), (1, -0.25), arrowprops=arrow_props)
annotate('Target Lane', (0.0, 0.0), (0, -0.25), arrowprops=arrow_props)
axis('equal')
title('Target Lane Cost + Lane Cost')
legend();
savefig('lane_change.png')
# %%
figure()
plot(y, lane_cost)
ylim(-0.05, 0.2)
annotate('Ego Car', (1.0, 0.0), (1, -0.04), arrowprops=arrow_props)
title('Lane Cost');
# %%
set_color_3d = lambda c: dict(color=c, marker='o', markeredgecolor=c, markevery=(0.1, 0.1))
x_proximity = maximum(0, 1 - abs(x))
figure()
ax = axes(projection='3d');
ax.set_xlabel('x-direction')
ax.set_ylabel('y-direction')
ax.set_zlabel('Cost')
for i in range(len(y)):
# Target lane cost
line = mplot3d.art3d.Line3D(*zip((0, y[i], 0), (0, y[i], target_lane_cost[i])), **set_color_3d(b))
ax.add_line(line)
# Lane cost
line = mplot3d.art3d.Line3D(*zip((0, y[i], 0), (0, y[i], lane_cost[i])), **set_color_3d(r))
ax.add_line(line)
# X-Proximity cost
line = mplot3d.art3d.Line3D(*zip((x[i], 1, 0), (x[i], 1, x_proximity[i])), **set_color_3d(g))
ax.add_line(line)
ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-1, 3)
ax.set_zlim3d(-0.25, 2);
# %%
|
rest_client/clients/core.py
|
js882829/tars
| 371 |
92929
|
<reponame>js882829/tars
from __future__ import absolute_import
import json
import urlparse
import requests
from requests.exceptions import RequestException
from django.conf import settings
from rest_client.decorators import json_format
from rest_client.utils import ConfigDict, get_logger, Guardor
from rest_client.exceptions import (
ServerResponseException, InvalidRestMethodException, ConfigMissing)
logger = get_logger()
ALLOWED_HTTP_METHODS = frozenset(('GET', 'POST', 'PUT', 'DELETE', 'PATCH'))
class Client(object):
_timeout = 3
_env = 'default'
_default_env = 'default'
_use_default = False
_parser = staticmethod(json.loads)
    _error_parser = staticmethod(lambda x: x)
    # Base URL for the service; built lazily by build_url_base()
    _url_base = None
def __new__(cls):
raise AssertionError # Client can't have instances
@classmethod
def config(cls, name=None, key=None):
name = name or cls.__name__.upper()
configs = settings.REST_CLIENT_SETTINGS.get(name, {})
try:
if cls._use_default and cls._env not in configs:
profile = cls._default_env
else:
profile = cls._env
config = configs[profile]
except KeyError:
raise ConfigMissing('Configuration for {} is not found'
.format(cls.__name__))
config = ConfigDict(config)
config.host = cls.__name__
return config if key is None else config.get(key)
@classmethod
def build_url_base(cls):
cfg = cls.config()
url = urlparse.urlparse('http://' + cfg['HOSTNAME'])
port = cfg.get('PORT') or '80'
hostname = url.hostname
url_str = url.geturl()
cls._url_base = url_str.replace(
hostname, '{}:{}'.format(hostname, port), 1)
@classmethod
def _get_sanitized_url(cls, url):
if cls._url_base is None:
cls.build_url_base()
return urlparse.urljoin(cls._url_base, url)
@classmethod
def _rest_call(cls, url, method='GET', **kwargs):
url = cls._get_sanitized_url(url)
if method in ALLOWED_HTTP_METHODS:
try:
kwargs.setdefault('timeout', cls._timeout)
response = requests.request(method.lower(), url, verify=True,
**kwargs)
except RequestException as e:
raise ServerResponseException('Connection error {}'
.format(e.message))
else:
raise InvalidRestMethodException(
                'Invalid method "{}" is used for the HTTP request. Can only '
'use the following: {!s}'.format(method, ALLOWED_HTTP_METHODS)
)
if 200 <= response.status_code < 300:
response_data = response.text
data = cls._parser(response_data) if response_data else None
return data
else:
cleansed_kwargs = Guardor.cleanse_content(kwargs)
msg = '%s returned HTTP %d: %s\nResponse\nHeaders: %s\nBody: %s' % (
url, response.status_code, cleansed_kwargs, response.headers,
cls._error_parser(response.text))
logger.error(msg)
cls._error_handler(response)
raise ServerResponseException(
'Server response not OK. Verbose: {0}'.format(msg))
@classmethod
def _error_handler(cls, response):
pass
class JsonClient(Client):
@classmethod
@json_format
def _rest_call(cls, url, method, **kwargs):
return super(JsonClient, cls)._rest_call(url, method, **kwargs)
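# Editor's sketch, not part of the original module: a hypothetical service client
# built on JsonClient. The class name, endpoint and timeout below are made-up
# examples; real host/port configuration comes from settings.REST_CLIENT_SETTINGS,
# as resolved by Client.config() above.
class ExampleServiceClient(JsonClient):
    _env = 'default'
    _timeout = 5

    @classmethod
    def get_status(cls):
        # Issues GET http://<HOSTNAME>:<PORT>/status and returns the parsed JSON body.
        return cls._rest_call('/status', 'GET')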
|
test/tst_create_mem.py
|
timgates42/netcdf4-python
| 574 |
92939
|
<filename>test/tst_create_mem.py
import unittest
import netCDF4
import numpy as np
from numpy.testing import assert_array_equal
class TestCreateMem(unittest.TestCase):
def test_mem_create(self):
def check_inmemory(format):
# memory is 'advisory size' - not needed for NETCDF4/HDF5
# but is used for NETCDF3.
nc = netCDF4.Dataset('test.nc','w',memory=1028,format=format)
d = nc.createDimension('x',None)
v = nc.createVariable('v',np.int32,'x')
data = np.arange(5)
v[0:5] = data
# retrieve memory buffer
b = nc.close()
# open a new file using this memory buffer
nc2 = netCDF4.Dataset('test2.nc','r',memory=b)
assert_array_equal(nc2['v'][:],data)
nc2.close()
check_inmemory('NETCDF3_CLASSIC')
check_inmemory('NETCDF4_CLASSIC')
if __name__ == '__main__':
unittest.main()
|
metal/mmtl/metal_model.py
|
inimino/metal
| 437 |
92980
|
<gh_stars>100-1000
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
from metal.utils import move_to_device, recursive_merge_dicts, set_seed
model_defaults = {
"seed": None,
"device": 0, # gpu id (int) or -1 for cpu
"verbose": True,
"fp16": False,
"model_weights": None, # the path to a saved checkpoint to initialize with
}
class MetalModel(nn.Module):
"""A dynamically constructed discriminative classifier
Args:
tasks: a list of Task objects which bring their own (named) modules
We currently support up to N input modules -> middle layers -> up to N heads
TODO: Accept specifications for more exotic structure (e.g., via user-defined graph)
"""
def __init__(self, tasks, **kwargs):
self.config = recursive_merge_dicts(model_defaults, kwargs, misses="insert")
# Set random seed before initializing module weights
if self.config["seed"] is None:
self.config["seed"] = np.random.randint(1e6)
set_seed(self.config["seed"])
super().__init__()
# Build network
self._build(tasks)
self.task_map = {task.name: task for task in tasks}
# Load weights
if self.config["model_weights"]:
self.load_weights(self.config["model_weights"])
# Half precision
if self.config["fp16"]:
print("metal_model.py: Using fp16")
self.half()
# Move model to device now, then move data to device in forward() or calculate_loss()
if self.config["device"] >= 0:
if torch.cuda.is_available():
if self.config["verbose"]:
print("Using GPU...")
self.to(torch.device(f"cuda:{self.config['device']}"))
else:
if self.config["verbose"]:
print("No cuda device available. Using cpu instead.")
# Show network
if self.config["verbose"]:
print("\nNetwork architecture:")
print(self)
print()
num_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
print(f"Total number of parameters: {num_params}")
def _build(self, tasks):
"""Iterates over tasks, adding their input_modules and head_modules"""
# TODO: Allow more flexible specification of network structure
self.input_modules = nn.ModuleDict(
{task.name: nn.DataParallel(task.input_module) for task in tasks}
)
self.middle_modules = nn.ModuleDict(
{task.name: nn.DataParallel(task.middle_module) for task in tasks}
)
self.head_modules = nn.ModuleDict(
{task.name: nn.DataParallel(task.head_module) for task in tasks}
)
self.loss_hat_funcs = {task.name: task.loss_hat_func for task in tasks}
self.output_hat_funcs = {task.name: task.output_hat_func for task in tasks}
def forward(self, X, task_names):
"""Returns the outputs of the requested task heads in a dictionary
The output of each task is the result of passing the input through the
input_module, middle_module, and head_module for that task, in that order.
Before calculating any intermediate values, we first check whether a previously
evaluated task has produced that intermediate result. If so, we use that.
Args:
X: a [batch_size, ...] batch from a DataLoader
Returns:
output_dict: {task_name (str): output (Tensor)}
"""
input = move_to_device(X, self.config["device"])
outputs = {}
# TODO: Replace this naive caching scheme with a more intelligent and feature-
# complete approach where arbitrary DAGs of modules are specified and we only
# cache things that will be reused by another task
for task_name in task_names:
# Extra .module call is to get past DataParallel wrapper
input_module = self.input_modules[task_name].module
if input_module not in outputs:
output = input_module(input)
outputs[input_module] = output
middle_module = self.middle_modules[task_name].module
if middle_module not in outputs:
output = middle_module(outputs[input_module])
outputs[middle_module] = output
head_module = self.head_modules[task_name].module
if head_module not in outputs:
output = head_module(outputs[middle_module])
outputs[head_module] = output
return {t: outputs[self.head_modules[t].module] for t in task_names}
def calculate_loss(self, X, Ys, payload_name, labels_to_tasks):
"""Returns a dict of {task_name: loss (a FloatTensor scalar)}.
Args:
X: an appropriate input for forward(), either a Tensor or tuple
            Ys: a dict of {label_name: labels} where labels is [n, ?]
labels_to_tasks: a dict of {label_name: task_name} indicating which task
head to use to calculate the loss for each labelset.
"""
task_names = set(labels_to_tasks.values())
outputs = self.forward(X, task_names)
loss_dict = {} # Stores the loss by task
count_dict = {} # Stores the number of active examples by task
for label_name, task_name in labels_to_tasks.items():
loss_name = f"{task_name}/{payload_name}/{label_name}/loss"
Y = Ys[label_name]
assert isinstance(Y, torch.Tensor)
out = outputs[task_name]
            # Identify which instances have at least one non-zero target label
active = torch.any(Y.detach() != 0, dim=1)
count_dict[loss_name] = active.sum().item()
# If there are inactive instances, slice them out to save computation
# and ignore their contribution to the loss
if 0 in active:
Y = Y[active]
if isinstance(out, torch.Tensor):
out = out[active]
# If the output of the head has multiple fields, slice them all
elif isinstance(out, dict):
out = move_to_device({k: v[active] for k, v in out.items()})
# Convert to half precision last thing if applicable
if self.config["fp16"] and Y.dtype == torch.float32:
out["data"] = out["data"].half()
Y = Y.half()
# If no examples in this batch have labels for this task, skip loss calc
# Active has type torch.uint8; avoid overflow with long()
if active.long().sum():
label_loss = self.loss_hat_funcs[task_name](
out, move_to_device(Y, self.config["device"])
)
assert isinstance(label_loss.item(), float)
loss_dict[loss_name] = (
label_loss * self.task_map[task_name].loss_multiplier
)
return loss_dict, count_dict
@torch.no_grad()
def calculate_probs(self, X, task_names):
"""Returns a dict of {task_name: probs}
Args:
X: instances to feed through the network
task_names: the names of the tasks for which to calculate outputs
Returns:
{task_name: probs}: probs is the output of the output_hat for the given
task_head
The type of each entry in probs depends on the task type:
instance-based tasks: each entry in probs is a [k]-len array
token-based tasks: each entry is a [seq_len, k] array
"""
assert self.eval()
return {
t: [probs.cpu().numpy() for probs in self.output_hat_funcs[t](out)]
for t, out in self.forward(X, task_names).items()
}
def update_config(self, update_dict):
"""Updates self.config with the values in a given update dictionary."""
self.config = recursive_merge_dicts(self.config, update_dict)
def load_weights(self, model_path):
"""Load model weights from checkpoint."""
if self.config["device"] >= 0:
device = torch.device(f"cuda:{self.config['device']}")
else:
device = torch.device("cpu")
try:
self.load_state_dict(torch.load(model_path, map_location=device)["model"])
except RuntimeError:
print("Your destination state dict has different keys for the update key.")
self.load_state_dict(
torch.load(model_path, map_location=device)["model"], strict=False
)
def save_weights(self, model_path):
"""Saves weight in checkpoint directory"""
raise NotImplementedError
@torch.no_grad()
def score(self, payload, metrics=[], verbose=True, **kwargs):
"""Calculate the requested metrics for the given payload
Args:
payload: a Payload to score
metrics: a list of full metric names, a single full metric name, or []:
list: a list of full metric names supported by the tasks' Scorers.
(full metric names are of the form task/payload/labelset/metric)
Only these metrics will be calculated and returned.
[]: defaults to all supported metrics for the given payload's Tasks
str: a single full metric name
A single score will be returned instead of a dictionary
Returns:
scores: a dict of the form {metric_name: score} corresponding to the
requested metrics (optionally a single score if metrics is a string
instead of a list)
"""
self.eval()
return_unwrapped = isinstance(metrics, str)
# If no specific metrics were requested, calculate all available metrics
if metrics:
metrics_list = metrics if isinstance(metrics, list) else [metrics]
assert all(len(metric.split("/")) == 4 for metric in metrics_list)
target_metrics = defaultdict(list)
target_tasks = []
target_labels = []
for full_metric_name in metrics:
task_name, payload_name, label_name, metric_name = full_metric_name.split(
"/"
)
target_tasks.append(task_name)
target_labels.append(label_name)
target_metrics[label_name].append(metric_name)
else:
target_tasks = set(payload.labels_to_tasks.values())
target_labels = set(payload.labels_to_tasks.keys())
target_metrics = {
label_name: None for label_name in payload.labels_to_tasks
}
Ys, Ys_probs, Ys_preds = self.predict_with_gold(
payload, target_tasks, target_labels, return_preds=True, **kwargs
)
metrics_dict = {}
for label_name, task_name in payload.labels_to_tasks.items():
scorer = self.task_map[task_name].scorer
task_metrics_dict = scorer.score(
Ys[label_name],
Ys_probs[task_name],
Ys_preds[task_name],
target_metrics=target_metrics[label_name],
)
# Expand short metric names into full metric names
for metric_name, score in task_metrics_dict.items():
full_metric_name = (
f"{task_name}/{payload.name}/{label_name}/{metric_name}"
)
metrics_dict[full_metric_name] = score
# If a single metric was given as a string (not list), return a float
if return_unwrapped:
metric, score = metrics_dict.popitem()
return score
else:
return metrics_dict
@torch.no_grad()
def predict_with_gold(
self,
payload,
target_tasks=None,
target_labels=None,
return_preds=False,
max_examples=0,
**kwargs,
):
"""Extracts Y and calculates Y_prods, Y_preds for the given payload and tasks
To get just the probabilities or predictions for a single task, consider using
predict() or predict_probs().
Args:
payload: the Payload to make predictions for
target_tasks: if not None, predict probs only for the specified tasks;
otherwise, predict probs for all tasks with corresponding labelsets
in the payload
target_labels: if not None, return labels for only the specified labelsets;
otherwise, return all labelsets
return_preds: if True, also include preds in return values
max_examples: if > 0, predict for a maximum of this many examples
# TODO: consider returning Ys as tensors instead of lists (padded if necessary)
Returns:
Ys: a {label_name: Y} dict where Y is an [n] list of labels (often ints)
Ys_probs: a {task_name: Y_probs} dict where Y_probs is a [n] list of
probabilities
Ys_preds: a {task_name: Y_preds} dict where Y_preds is a [n] list of
predictions
"""
validate_targets(payload, target_tasks, target_labels)
if target_tasks is None:
target_tasks = set(payload.labels_to_tasks.values())
elif isinstance(target_tasks, str):
target_tasks = [target_tasks]
Ys = defaultdict(list)
Ys_probs = defaultdict(list)
total = 0
for batch_num, (Xb, Yb) in enumerate(payload.data_loader):
Yb_probs = self.calculate_probs(Xb, target_tasks)
for task_name, yb_probs in Yb_probs.items():
Ys_probs[task_name].extend(yb_probs)
for label_name, yb in Yb.items():
if target_labels is None or label_name in target_labels:
Ys[label_name].extend(yb.cpu().numpy())
total += len(Xb)
if max_examples > 0 and total >= max_examples:
break
if max_examples:
Ys = {label_name: Y[:max_examples] for label_name, Y in Ys.items()}
Ys_probs = {
task_name: Y_probs[:max_examples]
for task_name, Y_probs in Ys_probs.items()
}
if return_preds:
Ys_preds = {
task_name: [probs_to_preds(y_probs) for y_probs in Y_probs]
for task_name, Y_probs in Ys_probs.items()
}
return Ys, Ys_probs, Ys_preds
else:
return Ys, Ys_probs
# Single-task prediction helpers (for convenience)
@torch.no_grad()
def predict_probs(self, payload, task_name=None, **kwargs):
"""Return probabilistic labels for a single task of a payload
Args:
payload: a Payload
task_name: the task to calculate probabilities for
If task_name is None and the payload includes labels for only one task,
return predictions for that task. If task_name is None and the payload
includes labels for more than one task, raise an exception.
Returns:
Y_probs: an [n] list of probabilities
"""
self.eval()
if task_name is None:
if len(payload.labels_to_tasks) > 1:
msg = (
"The payload you provided contains labels for more than one "
"task, so task_name cannot be None."
)
raise Exception(msg)
else:
task_name = next(iter(payload.labels_to_tasks.values()))
target_tasks = [task_name]
_, Ys_probs = self.predict_with_gold(payload, target_tasks, **kwargs)
return Ys_probs[task_name]
@torch.no_grad()
def predict(self, payload, task_name=None, return_probs=False, **kwargs):
"""Return predicted labels for a single task of a payload
Args:
payload: a Payload
task_name: the task to calculate predictions for
If task_name is None and the payload includes labels for only one task,
return predictions for that task. If task_name is None and the payload
includes labels for more than one task, raise an exception.
Returns:
Y_probs: an [n] list of probabilities
Y_preds: an [n] list of predictions
"""
self.eval()
if task_name is None:
if len(payload.labels_to_tasks) > 1:
msg = (
"The payload you provided contains labels for more than one "
"task, so task_name cannot be None."
)
raise Exception(msg)
else:
task_name = next(iter(payload.labels_to_tasks.values()))
target_tasks = [task_name]
_, Ys_probs, Ys_preds = self.predict_with_gold(
payload, target_tasks, return_preds=True, **kwargs
)
Y_probs = Ys_probs[task_name]
Y_preds = Ys_preds[task_name]
if return_probs:
return Y_preds, Y_probs
else:
return Y_preds
def validate_targets(payload, target_tasks, target_labels):
if target_tasks:
for task_name in target_tasks:
if task_name not in set(payload.labels_to_tasks.values()):
msg = (
f"Could not find the specified task_name {task_name} in "
f"payload {payload}."
)
raise Exception(msg)
if target_labels:
for label_name in target_labels:
if label_name not in payload.labels_to_tasks:
msg = (
f"Could not find the specified labelset {label_name} in "
f"payload {payload}."
)
raise Exception(msg)
def probs_to_preds(probs):
"""Identifies the largest probability in each column on the last axis
We add 1 to the argmax to account for the fact that all labels in MeTaL are
categorical and the 0 label is reserved for abstaining weak labels.
"""
# TODO: Consider replacing argmax with a version of the rargmax utility to randomly
# break ties instead of accepting the first one, or allowing other tie-breaking
# strategies
return np.argmax(probs, axis=-1) + 1
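# Editor's illustrative check, not part of the original module: probs_to_preds maps
# probability vectors to 1-indexed categorical labels, since label 0 is reserved for
# abstaining weak labels in MeTaL.
if __name__ == "__main__":
    example_probs = np.array([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]])
    # argmax along the last axis is [1, 0]; adding 1 yields labels [2, 1]
    assert probs_to_preds(example_probs).tolist() == [2, 1]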
|
torchtext/datasets/conll2000chunking.py
|
parmeet/text
| 3,172 |
92983
|
from torchtext.data.datasets_utils import (
_RawTextIterableDataset,
_wrap_split_argument,
_add_docstring_header,
_download_extract_validate,
_create_dataset_directory,
_create_data_from_iob,
)
import os
import logging
URL = {
'train': "https://www.clips.uantwerpen.be/conll2000/chunking/train.txt.gz",
'test': "https://www.clips.uantwerpen.be/conll2000/chunking/test.txt.gz",
}
MD5 = {
'train': "6969c2903a1f19a83569db643e43dcc8",
'test': "a916e1c2d83eb3004b38fc6fcd628939",
}
NUM_LINES = {
'train': 8936,
'test': 2012,
}
_EXTRACTED_FILES = {
'train': 'train.txt',
'test': 'test.txt'
}
_EXTRACTED_FILES_MD5 = {
'train': "2e2f24e90e20fcb910ab2251b5ed8cd0",
'test': "56944df34be553b72a2a634e539a0951"
}
DATASET_NAME = "CoNLL2000Chunking"
@_add_docstring_header(num_lines=NUM_LINES)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(('train', 'test'))
def CoNLL2000Chunking(root, split):
# Create a dataset specific subfolder to deal with generic download filenames
root = os.path.join(root, 'conll2000chunking')
path = os.path.join(root, split + ".txt.gz")
data_filename = _download_extract_validate(root, URL[split], MD5[split], path, os.path.join(root, _EXTRACTED_FILES[split]),
_EXTRACTED_FILES_MD5[split], hash_type="md5")
logging.info('Creating {} data'.format(split))
return _RawTextIterableDataset(DATASET_NAME, NUM_LINES[split],
_create_data_from_iob(data_filename, " "))
|
LeetCode/python3/650.py
|
ZintrulCre/LeetCode_Archiver
| 279 |
92990
|
class Solution:
def minSteps(self, n: int) -> int:
res, m = 0, 2
while n > 1:
while n % m == 0:
res += m
n //= m
m += 1
return res
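# Editor's sketch, not part of the original solution: minSteps sums the prime factors
# of n, because producing p copies of the current block costs one Copy All plus p - 1
# Paste operations.
if __name__ == "__main__":
    s = Solution()
    assert s.minSteps(1) == 0  # already have a single 'A'
    assert s.minSteps(6) == 5  # 2 + 3: double it, then triple it
    assert s.minSteps(9) == 6  # 3 + 3: triple it twice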
|
websauna/system/core/traversal.py
|
stevepiercy/websauna
| 286 |
93073
|
<filename>websauna/system/core/traversal.py<gh_stars>100-1000
"""Traversing core logic."""
# Pyramid
from pyramid.interfaces import ILocation
from zope.interface import implementer
@implementer(ILocation)
class Resource:
"""Traversable resource in a nested tree hierarchy with basic breadcrumbs support.
    All traversable context classes should inherit from this class. Note that this is not a strict requirement, as often anything implementing :py:class:`pyramid.interfaces.ILocation` and ``get_title()`` will work.
For more information see :ref:`Traversal <traversal>`.
.. _traversal:
"""
# TODO: Cannot annotate request as it breaks sphinx-autodoc-typehints, sphinx-autodoc-typehints==1.1.0, when doing make html
def __init__(self, request):
#: Pointer to the parent object in traverse hierarchy. This is none until make_lineage is called.
self.__parent__ = None
        #: The id of this resource as it appears in the URL and traversing path
self.__name__ = None
self.request = request
def get_title(self) -> str:
"""Return human-readable title of this resource.
This is viewed in admin breadcrumbs path, etc.
"""
title = getattr(self, "title", None)
if title:
return title
raise NotImplementedError("get_title() implementation missing for {}".format(self))
@classmethod
def make_lineage(self, parent, child, name, allow_new_parent=False) -> "Resource":
"""Set traversing pointers between the child and the parent resources.
Builds __parent__ and __name__ pointer and sets it on the child resource.
        * If the lineage relationship is not lazy and the referenced child is stored in the parent, the lineage must be set when the child is put into the parent container.
        * If the lineage relationship is lazy and the child resource is constructed upon lookup in ``__item__``, the lineage is constructed before the child is returned.
        :param parent: Parent resource the child becomes part of
:param child: Child resource mutated in place
:param name: Id of the child resource as it will appear in the URL traversing path
        :param allow_new_parent: If the child already has a parent assigned, allow overriding the parent, i.e. basically moving an existing resource. You don't usually want this for in-memory resources; the check is there to catch bugs.
:return: The mutated child resource
"""
assert child
assert parent
assert name
if not allow_new_parent:
            # Catch bugs when you try to double lineage a persistent parent -> child relationship
assert not getattr(child, "__parent__", None), "Tried to double init lineage for {} -> {}, previous parent was {}".format(parent, child, child.__parent__)
child.__parent__ = parent
child.__name__ = name
return child
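# Editor's sketch, not part of the original module: a minimal illustration of how
# make_lineage() wires the traversal pointers. The request argument is irrelevant for
# this check, so None is passed as a stand-in.
if __name__ == "__main__":
    parent = Resource(None)
    child = Resource(None)
    Resource.make_lineage(parent, child, "child-id")
    assert child.__parent__ is parent
    assert child.__name__ == "child-id"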
|
python/londiste/handlers/shard.py
|
zzahti/skytools
| 116 |
93115
|
<filename>python/londiste/handlers/shard.py
"""Event filtering by hash, for partitioned databases.
Parameters:
key=COLUMN: column name to use for hashing
hash_key=COLUMN: column name to use for hashing (overrides 'key' parameter)
hashfunc=NAME: function to use for hashing (default: partconf.get_hash_raw)
hashexpr=EXPR: full expression to use for hashing (deprecated)
encoding=ENC: validate and fix incoming data (only utf8 supported atm)
ignore_truncate=BOOL: ignore truncate event, default: 0, values: 0,1
On root node:
* Hash of key field will be added to ev_extra3.
This is implemented by adding additional trigger argument:
ev_extra3='hash='||partconf.get_hash_raw(key_column)
On branch/leaf node:
* On COPY time, the SELECT on provider side gets filtered by hash.
* On replay time, the events get filtered by looking at the hash in ev_extra3.
Local config:
* Local hash value and mask are loaded from partconf.conf table.
"""
import skytools
from londiste.handler import TableHandler
__all__ = ['ShardHandler', 'PartHandler']
class ShardHandler (TableHandler):
__doc__ = __doc__
handler_name = 'shard'
DEFAULT_HASHFUNC = "partconf.get_hash_raw"
DEFAULT_HASHEXPR = "%s(%s)"
def __init__(self, table_name, args, dest_table):
TableHandler.__init__(self, table_name, args, dest_table)
self.hash_mask = None # aka max part number (atm)
self.shard_nr = None # part number of local node
# primary key columns
self.hash_key = args.get('hash_key', args.get('key'))
self._validate_hash_key()
# hash function & full expression
hashfunc = args.get('hashfunc', self.DEFAULT_HASHFUNC)
self.hashexpr = self.DEFAULT_HASHEXPR % (
skytools.quote_fqident(hashfunc),
skytools.quote_ident(self.hash_key or ''))
self.hashexpr = args.get('hashexpr', self.hashexpr)
def _validate_hash_key(self):
if self.hash_key is None:
raise Exception('Specify hash key field as hash_key argument')
def reset(self):
"""Forget config info."""
self.hash_mask = None
self.shard_nr = None
TableHandler.reset(self)
def add(self, trigger_arg_list):
"""Let trigger put hash into extra3"""
arg = "ev_extra3='hash='||%s" % self.hashexpr
trigger_arg_list.append(arg)
TableHandler.add(self, trigger_arg_list)
def prepare_batch(self, batch_info, dst_curs):
"""Called on first event for this table in current batch."""
if self.hash_key is not None:
if not self.hash_mask:
self.load_shard_info(dst_curs)
TableHandler.prepare_batch(self, batch_info, dst_curs)
def process_event(self, ev, sql_queue_func, arg):
"""Filter event by hash in extra3, apply only if for local shard."""
if ev.extra3 and self.hash_key is not None:
meta = skytools.db_urldecode(ev.extra3)
self.log.debug('shard.process_event: hash=%i, hash_mask=%i, shard_nr=%i',
int(meta['hash']), self.hash_mask, self.shard_nr)
if (int(meta['hash']) & self.hash_mask) != self.shard_nr:
self.log.debug('shard.process_event: not my event')
return
self._process_event(ev, sql_queue_func, arg)
def _process_event(self, ev, sql_queue_func, arg):
self.log.debug('shard.process_event: my event, processing')
TableHandler.process_event(self, ev, sql_queue_func, arg)
def get_copy_condition(self, src_curs, dst_curs):
"""Prepare the where condition for copy and replay filtering"""
if self.hash_key is None:
return TableHandler.get_copy_condition(self, src_curs, dst_curs)
self.load_shard_info(dst_curs)
w = "(%s & %d) = %d" % (self.hashexpr, self.hash_mask, self.shard_nr)
self.log.debug('shard: copy_condition=%r', w)
return w
def load_shard_info(self, curs):
"""Load part/slot info from database."""
q = "select part_nr, max_part from partconf.conf"
curs.execute(q)
self.shard_nr, self.hash_mask = curs.fetchone()
if self.shard_nr is None or self.hash_mask is None:
raise Exception('Error loading shard info')
class PartHandler (ShardHandler):
__doc__ = "Deprecated compat name for shard handler.\n" + __doc__.split('\n',1)[1]
handler_name = 'part'
# register handler class
__londiste_handlers__ = [ShardHandler, PartHandler]
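# Editor's sketch, not part of the original handler: the replay-time decision made in
# process_event()/get_copy_condition() reduces to a bitwise check of the event hash
# against the local node's hash mask and part number. The numbers below are made up
# purely for illustration.
if __name__ == "__main__":
    hash_mask = 15  # e.g. 16-way partitioning (max_part)
    shard_nr = 3    # part number of the local node
    for ev_hash in (3, 19, 35, 4):
        is_mine = (ev_hash & hash_mask) == shard_nr
        print("event hash %d -> %s" % (ev_hash, "process" if is_mine else "skip"))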
|
rbp_eclip/custom_keras_objects.py
|
Luma-1994/lama
| 137 |
93117
|
<reponame>Luma-1994/lama
import concise
# all the custom objects are already loaded through importing concise
OBJECTS = None
# new concise version
# OBJECTS = concise.custom_objects
|
notebooks/plotting.py
|
syllogismos/wandb_trpo
| 379 |
93126
|
"""
Short Plotting Routine to Plot Pandas Dataframes by Column Label
1. Takes list of dateframes to compare multiple trials
2. Takes list of y-variables to combine on 1 plot
3. Legend location and y-axis limits can be customized
Written by <NAME> (pat-coady.github.io)
"""
import matplotlib.pyplot as plt
def df_plot(dfs, x, ys, ylim=None, xlim=None, legend_loc='best'):
""" Plot y vs. x curves from pandas dataframe(s)
Args:
        dfs: list of (dataframe, name) tuples
x: str column label for x variable
ys: list of str column labels for y variable(s)
ylim: tuple to override automatic y-axis limits
xlim: tuple to override automatic x-axis limits
legend_loc: str to override automatic legend placement:
'upper left', 'lower left', 'lower right' , 'right' ,
'center left', 'center right', 'lower center',
'upper center', and 'center'
"""
if ylim is not None:
plt.ylim(ylim)
if xlim is not None:
plt.xlim(xlim)
for df, name in dfs:
if '_' in name:
name = name.split('_')[1]
for y in ys:
plt.plot(df[x], df[y], linewidth=1,
label=name + ' ' + y.replace('_', ''))
plt.xlabel(x.replace('_', ''))
plt.legend(loc=legend_loc)
plt.show()
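# Editor's usage sketch, not part of the original module: plotting two made-up
# training curves. The column and run names below are arbitrary examples.
if __name__ == '__main__':
    import pandas as pd

    df_a = pd.DataFrame({'episode': range(10), 'reward': [1.0 * i for i in range(10)]})
    df_b = pd.DataFrame({'episode': range(10), 'reward': [0.5 * i for i in range(10)]})
    # df_plot takes (dataframe, name) pairs; a name containing '_' keeps only the part
    # after the first underscore in the legend.
    df_plot([(df_a, 'run_a'), (df_b, 'run_b')], x='episode', ys=['reward'])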
|
gpflowopt/scaling.py
|
kyu999/GPflowOpt
| 258 |
93130
|
<filename>gpflowopt/scaling.py<gh_stars>100-1000
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gpflow.param import DataHolder, AutoFlow
from gpflow import settings
import numpy as np
from .transforms import LinearTransform, DataTransform
from .domain import UnitCube
from .models import ModelWrapper
float_type = settings.dtypes.float_type
class DataScaler(ModelWrapper):
"""
Model-wrapping class, primarily intended to assure the data in GPflow models is scaled.
One DataScaler wraps one GPflow model, and can scale the input as well as the output data. By default,
if any kind of object attribute is not found in the datascaler object, it is searched on the wrapped model.
The datascaler supports both input as well as output scaling, although both scalings are set up differently:
- For input, the transform is not automatically generated. By default, the input transform is the identity
transform. The input transform can be set through the setter property, or by specifying a domain in the
constructor. For the latter, the input transform will be initialized as the transform from the specified domain to
a unit cube. When X is updated, the transform does not change.
- If enabled: for output the data is always scaled to zero mean and unit variance. This means that if the Y property
is set, the output transform is first calculated, then the data is scaled.
By default, :class:`~.acquisition.Acquisition` objects will always wrap each model received. However, the input and output transforms
will be the identity transforms, and output normalization is switched off. It is up to the user (or
specialized classes such as the BayesianOptimizer) to correctly configure the datascalers involved.
By carrying out the scaling at such a deep level in the framework, it is possible to keep the scaling
hidden throughout the rest of GPflowOpt. This means that, during implementation of acquisition functions it is safe
to assume the data is not scaled, and is within the configured optimization domain. There is only one exception:
the hyperparameters are determined on the scaled data, and are NOT automatically unscaled by this class because the
datascaler does not know what model is wrapped and what kernels are used. Should hyperparameters of the model be
    required, it is the responsibility of the implementation to rescale the hyperparameters. Additionally,
    hyperpriors should be specified with the scaled data in mind.
"""
def __init__(self, model, domain=None, normalize_Y=False):
"""
:param model: model to be wrapped
:param domain: (default: None) if supplied, the input transform is configured from the supplied domain to
:class:`.UnitCube`. If None, the input transform defaults to the identity transform.
:param normalize_Y: (default: False) enable automatic scaling of output values to zero mean and unit
variance.
"""
# model sanity checks, slightly stronger conditions than the wrapper
super(DataScaler, self).__init__(model)
# Initial configuration of the datascaler
n_inputs = model.X.shape[1]
n_outputs = model.Y.shape[1]
self._input_transform = (domain or UnitCube(n_inputs)) >> UnitCube(n_inputs)
self._normalize_Y = normalize_Y
self._output_transform = LinearTransform(np.ones(n_outputs), np.zeros(n_outputs))
self.X = model.X.value
self.Y = model.Y.value
@property
def input_transform(self):
"""
Get the current input transform
:return: :class:`.DataTransform` input transform object
"""
return self._input_transform
@input_transform.setter
def input_transform(self, t):
"""
Configure a new input transform.
Data in the wrapped model is automatically updated with the new transform.
:param t: :class:`.DataTransform` object: the new input transform.
"""
assert isinstance(t, DataTransform)
X = self.X.value # unscales the data
self._input_transform.assign(t)
        self.X = X  # re-scale using the new input transform
@property
def output_transform(self):
"""
Get the current output transform
:return: :class:`.DataTransform` output transform object
"""
return self._output_transform
@output_transform.setter
def output_transform(self, t):
"""
Configure a new output transform. Data in the model is automatically updated with the new transform.
:param t: :class:`.DataTransform` object: the new output transform.
"""
assert isinstance(t, DataTransform)
Y = self.Y.value
self._output_transform.assign(t)
self.Y = Y
@property
def normalize_output(self):
"""
:return: boolean, indicating if output is automatically scaled to zero mean and unit variance.
"""
return self._normalize_Y
@normalize_output.setter
def normalize_output(self, flag):
"""
Enable/disable automated output scaling. If switched off, the output transform becomes the identity transform.
If enabled, data will be automatically scaled to zero mean and unit variance. When the output normalization is
switched on or off, the data in the model is automatically adapted.
:param flag: boolean, turn output scaling on or off
"""
self._normalize_Y = flag
if not flag:
# Output normalization turned off. Reset transform to identity
self.output_transform = LinearTransform(np.ones(self.Y.value.shape[1]), np.zeros(self.Y.value.shape[1]))
else:
# Output normalization enabled. Trigger scaling.
self.Y = self.Y.value
# Methods overwriting methods of the wrapped model.
@property
def X(self):
"""
Returns the input data of the model, unscaled.
:return: :class:`.DataHolder`: unscaled input data
"""
return DataHolder(self.input_transform.backward(self.wrapped.X.value))
@property
def Y(self):
"""
Returns the output data of the wrapped model, unscaled.
:return: :class:`.DataHolder`: unscaled output data
"""
return DataHolder(self.output_transform.backward(self.wrapped.Y.value))
@X.setter
def X(self, x):
"""
Set the input data. Applies the input transform before setting the data of the wrapped model.
"""
self.wrapped.X = self.input_transform.forward(x.value if isinstance(x, DataHolder) else x)
@Y.setter
def Y(self, y):
"""
Set the output data. In case normalize_Y=True, the appropriate output transform is updated. It is then
applied on the data before setting the data of the wrapped model.
"""
value = y.value if isinstance(y, DataHolder) else y
if self.normalize_output:
self.output_transform.assign(~LinearTransform(value.std(axis=0), value.mean(axis=0)))
self.wrapped.Y = self.output_transform.forward(value)
def build_predict(self, Xnew, full_cov=False):
"""
build_predict builds the TensorFlow graph for prediction. Similar to the method in the wrapped model, however
the input points are transformed using the input transform. The returned mean and variance are transformed
backward using the output transform.
"""
f, var = self.wrapped.build_predict(self.input_transform.build_forward(Xnew), full_cov=full_cov)
return self.output_transform.build_backward(f), self.output_transform.build_backward_variance(var)
@AutoFlow((float_type, [None, None]))
def predict_f(self, Xnew):
"""
Compute the mean and variance of held-out data at the points Xnew
"""
return self.build_predict(Xnew)
@AutoFlow((float_type, [None, None]))
def predict_f_full_cov(self, Xnew):
"""
Compute the mean and variance of held-out data at the points Xnew
"""
return self.build_predict(Xnew, full_cov=True)
@AutoFlow((float_type, [None, None]))
def predict_y(self, Xnew):
"""
Compute the mean and variance of held-out data at the points Xnew
"""
f, var = self.wrapped.build_predict(self.input_transform.build_forward(Xnew))
f, var = self.likelihood.predict_mean_and_var(f, var)
return self.output_transform.build_backward(f), self.output_transform.build_backward_variance(var)
@AutoFlow((float_type, [None, None]), (float_type, [None, None]))
def predict_density(self, Xnew, Ynew):
"""
Compute the (log) density of the data Ynew at the points Xnew
"""
mu, var = self.wrapped.build_predict(self.input_transform.build_forward(Xnew))
Ys = self.output_transform.build_forward(Ynew)
return self.likelihood.predict_density(mu, var, Ys)
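# Editor's sketch, not part of the original module: numerically, the normalize_Y path
# above (output_transform built from ~LinearTransform(std, mean)) scales the outputs to
# zero mean and unit variance per dimension, which is just the following numpy idea
# applied to made-up data.
if __name__ == "__main__":
    Y_demo = np.random.RandomState(0).randn(50, 2) * 3.0 + 7.0
    Y_scaled = (Y_demo - Y_demo.mean(axis=0)) / Y_demo.std(axis=0)
    assert np.allclose(Y_scaled.mean(axis=0), 0.0)
    assert np.allclose(Y_scaled.std(axis=0), 1.0)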
|
test/test_clamp.py
|
SimlaBurcu/QPyTorch
| 172 |
93166
|
import torch
import unittest
from qtorch.quant import *
from qtorch import FixedPoint, BlockFloatingPoint, FloatingPoint
DEBUG = False
log = lambda m: print(m) if DEBUG else False
class TestStochastic(unittest.TestCase):
"""
    invariant: quantized numbers cannot be greater than the maximum representable number
    or lower than the minimum representable number
"""
def test_fixed(self):
"""test fixed point clamping"""
for d in ["cpu", "cuda"]:
for r in ["stochastic", "nearest"]:
wl = 5
fl = 4
t_min = -(2 ** (wl - fl - 1))
t_max = 2 ** (wl - fl - 1) - 2 ** (-fl)
a = torch.linspace(-2, 2, steps=100, device=d)
clamp_a = fixed_point_quantize(a, wl=wl, fl=fl, clamp=True, rounding=r)
self.assertEqual(t_max, clamp_a.max().item())
self.assertEqual(t_min, clamp_a.min().item())
a = torch.linspace(-2, 2, steps=100, device=d)
no_clamp_a = fixed_point_quantize(a, wl=wl, fl=fl, clamp=False, rounding=r)
self.assertLess(t_max, no_clamp_a.max().item())
self.assertGreater(t_min, no_clamp_a.min().item())
def test_float(self):
"""test floating point quantization"""
formats = [(2,2),(2,3),(3,2)]
for exp, man in formats:
for d in ["cpu", "cuda"]:
for r in ["stochastic", "nearest"]:
a_max = 2 ** (2 ** (exp - 1)) * (1 - 2 ** (-man - 1))
a_min = 2 ** (-(2 ** (exp - 1)) + 1)
max_exp=int((2**exp)/2)
min_exp=-(max_exp-2)
mantissa_step=2**(-man)
min_mantissa=mantissa_step # When denormalized
max_mantissa=2-mantissa_step # When normalized, mantissa goes from 1 to 2-mantissa_step
a_min = 2**min_exp*min_mantissa
a_max = 2**max_exp*max_mantissa
expected_vals=[]
log(f"With {exp} exponent bits, our exponent goes from {min_exp} to {max_exp}")
log(f"With {man} mantissa bits, our mantissa goes from {min_mantissa} (denormalized) to {max_mantissa}")
log(f"With {man} mantissa bits and {exp} exponent bits, we can go from {a_min} to {a_max}")
representable_normalized =[]
for sign in [1,-1]:
for e in range(0,2**exp):
for m in range(0,2**man):
if e==0:
val = sign*(2**(e+min_exp)*m*2**(-man))
log(f"{0 if sign==1 else 1} {e:0{exp}b} {m:0{man}b} = {sign} * 2^{e+min_exp} * {m*2**(-man)} \t= {val} (denormalized)")
else:
val = sign*(2**(e+min_exp-1)*(1+(m*2**(-man))))
log(f"{0 if sign==1 else 1} {e:0{exp}b} {m:0{man}b} = {sign} * 2^{e+min_exp-1} * {1+(m*2**(-man))} \t= {val}")
if val not in expected_vals:
expected_vals.append(val)
expected_vals.sort()
                    # Black box test to get representable numbers
import numpy as np
quant_vals=[]
for i in np.arange(-30,30,.01):
a = torch.Tensor([i]).to(device=d)
quant_a = float_quantize(a, exp=exp, man=man, rounding=r)
if quant_a[0] not in quant_vals:
quant_vals.append(quant_a[0].item())
log("Values representable in QPytorch")
log(quant_vals)
self.assertEqual(quant_vals, expected_vals)
if __name__ == "__main__":
unittest.main()
|
Maya/cicd/python/libMayaExtended/libMayaExtended/mayaSceneApi.py
|
Mu-L/Exporters
| 445 |
93180
|
<filename>Maya/cicd/python/libMayaExtended/libMayaExtended/mayaSceneApi.py
import maya.OpenMaya as OpenMaya
import maya.OpenMayaRender as OpenMayaRender
|
test.py
|
kentsommer/tensorflow-posenet
| 302 |
93209
|
# Import the converted model's class
import numpy as np
import random
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell
from posenet import GoogLeNet as PoseNet
import cv2
from tqdm import tqdm
import math
batch_size = 75
max_iterations = 30000
# Set this path to your project directory
path = 'path_to_project/'
# Set this path to your dataset directory
directory = 'path_to_datasets/KingsCollege/'
dataset = 'dataset_test.txt'
class datasource(object):
def __init__(self, images, poses):
self.images = images
self.poses = poses
class vecsource(object):
def __init__(self, vecs, poses):
self.vecs = vecs
self.poses = poses
def centeredCrop(img, output_side_length):
height, width, depth = img.shape
new_height = output_side_length
new_width = output_side_length
if height > width:
new_height = output_side_length * height / width
else:
new_width = output_side_length * width / height
height_offset = (new_height - output_side_length) / 2
width_offset = (new_width - output_side_length) / 2
cropped_img = img[height_offset:height_offset + output_side_length,
width_offset:width_offset + output_side_length]
return cropped_img
def preprocess(images):
images_out = [] #final result
#Resize and crop and compute mean!
images_cropped = []
for i in tqdm(range(len(images))):
X = cv2.imread(images[i])
X = cv2.resize(X, (455, 256))
X = centeredCrop(X, 224)
images_cropped.append(X)
#compute images mean
N = 0
mean = np.zeros((1, 3, 224, 224))
for X in tqdm(images_cropped):
X = np.transpose(X,(2,0,1))
mean[0][0] += X[:,:,0]
mean[0][1] += X[:,:,1]
mean[0][2] += X[:,:,2]
N += 1
mean[0] /= N
#Subtract mean from all images
for X in tqdm(images_cropped):
X = np.transpose(X,(2,0,1))
X = X - mean
X = np.squeeze(X)
X = np.transpose(X, (1,2,0))
Y = np.expand_dims(X, axis=0)
images_out.append(Y)
return images_out
def get_data():
poses = []
images = []
with open(directory+dataset) as f:
next(f) # skip the 3 header lines
next(f)
next(f)
for line in f:
fname, p0,p1,p2,p3,p4,p5,p6 = line.split()
p0 = float(p0)
p1 = float(p1)
p2 = float(p2)
p3 = float(p3)
p4 = float(p4)
p5 = float(p5)
p6 = float(p6)
poses.append((p0,p1,p2,p3,p4,p5,p6))
images.append(directory+fname)
images = preprocess(images)
return datasource(images, poses)
def gen_data(source):
while True:
indices = range(len(source.images))
random.shuffle(indices)
for i in indices:
image = source.images[i]
pose_x = source.poses[i][0:3]
pose_q = source.poses[i][3:7]
yield image, pose_x, pose_q
def gen_data_batch(source):
data_gen = gen_data(source)
while True:
image_batch = []
pose_x_batch = []
pose_q_batch = []
for _ in range(batch_size):
image, pose_x, pose_q = next(data_gen)
image_batch.append(image)
pose_x_batch.append(pose_x)
pose_q_batch.append(pose_q)
yield np.array(image_batch), np.array(pose_x_batch), np.array(pose_q_batch)
def main():
image = tf.placeholder(tf.float32, [1, 224, 224, 3])
datasource = get_data()
results = np.zeros((len(datasource.images),2))
net = PoseNet({'data': image})
p3_x = net.layers['cls3_fc_pose_xyz']
p3_q = net.layers['cls3_fc_pose_wpqr']
init = tf.initialize_all_variables()
outputFile = "PoseNet.ckpt"
saver = tf.train.Saver()
with tf.Session() as sess:
# Load the data
sess.run(init)
saver.restore(sess, path + 'PoseNet.ckpt')
data_gen = gen_data_batch(datasource)
for i in range(len(datasource.images)):
np_image = datasource.images[i]
feed = {image: np_image}
pose_q= np.asarray(datasource.poses[i][3:7])
pose_x= np.asarray(datasource.poses[i][0:3])
predicted_x, predicted_q = sess.run([p3_x, p3_q], feed_dict=feed)
pose_q = np.squeeze(pose_q)
pose_x = np.squeeze(pose_x)
predicted_q = np.squeeze(predicted_q)
predicted_x = np.squeeze(predicted_x)
#Compute Individual Sample Error
q1 = pose_q / np.linalg.norm(pose_q)
q2 = predicted_q / np.linalg.norm(predicted_q)
d = abs(np.sum(np.multiply(q1,q2)))
theta = 2 * np.arccos(d) * 180/math.pi
error_x = np.linalg.norm(pose_x-predicted_x)
results[i,:] = [error_x,theta]
print 'Iteration: ', i, ' Error XYZ (m): ', error_x, ' Error Q (degrees): ', theta
median_result = np.median(results,axis=0)
print 'Median error ', median_result[0], 'm and ', median_result[1], 'degrees.'
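# Editor's illustrative helper, not part of the original script: the per-sample
# orientation error computed above, factored into a standalone function. The only
# addition is clamping the dot product to 1.0 to avoid NaN from rounding error.
def quaternion_angle_degrees(q1, q2):
    """Angular difference between two quaternions, in degrees."""
    q1 = np.asarray(q1, dtype=float) / np.linalg.norm(q1)
    q2 = np.asarray(q2, dtype=float) / np.linalg.norm(q2)
    d = min(abs(np.sum(np.multiply(q1, q2))), 1.0)
    return 2 * np.arccos(d) * 180 / math.pi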
if __name__ == '__main__':
main()
|
gamestonk_terminal/portfolio/brokers/ally/ally_model.py
|
Aerex/GamestonkTerminal
| 1,835 |
93248
|
<filename>gamestonk_terminal/portfolio/brokers/ally/ally_model.py<gh_stars>1000+
"""Ally Model"""
__docformat__ = "numpy"
import ally
import pandas as pd
def get_holdings() -> pd.DataFrame:
"""Get holdings from Ally account in pandas df
Returns
-------
pd.DataFrame
Dataframe of positions
"""
a = ally.Ally()
return ally_positions_to_df(a.holdings(dataframe=True))
def ally_positions_to_df(df: pd.DataFrame) -> pd.DataFrame:
"""Clean up ally holdings dataframe
Parameters
----------
df : pd.DataFrame
Input dataframe of holdings
Returns
-------
pd.DataFrame
Processed holdings
"""
names = {
"costbasis": "CostBasis",
"marketvalue": "MarketValue",
"sym": "Symbol",
"qty": "Quantity",
}
df = df.loc[:, ["qty", "costbasis", "marketvalue", "sym"]]
df[["qty", "costbasis", "marketvalue"]] = df[
["qty", "costbasis", "marketvalue"]
].astype(float)
df = df.rename(columns=names)
df["PnL"] = df["MarketValue"] - df["CostBasis"]
return df
def get_history() -> pd.DataFrame:
"""Gets transaction history for the account."
Returns
-------
pd.DataFrame
Dataframe of transaction history
"""
a = ally.Ally()
return a.history(dataframe=True)
def get_balances() -> pd.DataFrame:
"""Gets balance details for the account."
Returns
-------
pd.DataFrame
Dataframe of transaction history
"""
a = ally.Ally()
return a.balances(dataframe=True)
def get_stock_quote(ticker: str) -> pd.DataFrame:
"""Gets quote for stock ticker
Parameters
----------
ticker : str
Ticker to get. Can be in form of 'tick1,tick2...'
Returns
-------
pd.DataFrame
Dataframe of ticker quote
"""
a = ally.Ally()
return a.quote(
ticker,
fields=["last", "bid", "ask", "opn", "dollar_value", "chg", "vl"],
dataframe=True,
)
def get_top_movers(list_type: str, exchange: str) -> pd.DataFrame:
"""
Gets top lists from ally Invest API. Documentation for parameters below:
https://www.ally.com/api/invest/documentation/market-toplists-get/
Parameters
----------
list_type : str
Which list to get data for
exchange : str
Which exchange to look at
Returns
-------
pd.DataFrame
DataFrame of top movers
"""
a = ally.Ally()
return a.toplists(list_type, exchange, dataframe=True)
|
src/homework/tests/cross_check/tests_crosscheck_is_checked.py
|
denkasyanov/education-backend
| 151 |
93282
|
<gh_stars>100-1000
import pytest
pytestmark = [pytest.mark.django_db]
@pytest.fixture
def answer(answers):
return answers[0]
@pytest.fixture
def crosscheck(mixer, answer, another_user):
return mixer.blend('homework.AnswerCrossCheck', answer=answer, checker=another_user)
def test_not_by_default(crosscheck):
assert crosscheck.is_checked() is False
def test_checked_if_there_are_comments_from_checker(crosscheck, mixer, another_user, answer):
mixer.blend('homework.Answer', parent=answer, author=another_user)
assert crosscheck.is_checked() is True
def test_not_checked_if_answers_are_not_children_of_the_checked_answer(crosscheck, mixer, another_user, answer):
mixer.blend('homework.Answer', author=another_user)
assert crosscheck.is_checked() is False
|
pyclustering/utils/color.py
|
JosephChataignon/pyclustering
| 1,013 |
93284
|
<gh_stars>1000+
"""!
@brief Colors used by pyclustering library for visualization.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
class color:
"""!
@brief Consists titles of colors that are used by pyclustering for visualization.
"""
@staticmethod
def get_color(sequential_index):
"""!
@brief Returns color using round robin to avoid out of range exception.
@param[in] sequential_index (uint): Index that should be converted to valid color index.
@return (uint) Color from list color.TITLES.
"""
return color.TITLES[sequential_index % len(color.TITLES)]
## List of color titles that are used by pyclustering for visualization.
TITLES = [ 'red', 'blue', 'darkgreen', 'gold', 'violet',
'deepskyblue', 'darkgrey', 'lightsalmon', 'deeppink', 'yellow',
'black', 'mediumspringgreen', 'orange', 'darkviolet', 'darkblue',
'silver', 'lime', 'pink', 'brown', 'bisque',
'dimgray', 'firebrick', 'darksalmon', 'chartreuse', 'skyblue',
'purple', 'fuchsia', 'palegoldenrod', 'coral', 'hotpink',
'gray', 'tan', 'crimson', 'teal', 'olive']
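# Editor's usage sketch, not part of the original module: get_color() wraps around the
# TITLES list, so any non-negative index yields a valid color name.
if __name__ == "__main__":
    assert color.get_color(0) == 'red'
    # 35 titles are defined above, so index 37 wraps around to the same color as index 2
    assert color.get_color(37) == color.get_color(2) == 'darkgreen'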
|
migrations/versions/0983f1227366_add_reported_post_count_column_to_.py
|
RobbiNespu/forget
| 157 |
93286
|
<filename>migrations/versions/0983f1227366_add_reported_post_count_column_to_.py
"""add reported post count column to account
Revision ID: 0983f1227366
Revises: <PASSWORD>
Create Date: 2017-08-03 19:16:55.883575
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0983f1227366'
down_revision = '7<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('accounts', sa.Column('reported_post_count', sa.Integer(), nullable=True))
def downgrade():
op.drop_column('accounts', 'reported_post_count')
|
packages/pegasus-python/test/db/test_trigger.py
|
ahnitz/pegasus
| 127 |
93288
|
import datetime
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import Pegasus.db.schema as schema
from Pegasus.db.ensembles import EMError, Triggers, TriggerType
@pytest.fixture(scope="function")
def session():
"""
Create in-memory sqlite database with tables setup and return a db session
object.
"""
engine = create_engine("sqlite://")
# create all tables in the schema
schema.Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
# create an ensemble entry
session.add(
schema.Ensemble(
name="test-ensemble",
created=datetime.datetime.now(),
updated=datetime.datetime.now(),
state="ACTIVE",
max_running=1,
max_planning=1,
username="test-user",
)
)
session.commit()
yield session
# close session, db will be released
session.close()
class TestTriggers:
def test_get_trigger(self, session):
# insert trigger
t = schema.Trigger(
_id=1,
ensemble_id=1,
name="test-trigger",
state="STOPPED",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t)
triggers = Triggers(session)
expected = {
"id": 1,
"ensemble_id": 1,
"name": "test-trigger",
"state": "STOPPED",
"workflow": {"script": "/wf.py", "args": ["arg1"]},
"args": {"timeout": 100, "interval": 20},
"type": "CRON",
}
# get trigger and convert to dict for comparison
result = Triggers.get_object(triggers.get_trigger(1, "test-trigger"))
assert expected == result
def test_get_trigger_not_found(self, session):
with pytest.raises(EMError) as e:
Triggers(session).get_trigger(1, "test-trigger")
assert "No such trigger: test-trigger" in str(e)
assert e.value.status_code == 404
def test_list_triggers(self, session):
t1 = schema.Trigger(
_id=1,
ensemble_id=1,
name="test-trigger1",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t1)
t2 = schema.Trigger(
_id=2,
ensemble_id=1,
name="test-trigger2",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t2)
session.commit()
triggers = Triggers(session)
result = triggers.list_triggers()
assert len(result) == 2
def test_list_triggers_by_ensemble(self, session):
# add another ensemble to the ensemble table
session.add(
schema.Ensemble(
id=2,
name="test-ensemble2",
created=datetime.datetime.now(),
updated=datetime.datetime.now(),
state="ACTIVE",
max_running=1,
max_planning=1,
username="test-user",
)
)
session.commit()
# add a trigger assigned to test-ensemble2
t = schema.Trigger(
_id=1,
ensemble_id=2,
name="test-trigger1",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t)
session.commit()
triggers = Triggers(session)
result = triggers.list_triggers_by_ensemble(
username="test-user", ensemble="test-ensemble2"
)
assert len(result) == 1
assert Triggers.get_object(result[0]) == {
"id": 1,
"ensemble_id": 2,
"name": "test-trigger1",
"state": "READY",
"workflow": {"script": "/wf.py", "args": ["arg1"]},
"args": {"timeout": 100, "interval": 20},
"type": "CRON",
}
result = triggers.list_triggers_by_ensemble(
username="test-user", ensemble="doesntexist"
)
assert len(result) == 0
def test_insert_trigger(self, session):
print(session.query(schema.Ensemble).all())
triggers = Triggers(session)
triggers.insert_trigger(
ensemble_id=1,
trigger="test-trigger",
trigger_type=TriggerType.CRON.value,
workflow_script="/wf.py",
workflow_args=["arg1"],
interval=10,
timeout=20,
)
expected = {
"id": 1,
"ensemble_id": 1,
"name": "test-trigger",
"state": "READY",
"workflow": {"script": "/wf.py", "args": ["arg1"]},
"args": {"timeout": 20, "interval": 10},
"type": "CRON",
}
result = Triggers.get_object(
session.query(schema.Trigger)
.filter_by(ensemble_id=1, name="test-trigger")
.one()
)
assert expected == result
def test_update_state(self, session):
# insert trigger
t = schema.Trigger(
_id=1,
ensemble_id=1,
name="test-trigger",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t)
triggers = Triggers(session)
triggers.update_state(ensemble_id=1, trigger_id=1, new_state="RUNNING")
expected_state = "RUNNING"
result = session.query(schema.Trigger).filter_by(_id=1).one().state
assert expected_state == result
def test_delete_trigger(self, session):
# insert trigger
t = schema.Trigger(
_id=1,
ensemble_id=1,
name="test-trigger",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t)
assert len(session.query(schema.Trigger).all()) == 1
triggers = Triggers(session)
# delete trigger
triggers.delete_trigger(ensemble_id=1, trigger="test-trigger")
assert len(session.query(schema.Trigger).all()) == 0
def test_get_object(self, session):
t = schema.Trigger(
_id=1,
ensemble_id=1,
name="test-trigger",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
expected = {
"id": 1,
"ensemble_id": 1,
"name": "test-trigger",
"state": "READY",
"workflow": {"script": "/wf.py", "args": ["arg1"]},
"args": {"timeout": 100, "interval": 20},
"type": "CRON",
}
result = Triggers.get_object(t)
assert expected == result
|
cactus/utils/packaging.py
|
danielchasehooper/Cactus
| 1,048 |
93290
|
<reponame>danielchasehooper/Cactus<filename>cactus/utils/packaging.py<gh_stars>1000+
import posixpath
import pkg_resources
def pkg_walk(package, top):
"""
Walk the package resources. Implementation from os.walk.
"""
names = pkg_resources.resource_listdir(package, top)
dirs, nondirs = [], []
for name in names:
# Forward slashes with pkg_resources
if pkg_resources.resource_isdir(package, posixpath.join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
yield top, dirs, nondirs
for name in dirs:
new_path = posixpath.join(top, name)
for out in pkg_walk(package, new_path):
yield out
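# Editor's usage sketch, not part of the original module: walking the resources bundled
# with a package. "some_package" and "data" are placeholders; substitute a package that
# actually ships resource files.
if __name__ == "__main__":
    for top, dirs, files in pkg_walk("some_package", "data"):
        print(top, dirs, files)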
|
SimG4CMS/Calo/python/GeometryAPD_cff.py
|
ckamtsikis/cmssw
| 852 |
93297
|
<reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
#
# Geometry master configuration
# forStandalone APD
#
# Ideal geometry, needed for simulation
from Geometry.EcalTestBeam.APDXML_cfi import *
# Calorimeters
from Geometry.CaloEventSetup.CaloTopology_cfi import *
from Geometry.CaloEventSetup.CaloGeometry_cff import *
from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *
from Geometry.EcalMapping.EcalMapping_cfi import *
from Geometry.EcalMapping.EcalMappingRecord_cfi import *
|
data_collection/gazette/spiders/sp_sao_roque.py
|
kaiocp/querido-diario
| 454 |
93299
|
<filename>data_collection/gazette/spiders/sp_sao_roque.py
from gazette.spiders.base.instar import BaseInstarSpider
class SpSaoRoqueSpider(BaseInstarSpider):
TERRITORY_ID = "3550605"
name = "sp_sao_roque"
allowed_domains = ["saoroque.sp.gov.br"]
start_urls = ["https://www.saoroque.sp.gov.br/portal/diario-oficial"]
|
fbrp/src/fbrp/cmd/down.py
|
ali-senguel/fairo
| 669 |
93314
|
from fbrp import life_cycle
from fbrp import registrar
import argparse
@registrar.register_command("down")
class down_cmd:
@classmethod
def define_argparse(cls, parser: argparse.ArgumentParser):
parser.add_argument("proc", action="append", nargs="*")
@staticmethod
def exec(args: argparse.Namespace):
procs = life_cycle.system_state().procs.keys()
given_proc_names = args.proc[0]
if given_proc_names:
procs = set(procs) & set(given_proc_names)
for proc_name in procs:
life_cycle.set_ask(proc_name, life_cycle.Ask.DOWN)
|
examples/web.py
|
msabramo/diesel
| 224 |
93338
|
from diesel.web import DieselFlask, request
app = DieselFlask(__name__)
@app.route("/")
def hello():
name = request.args.get('name', 'world')
return "hello, %s!" % name
@app.route("/err")
def err():
a = b
return "never happens.."
if __name__ == '__main__':
import diesel
def t():
while True:
diesel.sleep(1)
print "also looping.."
app.diesel_app.add_loop(diesel.Loop(t))
app.run(debug=True)
|
gluon/gluoncv2/models/dabnet.py
|
naviocean/imgclsmob
| 2,649 |
93352
|
<reponame>naviocean/imgclsmob
"""
DABNet for image segmentation, implemented in Gluon.
Original paper: 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
"""
__all__ = ['DABNet', 'dabnet_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3, conv3x3_block, ConvBlock, NormActivation, Concurrent, InterpolationBlock,\
DualPathSequential, PReLU2
class DwaConvBlock(HybridBlock):
"""
Depthwise asymmetric separable convolution block.
Parameters:
----------
channels : int
Number of input/output channels.
kernel_size : int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int
Padding value for convolution layer.
dilation : int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
def __init__(self,
channels,
kernel_size,
strides,
padding,
dilation=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
activation=(lambda: nn.Activation("relu")),
**kwargs):
super(DwaConvBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = ConvBlock(
in_channels=channels,
out_channels=channels,
kernel_size=(kernel_size, 1),
strides=strides,
padding=(padding, 0),
dilation=(dilation, 1),
groups=channels,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=activation)
self.conv2 = ConvBlock(
in_channels=channels,
out_channels=channels,
kernel_size=(1, kernel_size),
strides=strides,
padding=(0, padding),
dilation=(1, dilation),
groups=channels,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=activation)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
def dwa_conv3x3_block(channels,
strides=1,
padding=1,
dilation=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
activation=(lambda: nn.Activation("relu")),
**kwargs):
"""
3x3 version of the depthwise asymmetric separable convolution block.
Parameters:
----------
channels : int
Number of input/output channels.
strides : int, default 1
Strides of the convolution.
padding : int, default 1
Padding value for convolution layer.
dilation : int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
return DwaConvBlock(
channels=channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=activation,
**kwargs)
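def _example_dwa_conv3x3_block():
    # Illustrative sanity check (hypothetical helper, not used by the model
    # code): with the default stride of 1, the depthwise asymmetric block
    # should preserve both the channel count and the spatial size.
    import mxnet as mx
    blk = dwa_conv3x3_block(channels=8)
    blk.initialize()
    x = mx.nd.zeros((1, 8, 16, 16))
    assert blk(x).shape == (1, 8, 16, 16)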
class DABBlock(HybridBlock):
"""
DABNet specific base block.
Parameters:
----------
channels : int
Number of input/output channels.
dilation : int
Dilation value for a dilated branch in the unit.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
channels,
dilation,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABBlock, self).__init__(**kwargs)
mid_channels = channels // 2
with self.name_scope():
self.norm_activ1 = NormActivation(
in_channels=channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(channels)))
self.conv1 = conv3x3_block(
in_channels=channels,
out_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels)))
self.branches = Concurrent(stack=True)
self.branches.add(dwa_conv3x3_block(
channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels))))
self.branches.add(dwa_conv3x3_block(
channels=mid_channels,
padding=dilation,
dilation=dilation,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels))))
self.norm_activ2 = NormActivation(
in_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels)))
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=channels)
def hybrid_forward(self, F, x):
identity = x
x = self.norm_activ1(x)
x = self.conv1(x)
x = self.branches(x)
x = x.sum(axis=1)
x = self.norm_activ2(x)
x = self.conv2(x)
x = x + identity
return x
class DownBlock(HybridBlock):
"""
DABNet specific downsample block for the main branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DownBlock, self).__init__(**kwargs)
self.expand = (in_channels < out_channels)
mid_channels = out_channels - in_channels if self.expand else out_channels
with self.name_scope():
self.conv = conv3x3(
in_channels=in_channels,
out_channels=mid_channels,
strides=2)
if self.expand:
self.pool = nn.MaxPool2D(
pool_size=2,
strides=2)
self.norm_activ = NormActivation(
in_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
def hybrid_forward(self, F, x):
y = self.conv(x)
if self.expand:
z = self.pool(x)
y = F.concat(y, z, dim=1)
y = self.norm_activ(y)
return y
class DABUnit(HybridBlock):
"""
DABNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
dilations : list of int
Dilations for blocks.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
dilations,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABUnit, self).__init__(**kwargs)
mid_channels = out_channels // 2
with self.name_scope():
self.down = DownBlock(
in_channels=in_channels,
out_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.blocks = nn.HybridSequential(prefix="")
for i, dilation in enumerate(dilations):
self.blocks.add(DABBlock(
channels=mid_channels,
dilation=dilation,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
def hybrid_forward(self, F, x):
x = self.down(x)
y = self.blocks(x)
x = F.concat(y, x, dim=1)
return x
class DABStage(HybridBlock):
"""
DABNet stage.
Parameters:
----------
x_channels : int
Number of input/output channels for x.
y_in_channels : int
Number of input channels for y.
y_out_channels : int
Number of output channels for y.
dilations : list of int
Dilations for blocks.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
x_channels,
y_in_channels,
y_out_channels,
dilations,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABStage, self).__init__(**kwargs)
self.use_unit = (len(dilations) > 0)
with self.name_scope():
self.x_down = nn.AvgPool2D(
pool_size=3,
strides=2,
padding=1)
if self.use_unit:
self.unit = DABUnit(
in_channels=y_in_channels,
out_channels=(y_out_channels - x_channels),
dilations=dilations,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.norm_activ = NormActivation(
in_channels=y_out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(y_out_channels)))
def hybrid_forward(self, F, y, x):
x = self.x_down(x)
if self.use_unit:
y = self.unit(y)
y = F.concat(y, x, dim=1)
y = self.norm_activ(y)
return y, x
class DABInitBlock(HybridBlock):
"""
DABNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
self.conv3 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class DABNet(HybridBlock):
"""
DABNet model from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
Parameters:
----------
channels : list of int
Number of output channels for each unit (for y-branch).
init_block_channels : int
Number of output channels for the initial unit.
dilations : list of list of int
Dilations for blocks.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default False
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (1024, 2048)
Spatial size of the expected input image.
classes : int, default 19
Number of segmentation classes.
"""
def __init__(self,
channels,
init_block_channels,
dilations,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
aux=False,
fixed_size=False,
in_channels=3,
in_size=(1024, 2048),
classes=19,
**kwargs):
super(DABNet, self).__init__(**kwargs)
assert (aux is not None)
assert (fixed_size is not None)
assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
self.in_size = in_size
self.classes = classes
self.fixed_size = fixed_size
with self.name_scope():
self.features = DualPathSequential(
return_two=False,
first_ordinals=1,
last_ordinals=0)
self.features.add(DABInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
y_in_channels = init_block_channels
for i, (y_out_channels, dilations_i) in enumerate(zip(channels, dilations)):
self.features.add(DABStage(
x_channels=in_channels,
y_in_channels=y_in_channels,
y_out_channels=y_out_channels,
dilations=dilations_i,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
y_in_channels = y_out_channels
self.classifier = conv1x1(
in_channels=y_in_channels,
out_channels=classes)
self.up = InterpolationBlock(scale_factor=8)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.fixed_size else x.shape[2:]
y = self.features(x, x)
y = self.classifier(y)
y = self.up(y, in_size)
return y
def get_dabnet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DABNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 32
channels = [35, 131, 259]
dilations = [[], [2, 2, 2], [4, 4, 8, 8, 16, 16]]
bn_epsilon = 1e-3
net = DABNet(
channels=channels,
init_block_channels=init_block_channels,
dilations=dilations,
bn_epsilon=bn_epsilon,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx,
ignore_extra=True)
return net
def dabnet_cityscapes(classes=19, **kwargs):
"""
DABNet model for Cityscapes from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
Parameters:
----------
classes : int, default 19
Number of segmentation classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dabnet(classes=classes, model_name="dabnet_cityscapes", **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
import mxnet as mx
pretrained = False
fixed_size = True
in_size = (1024, 2048)
classes = 19
models = [
dabnet_cityscapes,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != dabnet_cityscapes or weight_count == 756643)
batch = 4
x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
y = net(x)
assert (y.shape == (batch, classes, in_size[0], in_size[1]))
if __name__ == "__main__":
_test()
|
support/misc.py
|
rknop/amuse
| 131 |
93395
|
import sys
import re
import os
import fnmatch
import glob
from os import walk as py_walk
def walk(top, callback, args):
for root, dirs, files in py_walk(top):
callback(args, root, files)
def find_data_files(srcdir, destdir, *wildcards, **kw):
"""
get a list of all files under the srcdir matching wildcards,
returned in a format to be used for install_data
"""
def walk_helper(arg, dirname, files):
if '.svn' in dirname:
return
names = []
lst, wildcards, dirnameconverter, destdir = arg
for wc in wildcards:
wc_name = os.path.normpath(os.path.join(dirname, wc))
for f in files:
filename = os.path.normpath(os.path.join(dirname, f))
if fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename):
names.append(filename)
if names:
destdirname = dirnameconverter.sub(destdir, dirname)
lst.append((destdirname, names))
file_list = []
recursive = kw.get('recursive', True)
converter = re.compile('^({0})'.format(srcdir))
if recursive:
walk(srcdir, walk_helper, (file_list, wildcards, converter, destdir))
else:
walk_helper((file_list, wildcards, converter, destdir),
srcdir,
[os.path.basename(f) for f in glob.glob(os.path.join(srcdir, '*'))])
return file_list
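if __name__ == "__main__":
    # Illustrative usage sketch (hypothetical paths): collect *.txt and *.rst
    # files under "data" as an install_data style list of (destdir, files).
    for destination, names in find_data_files("data", "share/example", "*.txt", "*.rst"):
        print("%s -> %s" % (destination, names))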
|
classification_training/imagenet/resnet_vovnet_darknet_train_example/test_config.py
|
LANCEREN/simpleAICV-pytorch-ImageNet-COCO-training
| 154 |
93398
|
<gh_stars>100-1000
import os
import sys
BASE_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
sys.path.append(BASE_DIR)
from tools.path import ILSVRC2012_path
from simpleAICV.classification import backbones
from simpleAICV.classification import losses
import torchvision.transforms as transforms
import torchvision.datasets as datasets
class config:
val_dataset_path = os.path.join(ILSVRC2012_path, 'val')
network = 'resnet18'
pretrained = True
num_classes = 1000
input_image_size = 224
scale = 256 / 224
model = backbones.__dict__[network](**{
'pretrained': pretrained,
'num_classes': num_classes,
})
criterion = losses.__dict__['CELoss']()
val_dataset = datasets.ImageFolder(
val_dataset_path,
transforms.Compose([
transforms.Resize(int(input_image_size * scale)),
transforms.CenterCrop(input_image_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]))
distributed = True
seed = 0
batch_size = 256
num_workers = 16
trained_model_path = ''
|
tensorflow_federated/python/core/impl/executors/data_conversions_test.py
|
zhihansh/federated-oss
| 1,918 |
93400
|
<filename>tensorflow_federated/python/core/impl/executors/data_conversions_test.py
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from tensorflow_federated.python.core.impl.executors import data_conversions
from tensorflow_federated.python.core.impl.types import placements
class DataConversionsTest(absltest.TestCase):
def test_converts_placement_keyed_to_string_keyed(self):
num_clients = 10
placement_keyed_mapping = {
placements.SERVER: 1,
placements.CLIENTS: num_clients
}
expected_string_keyed_mapping = {
placements.SERVER.uri: 1,
placements.CLIENTS.uri: num_clients
}
string_keyed_mapping = data_conversions.convert_cardinalities_dict_to_string_keyed(
placement_keyed_mapping)
self.assertEqual(string_keyed_mapping, expected_string_keyed_mapping)
def test_raises_string_keyed_mapping(self):
string_keyed_mapping = {placements.SERVER.uri: 1, placements.CLIENTS.uri: 5}
with self.assertRaises(TypeError):
data_conversions.convert_cardinalities_dict_to_string_keyed(
string_keyed_mapping)
def test_raises_non_integer_values(self):
placement_keyed_non_integer_valued_mapping = {
placements.SERVER: 1.,
placements.CLIENTS: 10.
}
with self.assertRaises(TypeError):
data_conversions.convert_cardinalities_dict_to_string_keyed(
placement_keyed_non_integer_valued_mapping)
if __name__ == '__main__':
absltest.main()
|
corehq/apps/groups/dbaccessors.py
|
dimagilg/commcare-hq
| 471 |
93402
|
<gh_stars>100-1000
from collections import namedtuple
from django.conf import settings
from corehq.apps.domain.dbaccessors import (
get_doc_ids_in_domain_by_class,
get_docs_in_domain_by_class,
)
def group_by_domain(domain):
from corehq.apps.groups.models import Group
return get_docs_in_domain_by_class(domain, Group)
def _group_by_name(domain, name, **kwargs):
from corehq.apps.groups.models import Group
return list(Group.view(
'groups/by_name',
key=[domain, name],
**kwargs
))
def group_by_name(domain, name, include_docs=True):
return _group_by_name(
domain,
name,
include_docs=include_docs,
)
def stale_group_by_name(domain, name, include_docs=True):
return _group_by_name(
domain,
name,
include_docs=include_docs,
stale=settings.COUCH_STALE_QUERY,
)
def refresh_group_views():
from corehq.apps.groups.models import Group
for view_name in [
'groups/by_name',
]:
Group.view(
view_name,
include_docs=False,
limit=1,
).fetch()
def get_group_ids_by_domain(domain):
from corehq.apps.groups.models import Group
return get_doc_ids_in_domain_by_class(domain, Group)
GroupIdName = namedtuple('GroupIdName', 'id name')
def get_group_id_name_map_by_user(user_id, limit=None):
from corehq.apps.groups.models import Group
view_results = Group.view(
'groups/by_user',
key=user_id,
include_docs=False,
limit=limit
)
return [GroupIdName(r['id'], r['value'][1]) for r in view_results]
|
tests/changes/api/test_jenkins_master_blacklist.py
|
vault-the/changes
| 443 |
93424
|
<gh_stars>100-1000
from changes.testutils import APITestCase
class JenkinsMasterBlacklist(APITestCase):
def test_add_remove_blacklist(self):
path = '/api/0/jenkins_master_blacklist/'
# Add to blacklist
data = dict(master_url='https://jenkins-master-a')
resp = self.client.post(path, data=data)
assert resp.status_code == 200
data = dict(master_url='https://jenkins-master-b')
resp = self.client.post(path, data=data)
assert resp.status_code == 200
resp = self.client.get(path)
        assert resp.status_code == 200
result = self.unserialize(resp)
assert 'https://jenkins-master-a' in result['blacklist']
assert 'https://jenkins-master-b' in result['blacklist']
# Delete from blacklist
data = dict(master_url='https://jenkins-master-a', remove=1)
resp = self.client.post(path, data=data)
        assert resp.status_code == 200
assert ['https://jenkins-master-b'] == self.unserialize(resp)['blacklist']
def test_re_add(self):
path = '/api/0/jenkins_master_blacklist/'
data = dict(master_url='https://jenkins-master-a')
resp = self.client.post(path, data=data)
assert resp.status_code == 200
data = dict(master_url='https://jenkins-master-a')
resp = self.client.post(path, data=data)
assert resp.status_code == 200
result = self.unserialize(resp)
assert 'warning' in result
def test_remove_missing(self):
path = '/api/0/jenkins_master_blacklist/'
data = dict(master_url='https://jenkins-master-a', remove=1)
resp = self.client.post(path, data=data)
assert resp.status_code == 200
result = self.unserialize(resp)
assert 'warning' in result
|
tools/Ubertooth/host/python/specan_ui/setup.py
|
Charmve/BLE-Security-Att-Def
| 149 |
93441
|
<filename>tools/Ubertooth/host/python/specan_ui/setup.py
#!/usr/bin/env python
"""
Specan setup
Install script for the Ubertooth spectrum analyzer tool
Usage: python setup.py install
This file is part of project Ubertooth
Copyright 2012 <NAME>
"""
from distutils.core import setup
setup(
name = "specan",
description = "A tool for reading spectrum analyzer data from an Ubertooth device",
author = "<NAME>, <NAME>, <NAME>",
url = "https://github.com/greatscottgadgets/ubertooth/",
license = "GPL",
version = '',
packages = ['specan'],
scripts = ['ubertooth-specan-ui'],
classifiers=[
'Development Status :: 5 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python',
'Operating System :: OS Independent',
],
)
|
train.py
|
xingranzh/CocosNet-v2
| 251 |
93445
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import torch
from torchvision.utils import save_image
from options.train_options import TrainOptions
import data
from util.iter_counter import IterationCounter
from util.util import print_current_errors
from util.util import mkdir
from trainers.pix2pix_trainer import Pix2PixTrainer
if __name__ == '__main__':
# parse options
opt = TrainOptions().parse()
# print options to help debugging
print(' '.join(sys.argv))
dataloader = data.create_dataloader(opt)
len_dataloader = len(dataloader)
# create tool for counting iterations
iter_counter = IterationCounter(opt, len(dataloader))
# create trainer for our model
trainer = Pix2PixTrainer(opt, resume_epoch=iter_counter.first_epoch)
save_root = os.path.join('checkpoints', opt.name, 'train')
mkdir(save_root)
for epoch in iter_counter.training_epochs():
opt.epoch = epoch
iter_counter.record_epoch_start(epoch)
for i, data_i in enumerate(dataloader, start=iter_counter.epoch_iter):
iter_counter.record_one_iteration()
# Training
# train generator
if i % opt.D_steps_per_G == 0:
trainer.run_generator_one_step(data_i)
# train discriminator
trainer.run_discriminator_one_step(data_i)
if iter_counter.needs_printing():
losses = trainer.get_latest_losses()
try:
print_current_errors(opt, epoch, iter_counter.epoch_iter,
iter_counter.epoch_iter_num, losses, iter_counter.time_per_iter)
except OSError as err:
print(err)
if iter_counter.needs_displaying():
imgs_num = data_i['label'].shape[0]
if opt.dataset_mode == 'deepfashionHD':
label = data_i['label'][:,:3,:,:]
show_size = opt.display_winsize
imgs = torch.cat((label.cpu(), data_i['ref'].cpu(), \
trainer.get_latest_generated().data.cpu(), \
data_i['image'].cpu()), 0)
try:
save_name = '%08d_%08d.png' % (epoch, iter_counter.total_steps_so_far)
save_name = os.path.join(save_root, save_name)
save_image(imgs, save_name, nrow=imgs_num, padding=0, normalize=True)
except OSError as err:
print(err)
if iter_counter.needs_saving():
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, iter_counter.total_steps_so_far))
try:
trainer.save('latest')
iter_counter.record_current_iter()
except OSError as err:
import pdb; pdb.set_trace()
print(err)
trainer.update_learning_rate(epoch)
iter_counter.record_epoch_end()
if epoch % opt.save_epoch_freq == 0 or epoch == iter_counter.total_epochs:
print('saving the model at the end of epoch %d, iters %d' %
(epoch, iter_counter.total_steps_so_far))
try:
trainer.save('latest')
trainer.save(epoch)
except OSError as err:
print(err)
print('Training was successfully finished.')
|
changes/api/project_build_index.py
|
vault-the/changes
| 443 |
93446
|
<filename>changes/api/project_build_index.py
from __future__ import absolute_import, division, unicode_literals
from flask_restful.reqparse import RequestParser
from sqlalchemy import or_
from sqlalchemy.orm import contains_eager, joinedload
from changes.api.auth import get_current_user
from changes.api.base import APIView
from changes.constants import Cause, Result
from changes.models.author import Author
from changes.models.build import Build
from changes.models.project import Project
from changes.models.source import Source
from changes.utils.phabricator_utils import (might_be_diffusion_iden,
get_hash_from_diffusion_iden)
def validate_author(author_id):
current_user = get_current_user()
if author_id == 'me' and not current_user:
raise ValueError('You are not signed in.')
return Author.find(author_id, current_user)
class ProjectBuildIndexAPIView(APIView):
get_parser = RequestParser()
get_parser.add_argument('include_patches', type=lambda x: bool(int(x)), location='args',
default=True)
get_parser.add_argument('author', type=validate_author, location='args',
dest='authors')
get_parser.add_argument('query', type=unicode, location='args')
get_parser.add_argument('source', type=unicode, location='args')
get_parser.add_argument('result', type=unicode, location='args',
choices=('failed', 'passed', 'aborted', 'unknown', ''))
get_parser.add_argument('patches_only', type=lambda x: bool(int(x)), location='args',
default=False)
get_parser.add_argument('cause', type=unicode, location='args',
choices=('unknown', 'manual', 'push', 'retry', 'snapshot', ''))
get_parser.add_argument('tag', type=unicode, action='append', location='args')
def get(self, project_id):
project = Project.get(project_id)
if not project:
return '', 404
args = self.get_parser.parse_args()
filters = []
if args.authors:
filters.append(Build.author_id.in_([a.id for a in args.authors]))
elif args.authors is not None:
return []
if args.source:
filters.append(Build.target.startswith(args.source))
# is this from the search bar
if args.query:
clauses = []
# search by revision title
clauses.append(Build.label.contains(args.query))
# search by prefix
clauses.append(Build.target.startswith(args.query))
# allows users to paste a full commit hash and still
# find the relevant build(s). Should be fine for mercurial/git,
# and svn will never have long enough strings
if len(args.query) > 12:
clauses.append(Build.target.startswith(args.query[0:12]))
# if they searched for something that looks like a phabricator
# identifier, try to find it
if might_be_diffusion_iden(args.query):
possible_hash = get_hash_from_diffusion_iden(args.query)
if possible_hash:
# the query should always be at least as long or longer than
# our commit identifiers
clauses.append(
Build.target.startswith(possible_hash[0:12]))
filters.append(or_(*clauses))
if args.result:
filters.append(Build.result == Result[args.result])
if args.cause:
filters.append(Build.cause == Cause[args.cause])
if args.tag:
tags = filter(bool, args.tag)
# Avoid empty tags, which historically are meant to mean "no tag" restriction.
if tags:
filters.append(or_(*[Build.tags.any(t) for t in tags]))
if args.patches_only:
filters.append(Source.patch_id != None) # NOQA
elif not args.include_patches:
filters.append(Source.patch_id == None) # NOQA
queryset = Build.query.options(
joinedload('project', innerjoin=True),
joinedload('author'),
contains_eager('source').joinedload('revision'),
).join(
Source, Source.id == Build.source_id,
).filter(
Build.project_id == project.id,
Source.repository_id == project.repository_id,
*filters
).order_by(Build.date_created.desc())
return self.paginate(queryset)
|
tests/functional/modules/pyi_testmod_relimp/relimp1.py
|
hawkhai/pyinstaller
| 9,267 |
93448
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
name = 'pyi_testmod_relimp.relimp1'
from . import relimp2 as upper # noqa: E402
from .pyi_testmod_relimp import relimp2 as lower # noqa: E402
assert upper.name == 'pyi_testmod_relimp.relimp2'
assert lower.name == 'pyi_testmod_relimp.pyi_testmod_relimp.relimp2'
if upper.__name__ == lower.__name__:
raise SystemExit("Imported the same module")
if upper.__file__ == lower.__file__:
raise SystemExit("Imported the same file")
|
examples/pipeline_sensitivity.py
|
rozlana-g/FEDOT
| 358 |
93456
|
from os import makedirs
from os.path import exists, join
from fedot.core.composer.gp_composer.gp_composer import GPComposerBuilder, GPComposerRequirements
from fedot.core.data.data import InputData
from fedot.core.data.data_split import train_test_data_setup
from fedot.core.optimisers.gp_comp.gp_optimiser import GPGraphOptimiserParameters
from fedot.core.optimisers.gp_comp.operators.inheritance import GeneticSchemeTypesEnum
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.repository.operation_types_repository import get_operations_for_task
from fedot.core.repository.quality_metrics_repository import ClassificationMetricsEnum, MetricsRepository, \
RegressionMetricsEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum
from fedot.core.utils import default_fedot_data_dir, fedot_project_root
from fedot.sensitivity.node_sa_approaches import NodeDeletionAnalyze, NodeReplaceOperationAnalyze
from fedot.sensitivity.nodes_sensitivity import NodesAnalysis
def get_three_depth_manual_class_pipeline():
logit_node_primary = PrimaryNode('logit')
xgb_node_primary = PrimaryNode('xgboost')
xgb_node_primary_second = PrimaryNode('xgboost')
qda_node_third = SecondaryNode('qda', nodes_from=[xgb_node_primary_second])
knn_node_third = SecondaryNode('knn', nodes_from=[logit_node_primary, xgb_node_primary])
knn_root = SecondaryNode('knn', nodes_from=[qda_node_third, knn_node_third])
pipeline = Pipeline(knn_root)
return pipeline
def get_three_depth_manual_regr_pipeline():
xgb_primary = PrimaryNode('xgbreg')
knn_primary = PrimaryNode('knnreg')
dtreg_secondary = SecondaryNode('dtreg', nodes_from=[xgb_primary])
rfr_secondary = SecondaryNode('rfr', nodes_from=[knn_primary])
knnreg_root = SecondaryNode('knnreg', nodes_from=[dtreg_secondary, rfr_secondary])
pipeline = Pipeline(knnreg_root)
return pipeline
def get_composed_pipeline(dataset_to_compose, task, metric_function):
# the search of the models provided by the framework that can be used as nodes in a pipeline for the selected task
available_model_types = get_operations_for_task(task=task, mode='model')
# the choice and initialisation of the GP search
composer_requirements = GPComposerRequirements(
primary=available_model_types,
secondary=available_model_types, max_arity=3,
max_depth=3, pop_size=20, num_of_generations=20,
crossover_prob=0.8, mutation_prob=0.8)
# GP optimiser parameters choice
scheme_type = GeneticSchemeTypesEnum.steady_state
optimiser_parameters = GPGraphOptimiserParameters(genetic_scheme_type=scheme_type)
# Create builder for composer and set composer params
builder = GPComposerBuilder(task=task).with_requirements(composer_requirements).with_metrics(
metric_function).with_optimiser_parameters(optimiser_parameters)
# Create GP-based composer
composer = builder.build()
# the optimal pipeline generation by composition - the most time-consuming task
pipeline_evo_composed = composer.compose_pipeline(data=dataset_to_compose,
is_visualise=True)
return pipeline_evo_composed
def get_scoring_data():
file_path_train = 'cases/data/scoring/scoring_train.csv'
full_path_train = join(str(fedot_project_root()), file_path_train)
# a dataset for a final validation of the composed model
file_path_test = 'cases/data/scoring/scoring_test.csv'
full_path_test = join(str(fedot_project_root()), file_path_test)
task = Task(TaskTypesEnum.classification)
train = InputData.from_csv(full_path_train, task=task)
test = InputData.from_csv(full_path_test, task=task)
return train, test
def get_kc2_data():
file_path = 'cases/data/kc2/kc2.csv'
full_path = join(str(fedot_project_root()), file_path)
task = Task(TaskTypesEnum.classification)
data = InputData.from_csv(full_path, task=task)
train, test = train_test_data_setup(data)
return train, test
def get_cholesterol_data():
file_path = 'cases/data/cholesterol/cholesterol.csv'
full_path = join(str(fedot_project_root()), file_path)
task = Task(TaskTypesEnum.regression)
data = InputData.from_csv(full_path, task=task)
train, test = train_test_data_setup(data)
return train, test
def pipeline_by_task(task, metric, data, is_composed):
if is_composed:
pipeline = get_composed_pipeline(data, task,
metric_function=metric)
else:
if task.task_type.name == 'classification':
pipeline = get_three_depth_manual_class_pipeline()
else:
pipeline = get_three_depth_manual_regr_pipeline()
return pipeline
def run_analysis_case(train_data: InputData, test_data: InputData,
case_name: str, task, metric, is_composed=False, result_path=None):
pipeline = pipeline_by_task(task=task, metric=metric,
data=train_data, is_composed=is_composed)
pipeline.fit(train_data)
if not result_path:
result_path = join(default_fedot_data_dir(), 'sensitivity', f'{case_name}')
if not exists(result_path):
makedirs(result_path)
pipeline.show(path=result_path)
pipeline_analysis_result = NodesAnalysis(pipeline=pipeline, train_data=train_data,
test_data=test_data, path_to_save=result_path,
approaches=[NodeDeletionAnalyze,
NodeReplaceOperationAnalyze]).analyze()
print(f'pipeline analysis result {pipeline_analysis_result}')
def run_class_scoring_case(is_composed: bool, path_to_save=None):
train_data, test_data = get_scoring_data()
task = Task(TaskTypesEnum.classification)
# the choice of the metric for the pipeline quality assessment during composition
metric_function = MetricsRepository().metric_by_id(ClassificationMetricsEnum.ROCAUC_penalty)
if is_composed:
case = 'scoring_composed'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=True, result_path=path_to_save)
else:
case = 'scoring'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=False, result_path=path_to_save)
def run_class_kc2_case(is_composed: bool = False, path_to_save=None):
train_data, test_data = get_kc2_data()
task = Task(TaskTypesEnum.classification)
# the choice of the metric for the pipeline quality assessment during composition
metric_function = MetricsRepository().metric_by_id(ClassificationMetricsEnum.ROCAUC_penalty)
if is_composed:
case = 'kc2_composed'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=True, result_path=path_to_save)
else:
case = 'kc2'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=False, result_path=path_to_save)
def run_regr_case(is_composed: bool = False, path_to_save=None):
train_data, test_data = get_cholesterol_data()
task = Task(TaskTypesEnum.regression)
# the choice of the metric for the pipeline quality assessment during composition
metric_function = MetricsRepository().metric_by_id(RegressionMetricsEnum.RMSE)
if is_composed:
case = 'cholesterol_composed'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=True, result_path=path_to_save)
else:
case = 'cholesterol'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=False, result_path=path_to_save)
if __name__ == '__main__':
# scoring case manual
run_class_scoring_case(is_composed=False)
# kc2 case manual
run_class_kc2_case(is_composed=False)
# cholesterol regr case
run_regr_case(is_composed=False)
|
src/debugpy/_vendored/pydevd/tests_python/resources/_debugger_case15.py
|
r3m0t/debugpy
| 695 |
93471
|
class Car(object):
"""A car class"""
def __init__(self, model, make, color):
self.model = model
self.make = make
self.color = color
self.price = None
def get_price(self):
return self.price
def set_price(self, value):
self.price = value
availableCars = []
def main():
global availableCars
#Create a new car obj
carObj = Car("<NAME>", "2011", "Black")
carObj.set_price(950000) # Set price
# Add this to available cars
availableCars.append(carObj)
print('TEST SUCEEDED')
if __name__ == '__main__':
main()
|
modules/dbnd/test_dbnd/scenarios/pipelines/double_fault.py
|
ipattarapong/dbnd
| 224 |
93497
|
<gh_stars>100-1000
import logging
import sys
from dbnd import PipelineTask, PythonTask, output, parameter
logger = logging.getLogger(__name__)
class T1(PythonTask):
p1 = parameter.value("somep")
o_1 = output[str]
def run(self):
self.o_1 = self.p1
class T2(PythonTask):
p1 = parameter.value("somep")
o_1 = output[str]
def run(self):
raise Exception()
# self.o_1 = self.p1
class TPipe(PipelineTask):
o_1 = output[str]
o_2 = output[str]
def band(self):
self.o_1 = T1().o_1
self.o_2 = T2(p1=self.o_1)
if __name__ == "__main__":
TPipe(override={T1.task_version: "now"}).dbnd_run()
|
mmtbx/ions/geometry.py
|
dperl-sol/cctbx_project
| 155 |
93498
|
# -*- coding: utf-8; py-indent-offset: 2 -*-
"""
This module provides tools for examining a set of vectors and find the geometry
that best fits from a set of built in shapes.
"""
from __future__ import absolute_import, division, print_function
from scitbx.matrix import col
from collections import OrderedDict
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from math import sqrt
from six.moves import zip
def _bond_angles(vectors):
"""
Creates a list of angles (In degrees) between all two-element combinations
in vectors.
Parameters
----------
  vectors : list of scitbx.matrix.col
Returns
-------
list of float
"""
return [(v1, v2, v1.angle(v2, deg=True))
for index, v1 in enumerate(vectors)
for v2 in vectors[index + 1:]]
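def _example_bond_angles():
  """
  Illustrative example (hypothetical helper, not used elsewhere): two
  orthogonal unit vectors yield a single pair whose angle is 90 degrees.
  """
  pairs = _bond_angles([col([1, 0, 0]), col([0, 1, 0])])
  assert len(pairs) == 1
  assert abs(pairs[0][2] - 90.0) < 1.0e-6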
def _is_tetrahedron(vectors, dev_cutoff=20):
"""
Tetrahedrons have four vertices, with angles between all pairs of vertices
uniformly about 104.5 degrees.
Parameters
----------
vectors : list scitbx.matrix.col
dev_cutoff : float, optional
Returns
-------
  tuple of (float, int) or None
"""
if len(vectors) > 4 or len(vectors) < 3:
return
angles = _bond_angles(vectors)
deviation = sqrt(sum(abs(i[2] - 104.5) ** 2 for i in angles) / len(vectors))
if deviation <= dev_cutoff:
return deviation, 4 - len(vectors)
def _is_trigonal_plane(vectors, dev_cutoff=20):
"""
Triangular planar geometry has three vertices (By definition all on the same
equatorial plane). The expected angles are 120 degrees between neighboring
vertices.
Parameters
----------
vectors : list scitbx.matrix.col
dev_cutoff : float, optional
Returns
-------
  tuple of (float, int) or None
"""
if len(vectors) != 3:
return
angles = _bond_angles(vectors)
a_120s = []
for angle in angles:
a_120s.append(angle[2] - 120)
deviation = sqrt(sum(i ** 2 for i in a_120s) / len(angles))
if deviation <= dev_cutoff:
return deviation, 3 - len(vectors)
def _is_square_plane(vectors, dev_cutoff=20):
"""
Square planar geometry has four vertices, all on the same equatorial plane.
The expected angles are 90 degrees between neighboring vertices and 180
degrees between vertices across from one another.
Parameters
----------
vectors : list scitbx.matrix.col
dev_cutoff : float, optional
Returns
-------
  tuple of (float, int) or None
"""
if len(vectors) != 4:
return
angles = _bond_angles(vectors)
# Expect 2x 180 degrees and 4x 90 degrees
a_90s = []
a_180s = []
for angle in angles:
if abs(angle[2] - 90) < abs(angle[2] - 180):
a_90s.append(angle[2] - 90)
else:
a_180s.append(angle[2] - 180)
# With up to one atom missing, we must have 2 to 4 90 degree angles and 1 to 2
# 180 degree angles
if len(a_90s) < 2 or len(a_90s) > 4 or len(a_180s) < 1 or len(a_180s) > 2:
return
deviation = sqrt(sum(i ** 2 for i in a_90s + a_180s) / len(angles))
if deviation <= dev_cutoff:
return deviation, 4 - len(vectors)
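def _example_is_square_plane():
  """
  Illustrative example (hypothetical helper): the four ideal square-planar
  vectors produced by _square_plane() below are recognized with zero
  deviation and no missing vertices.
  """
  deviation, n_missing = _is_square_plane(_square_plane())
  assert deviation < 1.0e-6
  assert n_missing == 0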
def _is_square_pyramid(vectors, dev_cutoff=20):
"""
  Square pyramids have five vertices, four on the same equatorial plane with
one above. The expected angles are all either 90 degrees or 180 degrees.
Parameters
----------
vectors : list scitbx.matrix.col
dev_cutoff : float, optional
Returns
-------
  tuple of (float, int) or None
"""
if len(vectors) != 5:
return
angles = _bond_angles(vectors)
a_90s, a_180s = [], []
for angle in angles:
if abs(angle[2] - 90) < abs(angle[2] - 180):
a_90s.append(angle[2] - 90)
else:
a_180s.append(angle[2] - 180)
if len(a_90s) != 8 or len(a_180s) != 2:
return
deviation = sqrt(sum(i ** 2 for i in a_90s + a_180s) / len(angles))
if deviation <= dev_cutoff:
return deviation, 5 - len(vectors)
def _is_octahedron(vectors, dev_cutoff=20):
"""
Octahedrons have six vertices (Their name comes from their eight faces).
The expected angles are all either 90 degrees (Next to each other),
or 180 degrees (Across from each other).
Another name for this shape is square bipyramidal.
Parameters
----------
vectors : list scitbx.matrix.col
dev_cutoff : float, optional
Returns
-------
  tuple of (float, int) or None
"""
if len(vectors) != 6:
return
angles = _bond_angles(vectors)
a_90s, a_180s = [], []
for angle in angles:
if abs(angle[-1] - 90) < abs(angle[-1] - 180):
a_90s.append(angle[-1] - 90)
else:
a_180s.append(angle[-1] - 180)
if len(a_180s) > 3 or len(a_180s) < 2 or len(a_90s) < 8 or len(a_90s) > 12:
return
deviation = sqrt(sum(i ** 2 for i in a_90s + a_180s) / len(angles))
if deviation <= dev_cutoff:
return deviation, 6 - len(vectors)
def _is_trigonal_pyramid(vectors, dev_cutoff=15):
"""
  Trigonal pyramids have four vertices. Three vertices form a plane with
angles of 120 degrees between each pair. The last vertex resides axial
to the plane, at 90 degrees from all of the equatorial vertices.
Parameters
----------
vectors : list scitbx.matrix.col
dev_cutoff : float, optional
Returns
-------
  tuple of (float, int) or None
"""
if len(vectors) != 4:
return
angles = _bond_angles(vectors)
a_90s, a_120s = [], []
for angle in angles:
if abs(angle[2] - 90) < abs(angle[2] - 120):
a_90s.append(angle[2] - 90)
else:
a_120s.append(angle[2] - 120)
if len(a_90s) < 2 or len(a_90s) > 4 or len(a_120s) < 2 or len(a_120s) > 4:
return
deviation = sqrt(sum(i ** 2 for i in a_90s + a_120s) / len(angles))
if deviation <= dev_cutoff:
return deviation, 4 - len(vectors)
def _is_trigonal_bipyramid(vectors, dev_cutoff=15):
"""
Trigonal bipyramids have five vertices. Three vertices form a plane in the
middle and the angles between all three are 120 degrees. The two other
vertices reside axial to the plane, at 90 degrees from all the equatorial
vertices.
Parameters
----------
vectors : list scitbx.matrix.col
dev_cutoff : float, optional
Returns
-------
  tuple of (float, int) or None
"""
if len(vectors) > 5 or len(vectors) < 4:
return
angles = _bond_angles(vectors)
# Grab the two axial vectors
ax1, ax2, axial_angle = max(angles, key=lambda x: abs(x[-1]))
if axial_angle < 150:
# Missing one of the two axial vectors, just quit
return
base_to_axials = []
equatorial_angles = []
for v1, v2, angle in angles:
    # "!=" is used here as a boolean xor
# Grab the angles between the two endpoints of the bipyramid and the base
if (v1 in [ax1, ax2]) != (v2 in [ax1, ax2]):
base_to_axials += angle,
elif (v1 not in [ax1, ax2]) and (v2 not in [ax1, ax2]):
equatorial_angles += angle,
deviants = [axial_angle - 180]
deviants += [i - 90 for i in base_to_axials]
deviants += [i - 120 for i in equatorial_angles]
deviation = sqrt(sum(i ** 2 for i in deviants) / len(deviants))
if deviation <= dev_cutoff:
return deviation, 5 - len(vectors)
def _is_pentagonal_bipyramid(vectors, dev_cutoff=15):
"""
Pentagonal bipyramids have seven vertices. Five vertices form a plane in the
middle and the angles between all five are 72 degrees. The two other vertices
reside axial to the plane, at 90 degrees from all the equatorial vertices.
Parameters
----------
vectors : list scitbx.matrix.col
dev_cutoff : float, optional
Returns
-------
  tuple of (float, int) or None
"""
if len(vectors) > 7 or len(vectors) < 6:
return
angles = _bond_angles(vectors)
# Determine which two vectors define the axial angles
axials = []
for v1 in vectors:
v_angles = []
for v2 in vectors:
if v2 != v1:
v_angles.append(v1.angle(v2, deg=True))
a_180s = len([i for i in v_angles if abs(i - 180) < 20])
a_90s = len([i for i in v_angles if abs(i - 90) < 20])
if a_180s > 0 and a_90s > 4:
axials.append(v1)
if len(axials) != 2:
# Couldn't determine axial angles
return
ax1, ax2 = axials
axial_angle = ax1.angle(ax2, deg=True)
base_to_axials = []
equatorial_angles = []
for v1, v2, angle in angles:
    # "!=" is used here as a boolean xor
# Grab the angles between the two endpoints of the bipyramid and the base
if (v1 in [ax1, ax2]) != (v2 in [ax1, ax2]):
base_to_axials += angle,
elif (v1 not in [ax1, ax2]) and (v2 not in [ax1, ax2]):
equatorial_angles += angle,
deviants = [axial_angle - 180]
deviants += [i - 90 for i in base_to_axials]
deviants += [min(abs(i - 72), abs(i - 144)) for i in equatorial_angles]
deviation = sqrt(sum(i ** 2 for i in deviants) / len(deviants))
if deviation <= dev_cutoff:
return deviation, 7 - len(vectors)
def _is_trigonal_prism(vectors, dev_cutoff=15):
"""
Triangular prisms are defined by 3 vertices in a triangular pattern on two
aligned planes. Unfortunately, the angles are dependent on the length and
width of the prism. Need more examples to come up with a better way of
detecting this shape.
For now, this code is experimental.
Parameters
----------
vectors : list scitbx.matrix.col
dev_cutoff : float, optional
Returns
-------
  tuple of (float, int) or None
"""
if len(vectors) != 6:
return
angles = _bond_angles(vectors)
a_85s, a_135s = [], []
for angle in angles:
if abs(angle[-1] - 85) < abs(angle[-1] - 135):
a_85s.append(angle[-1] - 85)
else:
a_135s.append(angle[-1] - 135)
if len(a_85s) != 9 and len(a_135s) != 6:
return
deviation = sqrt(sum(i ** 2 for i in a_85s + a_135s) / len(angles))
if deviation < dev_cutoff:
return deviation, 6 - len(vectors)
SUPPORTED_GEOMETRIES_OLD = OrderedDict([
("tetrahedral", _is_tetrahedron),
("trigonal_planar", _is_trigonal_plane),
("square_planar", _is_square_plane),
("square_pyramidal", _is_square_pyramid),
("octahedral", _is_octahedron),
("trigonal_pyramidal", _is_trigonal_pyramid),
("trigonal_bipyramidal", _is_trigonal_bipyramid),
("pentagonal_bipyramidal", _is_pentagonal_bipyramid),
("trigonal_prism", _is_trigonal_prism),
])
def _concatenate(*args):
"""
Reduces a list of a mixture of elements and lists down to a single list.
Parameters
----------
args : tuple of (object or list of object)
Returns
-------
list
"""
lst = []
for arg in args:
if isinstance(arg, list):
for elem in arg:
lst.append(elem)
else:
lst.append(arg)
return lst
def _tetrahedron():
"""
Returns
-------
list of scitbx.matrix.col
"""
return [
col([1, 1, 1]),
col([-1, -1, 1]),
col([-1, 1, -1]),
col([1, -1, -1]),
]
def _octahedron():
"""
Returns
-------
list of scitbx.matrix.col
"""
return _bipyramid(_square_plane())
def _trigonal_plane():
"""
Returns
-------
list of scitbx.matrix.col
"""
return [
col([0, 1, 0]),
col([-sqrt(3) / 2, -1 / 2, 0]),
col([sqrt(3) / 2, -1 / 2, 0]),
]
def _square_plane():
"""
Returns
-------
list of scitbx.matrix.col
"""
return [
col([0, 1, 0]),
col([0, -1, 0]),
col([1, 0, 0]),
col([-1, 0, 0]),
]
def _pyramid(base):
"""
Returns
-------
list of scitbx.matrix.col
"""
return _concatenate(base, col([0, 0, 1]))
def _bipyramid(base):
"""
Returns
-------
list of scitbx.matrix.col
"""
return _concatenate(base, col([0, 0, 1]), col([0, 0, -1]))
def _square_pyramid():
"""
Returns
-------
list of scitbx.matrix.col
"""
return _pyramid(_square_plane())
def _square_bipyramid():
"""
Returns
-------
list of scitbx.matrix.col
"""
return _bipyramid(_square_plane())
def _trigonal_pyramid():
"""
Returns
-------
list of scitbx.matrix.col
"""
return _pyramid(_trigonal_plane())
def _trigonal_bipyramid():
"""
Returns
-------
list of scitbx.matrix.col
"""
return _bipyramid(_trigonal_plane())
def _trigonal_prism():
"""
Returns
-------
list of scitbx.matrix.col
"""
return _concatenate(
[i + col([0, 0, 1]) for i in _trigonal_plane()],
[i + col([0, 0, -1]) for i in _trigonal_plane()],
)
def _pentagon():
"""
Create a list of vectors in the shape of a planar pentagon.
Returns
-------
list of scitbx.matrix.col
See Also
--------
http://mathworld.wolfram.com/Pentagon.html
"""
c_1 = (sqrt(5) - 1) / 4
c_2 = (sqrt(5) + 1) / 4
s_1 = sqrt(10 + 2 * sqrt(5)) / 4
s_2 = sqrt(10 - 2 * sqrt(5)) / 4
return [
col([1, 0, 0]),
col([c_1, s_1, 0]),
col([-c_2, s_2, 0]),
col([-c_2, -s_2, 0]),
col([c_1, -s_1, 0]),
]
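def _example_pentagon():
  """
  Illustrative example (hypothetical helper): adjacent vertices of the planar
  pentagon are separated by 72 degrees.
  """
  pentagon = _pentagon()
  assert abs(pentagon[0].angle(pentagon[1], deg=True) - 72.0) < 1.0e-6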
def _pentagonal_pyramid():
"""
Returns
-------
list of scitbx.matrix.col
"""
return _pyramid(_pentagon())
def _pentagonal_bipyramid():
"""
  Creates a pentagonal bipyramid shape.
Returns
-------
list of scitbx.matrix.col
"""
return _bipyramid(_pentagon())
def _square_pyramid_bidentate_miss_1():
"""
Creates a square pyramid shape with one vertex replaced with a bidentate
coordination group. One vertex is missing in this shape.
Returns
-------
list of scitbx.matrix.col
"""
return _concatenate(
_square_plane(),
col([sqrt(2) / 2, sqrt(2) / 2, -1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, -1]),
)
def _square_pyramid_bidentate_miss_2():
"""
Creates a square pyramid shape with one vertex replaced with a bidentate
coordination group. One vertex is missing in this shape.
Returns
-------
list of scitbx.matrix.col
"""
return [
col([0, 1, 0]),
col([0, -1, 0]),
col([-1, 0, 0]),
col([0, 0, 1]),
col([sqrt(2) / 2, sqrt(2) / 2, -1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, -1]),
]
def _square_pyramid_bidentate_miss_3():
"""
Creates a square pyramid shape with one vertex replaced with a bidentate
coordination group. One vertex is missing in this shape.
Returns
-------
list of scitbx.matrix.col
"""
return [
col([0, 1, 0]),
col([0, -1, 0]),
col([-1, 0, 0]),
col([1, 0, 0]),
col([sqrt(2) / 2, sqrt(2) / 2, -1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, -1]),
]
def _square_pyramid_bidentate():
"""
Creates a square pyramid shape with one vertex replaced with a bidentate
coordination group.
Returns
-------
list of scitbx.matrix.col
"""
return _concatenate(
_square_pyramid(),
col([sqrt(2) / 2, sqrt(2) / 2, -1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, -1])
)
def _pentagonal_pyramid_bidentate():
"""
Creates a pentagonal pyramid shape with one vertex replaced with a bidentate
coordination group.
Returns
-------
list of scitbx.matrix.col
"""
return _concatenate(
_pentagonal_pyramid(),
col([sqrt(2) / 2, sqrt(2) / 2, -1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, -1]),
)
def _pentagonal_bibidentate_miss_1():
"""
A planar pentagon with bidentate atoms coordinating directly above and
below. One vertex from the plane is missing in this shape.
Returns
-------
list of scitbx.matrix.col
"""
return _concatenate(
col([sqrt(2) / 2, sqrt(2) / 2, 1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, 1]),
col([sqrt(2) / 2, sqrt(2) / 2, -1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, -1]),
*_pentagon()[:-1]
)
def _pentagonal_bibidentate_miss_2():
"""
A planar pentagon with bidentate atoms coordinating directly above and
below. One vertex from the plane is missing in this shape.
Returns
-------
list of scitbx.matrix.col
"""
return _concatenate(
col([sqrt(2) / 2, sqrt(2) / 2, 1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, 1]),
col([sqrt(2) / 2, sqrt(2) / 2, -1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, -1]),
*_pentagon()[1:]
)
def _pentagonal_bibidentate_miss_3():
"""
A planar pentagon with bidentate atoms coordinating directly above and
below. One vertex from the plane is missing in this shape.
Returns
-------
list of scitbx.matrix.col
"""
pentagon = _pentagon()
return _concatenate(
col([sqrt(2) / 2, sqrt(2) / 2, 1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, 1]),
col([sqrt(2) / 2, sqrt(2) / 2, -1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, -1]),
pentagon[0],
pentagon[1],
pentagon[3],
pentagon[4],
)
def _pentagonal_bibidentate_miss_4():
"""
A planar pentagon with bidentate atoms coordinating directly above and
below. One vertex from a bidentate coordinator is missing in this shape.
Returns
-------
list of scitbx.matrix.col
"""
return _concatenate(
_pentagon(),
col([sqrt(2) / 2, sqrt(2) / 2, 1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, 1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, -1]),
)
def _pentagonal_bibidentate_miss_5():
"""
A planar pentagon with bidentate atoms coordinating directly above and
below. One vertex from a bidentate coordinator is missing in this shape.
Returns
-------
list of scitbx.matrix.col
"""
return _concatenate(
_pentagon(),
col([sqrt(2) / 2, sqrt(2) / 2, 1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, 1]),
col([sqrt(2) / 2, sqrt(2) / 2, -1]),
)
def _pentagonal_bibidentate():
"""
A planar pentagon with bidentate atoms coordinating directly above and below.
Returns
-------
list of scitbx.matrix.col
"""
return _concatenate(
_pentagon(),
col([sqrt(2) / 2, sqrt(2) / 2, 1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, 1]),
col([sqrt(2) / 2, sqrt(2) / 2, -1]),
col([-sqrt(2) / 2, -sqrt(2) / 2, -1]),
)
def _see_saw():
"""
An octahedron missing two adjacent points.
Returns
-------
list of scitbx.matrix.col
"""
return [
col([1, 0, 0]),
col([-1, 0, 0]),
col([0, 1, 0]),
col([0, 0, 1]),
]
def _three_legs():
"""
Better name? Imagine 3 orthogonal vectors pointed in the x, y, and z
directions.
Returns
-------
list of scitbx.matrix.col
"""
return [
col([1, 0, 0]),
col([0, 1, 0]),
col([0, 0, 1]),
]
SUPPORTED_GEOMETRIES = OrderedDict([
(3, [
("three_legs", _three_legs, 15),
("trigonal_plane", _trigonal_plane, 15),
]),
(4, [
("tetrahedron", _tetrahedron, 15),
("square_plane", _square_plane, 20),
("trigonal_pyramid", _trigonal_pyramid, 15),
("see_saw", _see_saw, 15),
]),
(5, [
("square_pyramid", _square_pyramid, 15),
("trigonal_bipyramid", _trigonal_bipyramid, 15),
("pentagon", _pentagon, 15)
]),
(6, [
("octahedron", _octahedron, 15),
("trigonal_prism", _trigonal_prism, 15),
("pentagonal_pyramid", _pentagonal_pyramid, 15),
("square_pyramid_bidentate_miss", [_square_pyramid_bidentate_miss_1,
_square_pyramid_bidentate_miss_2,
_square_pyramid_bidentate_miss_3], 15),
]),
(7, [
("pentagonal_bipyramid", _pentagonal_bipyramid, 15),
("square_pyramid_bidentate", _square_pyramid_bidentate, 15),
]),
(8, [
("pentagonal_pyramid_bidentate", _pentagonal_pyramid_bidentate, 15),
("pentagonal_bibidentate_miss", [_pentagonal_bibidentate_miss_1,
_pentagonal_bibidentate_miss_2,
_pentagonal_bibidentate_miss_3], 15),
]),
(9, [
("pentagonal_bibidentate", _pentagonal_bibidentate, 15),
]),
])
SUPPORTED_GEOMETRY_NAMES = \
[lst_i[0] for vals in SUPPORTED_GEOMETRIES.values() for lst_i in vals]
def _angles_deviation(vectors_a, vectors_b):
"""
Calculates the root mean square of the angle deviation (in degrees) between
two lists of vectors.
Parameters
----------
vectors_a : list of scitbx.matrix.col
vectors_b : list of scitbx.matrix.col
Returns
-------
float
"""
assert len(vectors_a) == len(vectors_b)
angles_a = [vec.angle(vec_o, deg=True)
for index, vec in enumerate(vectors_a)
for vec_o in vectors_a[index + 1:]]
angles_b = [vec.angle(vec_o, deg=True)
for index, vec in enumerate(vectors_b)
for vec_o in vectors_b[index + 1:]]
angles_a.sort()
angles_b.sort()
angle_deviation = sqrt(sum((i - j) ** 2 for i, j in zip(angles_a, angles_b))
/ len(angles_a))
return angle_deviation
def find_coordination_geometry(nearby_atoms, minimizer_method=False,
cutoff=2.9):
"""
    Searches through a list of geometries to find those that fit nearby_atoms.
Geometries are recognized by generating a list of all combinations of angles
between the vectors and comparing them against the angles among the vectors
of the ideal geometry.
Parameters
----------
nearby_atoms: list of mmtbx.ions.environment.atom_contact
A list of atom contacts, indicating the vertices of the shape to be
recognized.
minimizer_method: bool, optional
Optional parameter to use the new, more efficient version of geometry
    recognition. The old method will be deprecated in later versions of
cctbx.
cutoff: float, optional
A cutoff distance, past which vectors are not included in geometry
calculations.
Returns
-------
list of tuples of str, float
A list of found geometries. Each tuple contains the name of the geometry
in string form followed by the deviation from ideal angles.
See Also
--------
mmtbx.ions.geometry.SUPPORTED_GEOMETRY_NAMES,
mmtbx.ions.geometry.SUPPORTED_GEOMETRIES_OLD
"""
    # Filter out overlapping atoms; we just want an idea of the coordinating
    # geometry, even if two different atoms are occupying the same spot.
non_overlapping = []
for index, contact in enumerate(nearby_atoms):
if all(contact.distance_from(other) > 0.5
for other in nearby_atoms[index + 1:]):
non_overlapping.append(contact)
# Filter out contacts > cutoff away
filtered = []
for contact in non_overlapping:
if contact.distance() < cutoff:
filtered.append(contact.vector)
geometries = []
if minimizer_method:
n_vectors = len(filtered)
if n_vectors not in SUPPORTED_GEOMETRIES:
return geometries
for name, func, rmsa_cutoff in SUPPORTED_GEOMETRIES[n_vectors]:
if isinstance(func, Iterable):
rmsa = min(_angles_deviation(filtered, i()) for i in func)
else:
rmsa = _angles_deviation(filtered, func())
if rmsa < rmsa_cutoff:
geometries.append((name, rmsa))
if geometries:
geometries.sort(key=lambda x: x[-1])
geometries = [geometries[0]]
else:
for name, func in SUPPORTED_GEOMETRIES_OLD.items():
val = func(filtered)
if val:
deviation = val[0]
geometries.append((name, deviation))
return geometries
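# Illustrative usage sketch (not part of the original module, assumes scitbx is
# importable as above): identical vector sets give ~0 RMS angle deviation, while
# distinct ideal geometries of the same size give a clearly positive one.
#
#   rmsa_same = _angles_deviation(_tetrahedron(), _tetrahedron())   # ~0.0
#   rmsa_diff = _angles_deviation(_tetrahedron(), _square_plane())  # clearly > 0
#   assert rmsa_same < rmsa_diff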
|
mcsniperpy/util/ping_tester.py
|
veil-ctf/MCsniperPY
| 1,320 |
93527
|
# type: ignore
# ^ that's necessary to prevent a false linting error of some kind
import asyncio
import urllib.parse
from time import perf_counter
import typer
from mcsniperpy.util.logs_manager import Color as color
from mcsniperpy.util.logs_manager import Logger as log
async def check(url: str, iterations: int):
async def ping():
try:
uri = urllib.parse.urlparse(url)
reader, writer = await asyncio.open_connection(uri.hostname, 443, ssl=False)
writer.write(
f"GET {uri.path or '/'} HTTP/1.1\r\nHost:{uri.hostname}\r\n\r\n".encode()
)
start = perf_counter()
await writer.drain()
_ = await reader.read(100)
end = perf_counter()
return round((end - start) * 1000)
# pylint: disable=invalid-name, broad-except
except Exception as e:
log.error("Failed to connect to URL. error code: " + str(e))
pings = []
with typer.progressbar(
range(iterations),
fill_char="█",
empty_char=" ",
color=10,
show_eta=False,
bar_template="%(label)s %(bar)s %(info)s",
) as progress:
for _ in progress:
pings.append(await ping())
await asyncio.sleep(0.01)
print()
log.info(f"Host {color.l_cyan}» {color.blue}{urllib.parse.urlparse(url).hostname}")
    log.info(f"Ping {color.l_cyan}» {color.blue}{sum(pings) / len(pings)}ms")
async def ping_test(iterations):
print()
await check("https://api.minecraftservices.com/minecraft", iterations)
|
create-template-script/createtemplatefile.py
|
TechnicalConsultant123/material-design-icons-adt-template
| 352 |
93535
|
import os
from os.path import join
from collections import OrderedDict
templateFile = open('../MaterialDesignIcons/template.xml', 'w')
file1 = open('part1.txt', 'r')
for line in file1:
templateFile.write(line),
file1.close()
icons_path = "../MaterialDesignIcons/root/material-design-icons"
walk = os.listdir(icons_path)
onlydirs = [f for f in walk if os.path.isdir(join(icons_path, f))]
# This line may be optional - if get error "ValueError: list.remove(x): x not in list" then comment it out
# onlydirs.remove(".git")
onlydirs.remove("sprites")
for category in onlydirs:
    print(category)
walkdir = os.listdir(icons_path + '/' + category + "/svg/design")
walkdirremovedsvgbit = []
for dir2 in walkdir:
walkdirremovedsvgbit.append(dir2[:-9])
walkdirset = list(OrderedDict.fromkeys(walkdirremovedsvgbit))
for image_name in walkdirset:
templateFile.write(
' <option id="' + category + "/" + image_name + '">' + category + "/" + image_name + '</option>\n')
file2 = open('part2.txt', 'r')
for line in file2:
templateFile.write(line),
file2.close()
for color in ["black", "grey600", "white"]:
for category in onlydirs:
        print(category)
walkdir = os.listdir(icons_path + '/' + category + "/svg/design")
walkdirremovedsvgbit = []
for dir2 in walkdir:
walkdirremovedsvgbit.append(dir2[:-9])
walkdirset = list(OrderedDict.fromkeys(walkdirremovedsvgbit))
for image_name in walkdirset:
templateFile.write(
' <thumb color="' + color + '" asset="' + category + "/" + image_name + '">root/material-design-icons/' + category + '/drawable-xxxhdpi/' + image_name + '_' + color + '_48dp.png</thumb>\n')
file3 = open('part3.txt', 'r')
for line in file3:
templateFile.write(line),
file3.close()
templateFile.close()
|
testing/example.py
|
gndctrl2mjrtm/dkeras
| 195 |
93559
|
# Ideas
"""
server = dkeras.DataServer()
model1.link(model3)
model1.postprocess = lambda z: np.float16(z)
server = model1 + model2 + model3
server.add(camera1, dest=('m1', 'm2'))
server.add_address('192.168.1.42')
"""
|
tests/isolated/green_ssl_py36_properties.py
|
li-caspar/eventlet_0.30.2
| 5,079 |
93570
|
<gh_stars>1000+
__test__ = False
if __name__ == '__main__':
import eventlet
eventlet.monkey_patch()
try:
eventlet.wrap_ssl(
eventlet.listen(('localhost', 0)),
certfile='does-not-exist',
keyfile='does-not-exist',
server_side=True)
except IOError as ex:
assert ex.errno == 2
print('pass')
|
problog/version.py
|
HEmile/problog
| 189 |
93587
|
<gh_stars>100-1000
version = '2.2.2'
|
exp/comm/comm_model_utils.py
|
PeterouZh/CIPS-3D
| 308 |
93588
|
<reponame>PeterouZh/CIPS-3D
import logging
import random
import numpy as np
import math
import torch
import torch.nn as nn
class PosEmbedding(nn.Module):
def __init__(self,
max_logscale,
N_freqs,
logscale=True,
multi_pi=False,):
"""
Defines a function that embeds x to (x, sin(2^k x), cos(2^k x), ...)
"""
super().__init__()
self.N_freqs = N_freqs
self.funcs = [torch.sin, torch.cos]
if logscale:
self.freqs = 2 ** torch.linspace(0, max_logscale, N_freqs)
else:
self.freqs = torch.linspace(1, 2 ** max_logscale, N_freqs)
if multi_pi:
self.freqs = self.freqs * math.pi
pass
def get_out_dim(self):
outdim = 3 + 3 * 2 * self.N_freqs
return outdim
def forward(self, x):
"""
Inputs:
x: (B, 3)
Outputs:
out: (B, 6*N_freqs+3)
"""
out = [x]
for freq in self.freqs:
for func in self.funcs:
out += [func(freq * x)]
return torch.cat(out, -1)
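# Usage sketch for PosEmbedding (illustrative, not part of the original module):
# a (B, 3) input maps to (B, 3 + 3 * 2 * N_freqs) channels.
#
#   emb = PosEmbedding(max_logscale=9, N_freqs=10)
#   out = emb(torch.rand(4, 3))   # shape (4, 63) == (4, emb.get_out_dim())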
class EMA(object):
def __init__(self,
source,
target,
decay=0.9999,
start_itr=0):
"""
# Simple wrapper that applies EMA to a model. Could be better done in 1.0 using
# the parameters() and buffers() module functions, but for now this works
# with state_dicts using .copy_
:param source: model
:param target: ema model
:param decay:
:param start_itr:
"""
self.source = source
self.target = target
self.decay = decay
# Optional parameter indicating what iteration to start the decay at
self.start_itr = start_itr
logger = logging.getLogger('tl')
# Initialize target's params to be source's
self.source_dict = self.source.state_dict()
self.target_dict = self.target.state_dict()
logger.info(f'Initializing EMA [{decay}] parameters to be source parameters...')
self.update_target_dict(source_state_dict=self.source_dict)
pass
def update_target_dict(self, source_state_dict):
"""
Reset the ema model weights.
:param source_state_dict:
:return:
"""
with torch.no_grad():
for key in source_state_dict:
self.target_dict[key].data.copy_(source_state_dict[key].data)
# target_dict[key].data = source_dict[key].data # Doesn't work!
pass
def update(self, itr=None, source_dict=None):
"""
# If an iteration counter is provided and itr is less than the start itr,
# peg the ema weights to the underlying weights.
:param itr:
:return:
"""
if itr is not None and itr < self.start_itr:
# decay = 0.0
return
else:
decay = self.decay
with torch.no_grad():
if source_dict is None:
source_dict = self.source_dict
for key in source_dict:
self.target_dict[key].data.copy_(
self.target_dict[key].data * decay + source_dict[key].data * (1 - decay))
pass
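# Usage sketch for EMA (illustrative, not part of the original module), assuming
# two architecturally identical networks:
#
#   model = nn.Linear(8, 8)
#   model_ema = nn.Linear(8, 8)
#   ema = EMA(source=model, target=model_ema, decay=0.9999, start_itr=1000)
#   # ... after each optimizer step:
#   # ema.update(itr)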
|
lldb/test/API/python_api/frame/inlines/TestInlinedFrame.py
|
mkinsner/llvm
| 2,338 |
93603
|
"""
Test lldb Python SBFrame APIs IsInlined() and GetFunctionName().
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class InlinedFrameAPITestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
        # Find the line number of function 'c'.
self.source = 'inlines.c'
self.first_stop = line_number(
self.source, '// This should correspond to the first break stop.')
self.second_stop = line_number(
self.source, '// This should correspond to the second break stop.')
def test_stop_at_outer_inline(self):
"""Exercise SBFrame.IsInlined() and SBFrame.GetFunctionName()."""
self.build()
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Now create a breakpoint on main.c by the name of 'inner_inline'.
breakpoint = target.BreakpointCreateByName('inner_inline', 'a.out')
self.trace("breakpoint:", breakpoint)
self.assertTrue(breakpoint and
breakpoint.GetNumLocations() > 1,
VALID_BREAKPOINT)
# Now launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
process = target.GetProcess()
self.assertEqual(process.GetState(), lldb.eStateStopped,
PROCESS_STOPPED)
import lldbsuite.test.lldbutil as lldbutil
stack_traces1 = lldbutil.print_stacktraces(process, string_buffer=True)
if self.TraceOn():
print(
"Full stack traces when first stopped on the breakpoint 'inner_inline':")
print(stack_traces1)
# The first breakpoint should correspond to an inlined call frame.
# If it's an inlined call frame, expect to find, in the stack trace,
# that there is a frame which corresponds to the following call site:
#
# outer_inline (argc);
#
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
self.assertIsNotNone(thread)
frame0 = thread.GetFrameAtIndex(0)
if frame0.IsInlined():
filename = frame0.GetLineEntry().GetFileSpec().GetFilename()
self.assertEqual(filename, self.source)
self.expect(
stack_traces1, "First stop at %s:%d" %
(self.source, self.first_stop), exe=False, substrs=[
'%s:%d' %
(self.source, self.first_stop)])
# Expect to break again for the second time.
process.Continue()
self.assertEqual(process.GetState(), lldb.eStateStopped,
PROCESS_STOPPED)
stack_traces2 = lldbutil.print_stacktraces(
process, string_buffer=True)
if self.TraceOn():
print(
"Full stack traces when stopped on the breakpoint 'inner_inline' for the second time:")
print(stack_traces2)
self.expect(
stack_traces2, "Second stop at %s:%d" %
(self.source, self.second_stop), exe=False, substrs=[
'%s:%d' %
(self.source, self.second_stop)])
|
trackstats/trackers.py
|
keranno/django-trackstats
| 369 |
93640
|
from datetime import date, timedelta, datetime, time
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.db import models
from django.db import connections
from django.utils import timezone
from .models import Period, StatisticByDate, StatisticByDateAndObject
class ObjectsByDateTracker(object):
date_field = 'date'
aggr_op = None
metric = None
period = None
statistic_model = StatisticByDate
def __init__(self, **kwargs):
for prop, val in kwargs.items():
setattr(self, prop, val)
def get_most_recent_kwargs(self):
most_recent_kwargs = {
'metric': self.metric,
'period': self.period}
return most_recent_kwargs
def get_start_date(self, qs):
most_recent_kwargs = self.get_most_recent_kwargs()
last_stat = self.statistic_model.objects.most_recent(
**most_recent_kwargs)
if last_stat:
start_date = last_stat.date
else:
first_instance = qs.order_by(self.date_field).first()
if first_instance is None:
# No data
return
start_date = getattr(first_instance, self.date_field)
if start_date and isinstance(start_date, datetime):
if timezone.is_aware(start_date):
start_date = timezone.make_naive(start_date).date()
else:
start_date = start_date.date()
return start_date
def track_lifetime_upto(self, qs, upto_date):
filter_kwargs = {
self.date_field + '__date__lte': upto_date
}
n = qs.filter(**filter_kwargs).count()
self.statistic_model.objects.record(
metric=self.metric,
value=n,
period=self.period,
date=upto_date)
def get_track_values(self):
return []
def get_record_kwargs(self, val):
return {}
def track(self, qs):
to_date = date.today()
start_date = self.get_start_date(qs)
if not start_date:
return
if self.period == Period.LIFETIME:
# Intentionally recompute last stat, as we may have computed
# that the last time when the day was not over yet.
upto_date = start_date
while upto_date <= to_date:
self.track_lifetime_upto(qs, upto_date)
upto_date += timedelta(days=1)
elif self.period == Period.DAY:
values_fields = ['ts_date'] + self.get_track_values()
connection = connections[qs.db]
tzname = (
timezone.get_current_timezone_name()
if settings.USE_TZ else None)
is_datetime = isinstance(qs.model._meta.get_field(
self.date_field), models.DateTimeField)
if is_datetime:
date_sql = connection.ops.datetime_cast_date_sql(
self.date_field,
tzname)
# before django 2.0 it returns a tuple
if isinstance(date_sql, tuple):
vals = qs.extra(
select={"ts_date": date_sql[0]},
select_params=date_sql[1])
else:
vals = qs.extra(select={"ts_date": date_sql})
start_dt = datetime.combine(
start_date, time()) - timedelta(days=1)
if tzname:
start_dt = timezone.make_aware(
start_dt,
timezone.get_current_timezone())
else:
vals = qs.extra(select={"ts_date": self.date_field})
start_dt = start_date
vals = vals.filter(
**{self.date_field + '__gte': start_dt}).values(
*values_fields).order_by().annotate(ts_n=self.aggr_op)
# TODO: Bulk create
for val in vals:
self.statistic_model.objects.record(
metric=self.metric,
value=val['ts_n'],
date=val['ts_date'],
period=self.period,
**self.get_record_kwargs(val))
else:
raise NotImplementedError
class ObjectsByDateAndObjectTracker(ObjectsByDateTracker):
object = None
object_model = None
object_field = None
statistic_model = StatisticByDateAndObject
def __init__(self, **kwargs):
super(ObjectsByDateAndObjectTracker, self).__init__(**kwargs)
assert self.object is None or self.object_field is None
assert self.object or self.object_field
def get_most_recent_kwargs(self):
kwargs = super(
ObjectsByDateAndObjectTracker, self).get_most_recent_kwargs()
if self.object_model:
kwargs['object_type'] = ContentType.objects.get_for_model(
self.object_model)
else:
kwargs['object'] = self.object
return kwargs
def track_lifetime_upto(self, qs, upto_date):
filter_kwargs = {
self.date_field + '__date__lte': upto_date
}
if self.object_model:
vals = qs.filter(**filter_kwargs).values(
self.object_field).annotate(ts_n=self.aggr_op)
for val in vals:
object = self.object_model(
pk=val[self.object_field])
# TODO: Bulk create
StatisticByDateAndObject.objects.record(
metric=self.metric,
value=val['ts_n'],
date=upto_date,
object=object,
period=self.period)
else:
n = qs.filter(**filter_kwargs).count()
StatisticByDateAndObject.objects.record(
metric=self.metric,
value=n,
object=self.object,
period=self.period,
date=upto_date)
def get_track_values(self):
ret = super(ObjectsByDateAndObjectTracker, self).get_track_values()
if self.object_model:
ret.append(self.object_field)
return ret
def get_record_kwargs(self, val):
if self.object_model:
object = self.object_model(pk=val[self.object_field])
else:
object = self.object
return {'object': object}
class CountObjectsByDateTracker(ObjectsByDateTracker):
aggr_op = models.Count('pk', distinct=True)
class CountObjectsByDateAndObjectTracker(ObjectsByDateAndObjectTracker):
aggr_op = models.Count('pk', distinct=True)
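# Usage sketch (illustrative, not part of the original module); `signups_metric`
# and the queryset are hypothetical placeholders:
#
#   CountObjectsByDateTracker(
#       period=Period.DAY,
#       metric=signups_metric,        # a trackstats Metric instance (assumed)
#       date_field='date_joined',
#   ).track(get_user_model().objects.all())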
|
harness/determined/deploy/aws/gen_vcpu_mapping.py
|
gh-determined-ai/determined
| 1,729 |
93654
|
<gh_stars>1000+
import argparse
import json
from pathlib import Path
from typing import Dict, Iterable, List, Tuple
import boto3
import yaml
def _fetch_vcpu_mapping() -> Iterable[Tuple[str, Dict]]:
# Price List api is only available in us-east-1 and ap-southeast-1.
client = boto3.client("pricing", region_name="us-east-1")
for page in client.get_paginator("get_products").paginate(ServiceCode="AmazonEC2"):
for sku_str in page["PriceList"]:
sku_data = json.loads(sku_str)
try:
attributes = sku_data["product"]["attributes"]
data = {
"instanceType": attributes["instanceType"],
"vcpu": int(attributes["vcpu"]),
}
if "gpu" in attributes:
data["gpu"] = int(attributes["gpu"])
yield (attributes["instanceType"], data)
except KeyError:
pass
def fetch_vcpu_mapping() -> List[Dict]:
return [v for (_, v) in sorted(dict(_fetch_vcpu_mapping()).items())]
def main(args: argparse.Namespace) -> None:
data = fetch_vcpu_mapping()
with args.output_fn.open("w") as fout:
yaml.safe_dump(data, fout)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("output_fn", type=Path, help="output filename")
args = parser.parse_args()
main(args)
|
semseg/models/heads/__init__.py
|
Genevievekim/semantic-segmentation-1
| 196 |
93662
|
<filename>semseg/models/heads/__init__.py
from .upernet import UPerHead
from .segformer import SegFormerHead
from .sfnet import SFHead
from .fpn import FPNHead
from .fapn import FaPNHead
from .fcn import FCNHead
from .condnet import CondHead
__all__ = ['UPerHead', 'SegFormerHead', 'SFHead', 'FPNHead', 'FaPNHead', 'FCNHead', 'CondHead']
|
code/exercises/ex_8_8/update_distribution_example.py
|
ElliotMunro200/reinforcement_learning_an_introduction
| 234 |
93673
|
<reponame>ElliotMunro200/reinforcement_learning_an_introduction
#!/usr/bin/env python
"""
--------------------------------
project: code
created: 12/07/2018 10:34
---------------------------------
"""
import itertools
from concurrent.futures import ProcessPoolExecutor
import numpy as np
from generic import policies
class Model:
def __init__(self, n_states, n_actions_per_state, branching_factor, random_state=None):
self.n_states = n_states
self.n_actions_per_state = n_actions_per_state
self.branching_factor = branching_factor
self.random_state = random_state or np.random.RandomState(seed=0)
self.states = np.arange(n_states)
self.actions = np.arange(n_actions_per_state)
self.start_state = 0
self.terminal_state = n_states - 1
self.p_terminate = 0.1
self.non_terminal_states = np.array([s for s in self.states if s != self.terminal_state])
self.current_state = self.start_state
self._successor_table = dict()
for a in self.actions:
self._successor_table[self.terminal_state, a] = []
self._expected_reward_table = self.random_state.normal(
loc=0.,
scale=1.,
size=(n_states - 1, n_actions_per_state)
)
def reset(self):
self.current_state = self.start_state
return
def step(self, action):
if self.random_state.binomial(1, self.p_terminate) == 1:
return self.terminal_state, self.expected_reward(self.current_state, action), True
else:
new_state = self.random_state.choice(self.successors(self.current_state, action))
result = new_state, self.expected_reward(self.current_state, action), new_state == self.terminal_state
self.current_state = new_state
return result
def successors(self, state, action):
if state == self.terminal_state:
return []
if (state, action) not in self._successor_table:
self._successor_table[state, action] = self.random_state.choice(
self.states,
size=self.branching_factor
)
return self._successor_table[state, action]
def expected_reward(self, state, action):
return self._expected_reward_table[state, action]
def initial_action_values(self, v):
avs = {
s: {a: v for a in self.actions} for s in self.states
}
avs[self.terminal_state] = {a: 0. for a in self.actions}
return avs
class UniformUpdateAgent:
def __init__(self, model, gamma):
self.model = model
self.gamma = gamma
def starting_state_value_estimates(self, n_updates, tolerance, policy_eval_iter):
av = self.model.initial_action_values(0.)
starting_state_values = [0.]
i = 0
while i < n_updates:
for state, action in itertools.product(self.model.non_terminal_states, self.model.actions):
av[state][action] = expected_update(
model=self.model,
action_values=av,
state=state,
action=action,
gamma=self.gamma
)
if i % policy_eval_iter == 0 and i != 0:
vals = policy_evaluation(
policy=policies.GreedyPolicy(av, cache=True),
model=self.model,
tolerance=tolerance,
gamma=self.gamma
)
starting_state_values.append(vals[self.model.start_state])
i += 1
if i == n_updates:
break
return np.array(starting_state_values)
class OnPolicyUpdateAgent:
def __init__(self, model, gamma):
self.model = model
self.gamma = gamma
def starting_state_value_estimates(self, n_updates, tolerance, policy_eval_iter):
starting_state_values = [0.]
policy = policies.EpsilonGreedyPolicy(
action_values=self.model.initial_action_values(0.),
epsilon=0.1,
random_state=np.random.RandomState(seed=0)
)
self.model.reset()
i = 0
while i < n_updates:
state = self.model.current_state
action = policy(state)
_, _, done = self.model.step(action)
policy.action_values[state][action] = expected_update(
model=self.model,
action_values=policy.action_values,
state=state,
action=action,
gamma=self.gamma
)
if i % policy_eval_iter == 0 and i != 0:
vals = policy_evaluation(
policy=policies.GreedyPolicy(policy.action_values, cache=True),
model=self.model,
tolerance=tolerance,
gamma=self.gamma
)
starting_state_values.append(vals[self.model.start_state])
if done:
self.model.reset()
i += 1
return np.array(starting_state_values)
def run_task(agent_class, n_states, branching_factor, n_actions_per_state, gamma,
n_iters, n_updates_per_task, tolerance, policy_eval_iter):
agents = [
agent_class(
model=Model(
n_states=n_states,
branching_factor=branching_factor,
n_actions_per_state=n_actions_per_state,
random_state=np.random.RandomState(seed=seed),
),
gamma=gamma
) for seed in range(n_iters)
]
futures = list()
with ProcessPoolExecutor() as executor:
for agent in agents:
futures.append(
executor.submit(
agent.starting_state_value_estimates,
n_updates_per_task,
tolerance,
policy_eval_iter
)
)
return np.column_stack([future.result() for future in futures])
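# The expected (full-backup) update below implements
#   Q(s, a) <- r(s, a) + gamma * (1 - p_terminate) / b * sum_{s'} max_a' Q(s', a')
# where the sum runs over the b successor states stored for (s, a).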
def expected_update(model, action_values, state, action, gamma):
p = (1 - model.p_terminate) / model.branching_factor
return (
model.expected_reward(state, action)
+ gamma * p * sum(max(action_values[s].values()) for s in model.successors(state, action))
)
def policy_evaluation(policy, model, tolerance, gamma, max_updates=10 ** 5):
"""for deterministic policies only"""
values = np.array(
[max(policy.action_values[s].values()) if s != model.terminal_state else 0. for s in model.states]
)
    i = 0
    while True:
        delta = 0.
        old_values = values.copy()
for s in model.non_terminal_states:
a = policy(s)
values[s] = (
model.expected_reward(s, a)
+ gamma * ((1 - model.p_terminate) / model.branching_factor)
* sum(values[ns] for ns in model.successors(s, a))
)
i += 1
if i == max_updates:
return values
delta = max(delta, np.max(np.abs(values - old_values)))
if delta < tolerance:
return values
if __name__ == "__main__":
import pickle
import time
import os
import matplotlib; matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import plotting
import constants as c
from exercises import utils as ex_utils
start = time.time()
n_states = 1000
n_updates_per_task = 20 * n_states
policy_eval_iter = n_updates_per_task / 40
n_iters = 200
branching_factor = 1
uniform_update_output = run_task(
agent_class=UniformUpdateAgent,
n_states=n_states,
n_actions_per_state=2,
branching_factor=branching_factor,
n_iters=n_iters,
n_updates_per_task=n_updates_per_task,
tolerance=0.01,
gamma=0.9,
policy_eval_iter=policy_eval_iter
)
on_policy_update_output = run_task(
agent_class=OnPolicyUpdateAgent,
n_states=n_states,
n_actions_per_state=2,
branching_factor=branching_factor,
n_iters=n_iters,
n_updates_per_task=n_updates_per_task,
tolerance=0.01,
gamma=0.9,
policy_eval_iter=policy_eval_iter
)
ex_utils.to_pickle(
uniform_update_output,
os.path.join(
c.Paths.output,
'ex_8_8',
f'uniform_output_{n_states}_{branching_factor}.pkl'
)
)
ex_utils.to_pickle(
on_policy_update_output,
os.path.join(
c.Paths.output,
'ex_8_8',
f'on_policy_output_{n_states}_{branching_factor}.pkl'
)
)
def plot_output(ax, output, color, label):
x = np.arange(output.shape[0]) * policy_eval_iter
ax.plot(x, output, alpha=0.05, color=color, lw=1, label=None)
ax.plot(x, np.mean(output, axis=1), color=color, lw=2, label=label)
ax.plot(x, np.mean(output, axis=1), color='k', lw=5, label=None, zorder=-1)
return
with plt.rc_context(plotting.rc()):
fig, ax = plt.subplots(1)
plot_output(ax, uniform_update_output, color='C3', label="Uniform")
plot_output(ax, on_policy_update_output, color='C2', label="On-Policy")
ax.grid(alpha=0.1)
ax.set_title(
f"Comparison of update distributions for tasks with {n_states} states and $b=${branching_factor}",
)
ax.set_ylabel("Value of initial state")
ax.set_xlabel("Number of updates")
ax.legend()
plotting.savefig(
fig=fig,
path=os.path.join(
c.Paths.output,
'ex_8_8',
f'update_distribution_comparison_{n_states}_{branching_factor}.png'
)
)
print('ready')
print('took', time.time() - start, 'seconds')
plt.show()
|
saleor/account/migrations/0009_auto_20170206_0407.py
|
elwoodxblues/saleor
| 15,337 |
93679
|
<reponame>elwoodxblues/saleor<gh_stars>1000+
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 10:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("account", "0008_auto_20161115_1011")]
replaces = [("userprofile", "0009_auto_20170206_0407")]
operations = [
migrations.AlterModelOptions(
name="address",
options={"verbose_name": "address", "verbose_name_plural": "addresses"},
),
migrations.AlterModelOptions(
name="user",
options={"verbose_name": "user", "verbose_name_plural": "users"},
),
migrations.AlterField(
model_name="user",
name="addresses",
field=models.ManyToManyField(
blank=True, to="account.Address", verbose_name="addresses"
),
),
migrations.AlterField(
model_name="user",
name="email",
field=models.EmailField(max_length=254, unique=True, verbose_name="email"),
),
]
|
venv/lib/python3.8/site-packages/restructuredtext_lint/__init__.py
|
trkohler/biopython
| 142 |
93680
|
# Load in our dependencies
from __future__ import absolute_import
from restructuredtext_lint.lint import lint, lint_file
# Export lint functions
lint = lint
lint_file = lint_file
|
client/channellog.py
|
heartsoso/Discline
| 535 |
93715
|
# Wrapper class to make dealing with logs easier
class ChannelLog():
__channel = ""
__logs = []
unread = False
mentioned_in = False
# the index of where to start printing the messages
__index = 0
def __init__(self, channel, logs):
self.__channel = channel
self.__logs = list(logs)
def get_server(self): return self.__channel.server
def get_channel(self): return self.__channel
def get_logs(self):
return self.__logs
def get_name(self):
return self.__channel.name
def get_server_name(self):
return self.__channel.server.name
def append(self, message):
self.__logs.append(message)
def index(self, message):
return self.__logs.index(message)
def insert(self, i, message):
self.__logs.insert(i, message)
def len(self):
return len(self.__logs)
def get_index(self):
return self.__index
    def set_index(self, value):
        self.__index = value
    def inc_index(self, value):
        self.__index += value
    def dec_index(self, value):
        self.__index -= value
|
scripts/remove_input_references.py
|
rlamoureux/Python-
| 263 |
93723
|
<gh_stars>100-1000
# This script removes the input reference numbers from html pages.
# They play a useful role in scientific notebooks, but they are really
# just visual clutter in this project.
# Could be an nbconvert setting, but it's an easy enough scripting job.
import os
import sys
print("\nStripping input reference numbers from code cells...")
# Find all files to work with.
path_to_notebooks = '/srv/projects/intro_programming/intro_programming/notebooks/'
filenames = []
for filename in os.listdir(path_to_notebooks):
if '.html' in filename and filename != 'index.html':
filenames.append(filename)
# one file for testing:
#filenames = ['hello_world.html']
for filename in filenames:
f = open(path_to_notebooks + filename, 'r')
lines = f.readlines()
f.close()
f = open(path_to_notebooks + filename, 'wb')
for line in lines:
# Unwanted lines have opening and closing div on same line,
# with input reference number between them.
if ('<div class="prompt input_prompt">' in line
and '</div>' in line):
# Don't write this line.
continue
else:
# Regular line, write it.
f.write(line.encode('utf-8'))
f.close()
print(" Stripped input reference numbers.\n")
|
tests/geo_test.py
|
sethvargo/vaex
| 337 |
93748
|
from common import *
def test_virtual_columns_spherical():
df = vaex.from_scalars(alpha=0, delta=0, distance=1)
df.add_virtual_columns_spherical_to_cartesian("alpha", "delta", "distance", "x", "y", "z", radians=False)
x, y, z = df['x'].values[0], df['y'].values[0], df['z'].values[0]
np.testing.assert_array_almost_equal(x, 1)
np.testing.assert_array_almost_equal(y, 0)
np.testing.assert_array_almost_equal(z, 0)
for radians in [True, False]:
def dfs(alpha, delta, distance, radians=radians):
ds_1 = vaex.from_scalars(alpha=alpha, delta=delta, distance=distance, alpha_e=0.1, delta_e=0.2, distance_e=0.3)
ds_1.add_virtual_columns_spherical_to_cartesian("alpha", "delta", "distance", propagate_uncertainties=True, radians=radians)
N = 1000000
# distance
alpha = np.random.normal(0, 0.1, N) + alpha
delta = np.random.normal(0, 0.2, N) + delta
distance = np.random.normal(0, 0.3, N) + distance
ds_many = vaex.from_arrays(alpha=alpha, delta=delta, distance=distance)
ds_many.add_virtual_columns_spherical_to_cartesian("alpha", "delta", "distance", radians=radians)
return ds_1, ds_many
ds_1, ds_many = dfs(0, 0, 1.)
x_e = ds_1.evaluate("x_uncertainty")[0]
y_e = ds_1.evaluate("y_uncertainty")[0]
z_e = ds_1.evaluate("z_uncertainty")[0]
np.testing.assert_array_almost_equal(x_e, ds_many.std("x").item(), decimal=2)
np.testing.assert_array_almost_equal(y_e, ds_many.std("y").item(), decimal=2)
np.testing.assert_array_almost_equal(z_e, ds_many.std("z").item(), decimal=2)
np.testing.assert_array_almost_equal(x_e, 0.3)
# TODO: from cartesian tot spherical errors
df.add_virtual_columns_cartesian_to_spherical("x", "y", "z", "theta", "phi", "r", radians=False)
theta, phi, r = df("theta", "phi", "r").row(0)
np.testing.assert_array_almost_equal(theta, 0)
np.testing.assert_array_almost_equal(phi, 0)
np.testing.assert_array_almost_equal(r, 1)
df.add_virtual_columns_celestial("alpha", "delta", "l", "b", _matrix='eq2gal')
# TODO: properly test, with and without radians
df.evaluate("l")
df.evaluate("b")
ds = vaex.from_scalars(x=1, y=0, z=0)
ds.add_virtual_columns_cartesian_to_spherical()
assert ds.evaluate('b')[0] == 0
def test_inside_polygon_single(df_factory):
df = df_factory(x=[1, 2, 3], y=[2, 3, 4])
px = np.array([1.5, 2.5, 2.5, 1.5])
py = np.array([2.5, 2.5, 3.5, 3.5])
df['inside'] = df.geo.inside_polygon(df.x, df.y, px, py)
assert df.inside.values.tolist() == [False, True, False]
def test_inside_polygons(df_factory):
df = df_factory(x=[1, 2, 3], y=[2, 3, 4])
px = np.array([1.5, 2.5, 2.5, 1.5])
py = np.array([2.5, 2.5, 3.5, 3.5])
df['inside'] = df.geo.inside_polygons(df.x, df.y, [px, px + 1], [py, py + 1], any=True)
assert df.inside.values.tolist() == [False, True, True]
def test_which_polygon_single(df_factory):
df = df_factory(x=[1, 2, 3], y=[2, 3, 4])
px = np.array([1.5, 2.5, 2.5, 1.5])
py = np.array([2.5, 2.5, 3.5, 3.5])
df['polygon_index'] = df.geo.inside_which_polygon(df.x, df.y, [px, px + 1], [py, py + 1])
assert df.polygon_index.values.tolist() == [None, 0, 1]
def test_which_polygons(df_factory):
df = df_factory(x=[1, 2, 3], y=[2, 3, 4])
# polygon1a = np.array( [(1.5, 2.5, 2.5, 1.5), (2.5, 2.5, 3.5, 3.5)] )
# polygon1b = (polygon1a.T + [1, 1]).T
px = np.array([1.5, 2.5, 2.5, 1.5])
py = np.array([2.5, 2.5, 3.5, 3.5])
polygon1a = [px, py] # matches #1
polygon1b = [px + 1, py + 1] # matches #2
polygon_nothing = [px + 10, py + 10] # matches nothing
pxw = np.array([1.5, 3.5, 3.5, 1.5])
pyw = np.array([2.5, 2.5, 4.5, 4.5])
polygon1c = [pxw, pyw] # matches #1 and 2
pxs = [[polygon1a, polygon1b], [polygon1b, polygon1c], [polygon1c]]
df['polygon_index'] = df.geo.inside_which_polygons(df.x, df.y, pxs, any=True)
assert df.polygon_index.values.tolist() == [None, 0, 0]
df['polygon_index'] = df.geo.inside_which_polygons(df.x, df.y, pxs, any=False)
assert df.polygon_index.values.tolist() == [None, 2, 1]
pxs = [[polygon_nothing, polygon1a, polygon_nothing]]
df['polygon_index'] = df.geo.inside_which_polygons(df.x, df.y, pxs, any=True)
assert df.polygon_index.values.tolist() == [None, 0, None]
pxs = [[polygon1a, polygon_nothing, polygon1a]]
df['polygon_index'] = df.geo.inside_which_polygons(df.x, df.y, pxs, any=False)
assert df.polygon_index.values.tolist() == [None, None, None]
|
caffe/ultra_face_caffe_inference.py
|
Yucao42/Ultra-Light-Fast-Generic-Face-Detector-1MB
| 6,602 |
93765
|
# coding=utf-8
import argparse
import os
import time
from math import ceil
import caffe
import cv2
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--caffe_prototxt_path', default="model/RFB-320/RFB-320.prototxt", type=str, help='caffe_prototxt_path')
parser.add_argument('--caffe_model_path', default="model/RFB-320/RFB-320.caffemodel", type=str, help='caffe_model_path')
parser.add_argument('--input_size', default="320,240", type=str, help='define network input size,format: width,height')
parser.add_argument('--threshold', default=0.7, type=float, help='score threshold')
parser.add_argument('--imgs_path', default="../MNN/imgs", type=str, help='imgs dir')
parser.add_argument('--results_path', default="results", type=str, help='results dir')
parser.add_argument('--mode', default="cpu", type=str, help='cpu or gpu')
args = parser.parse_args()
if args.mode == "cpu":
caffe.set_mode_cpu()
elif args.mode == "gpu":
caffe.set_mode_gpu()
image_mean = np.array([127, 127, 127])
image_std = 128.0
iou_threshold = 0.3
center_variance = 0.1
size_variance = 0.2
min_boxes = [[10.0, 16.0, 24.0], [32.0, 48.0], [64.0, 96.0], [128.0, 192.0, 256.0]]
strides = [8.0, 16.0, 32.0, 64.0]
def define_img_size(image_size):
shrinkage_list = []
feature_map_w_h_list = []
for size in image_size:
feature_map = [int(ceil(size / stride)) for stride in strides]
feature_map_w_h_list.append(feature_map)
for i in range(0, len(image_size)):
shrinkage_list.append(strides)
priors = generate_priors(feature_map_w_h_list, shrinkage_list, image_size, min_boxes)
return priors
def generate_priors(feature_map_list, shrinkage_list, image_size, min_boxes):
priors = []
for index in range(0, len(feature_map_list[0])):
scale_w = image_size[0] / shrinkage_list[0][index]
scale_h = image_size[1] / shrinkage_list[1][index]
for j in range(0, feature_map_list[1][index]):
for i in range(0, feature_map_list[0][index]):
x_center = (i + 0.5) / scale_w
y_center = (j + 0.5) / scale_h
for min_box in min_boxes[index]:
w = min_box / image_size[0]
h = min_box / image_size[1]
priors.append([
x_center,
y_center,
w,
h
])
print("priors nums:{}".format(len(priors)))
return np.clip(priors, 0.0, 1.0)
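# Note (added for clarity): with the default 320x240 input this should yield
# 4420 priors in total (40*30*3 + 20*15*2 + 10*8*2 + 5*4*3), matching the
# RFB-320 model's output size.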
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
scores = box_scores[:, -1]
boxes = box_scores[:, :-1]
picked = []
indexes = np.argsort(scores)
indexes = indexes[-candidate_size:]
while len(indexes) > 0:
current = indexes[-1]
picked.append(current)
if 0 < top_k == len(picked) or len(indexes) == 1:
break
current_box = boxes[current, :]
indexes = indexes[:-1]
rest_boxes = boxes[indexes, :]
iou = iou_of(
rest_boxes,
np.expand_dims(current_box, axis=0),
)
indexes = indexes[iou <= iou_threshold]
return box_scores[picked, :]
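# Illustrative example (not part of the original script): boxes are given as
# [x1, y1, x2, y2, score]; overlapping detections above the IoU threshold are
# suppressed in favour of the highest-scoring one.
#
#   dets = np.array([[ 0,  0, 10, 10, 0.9],
#                    [ 1,  1, 10, 10, 0.8],    # IoU ~0.81 with the first box
#                    [20, 20, 30, 30, 0.7]])
#   kept = hard_nms(dets, iou_threshold=0.3)   # keeps rows 0 and 2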
def area_of(left_top, right_bottom):
hw = np.clip(right_bottom - left_top, 0.0, None)
return hw[..., 0] * hw[..., 1]
def iou_of(boxes0, boxes1, eps=1e-5):
overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
overlap_area = area_of(overlap_left_top, overlap_right_bottom)
area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
return overlap_area / (area0 + area1 - overlap_area + eps)
def predict(width, height, confidences, boxes, prob_threshold, iou_threshold=0.3, top_k=-1):
boxes = boxes[0]
confidences = confidences[0]
picked_box_probs = []
picked_labels = []
for class_index in range(1, confidences.shape[1]):
probs = confidences[:, class_index]
mask = probs > prob_threshold
probs = probs[mask]
if probs.shape[0] == 0:
continue
subset_boxes = boxes[mask, :]
box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)
box_probs = hard_nms(box_probs,
iou_threshold=iou_threshold,
top_k=top_k,
)
picked_box_probs.append(box_probs)
picked_labels.extend([class_index] * box_probs.shape[0])
if not picked_box_probs:
return np.array([]), np.array([]), np.array([])
picked_box_probs = np.concatenate(picked_box_probs)
picked_box_probs[:, 0] *= width
picked_box_probs[:, 1] *= height
picked_box_probs[:, 2] *= width
picked_box_probs[:, 3] *= height
return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4]
def convert_locations_to_boxes(locations, priors, center_variance,
size_variance):
if len(priors.shape) + 1 == len(locations.shape):
priors = np.expand_dims(priors, 0)
return np.concatenate([
locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2],
np.exp(locations[..., 2:] * size_variance) * priors[..., 2:]
], axis=len(locations.shape) - 1)
def center_form_to_corner_form(locations):
return np.concatenate([locations[..., :2] - locations[..., 2:] / 2,
locations[..., :2] + locations[..., 2:] / 2], len(locations.shape) - 1)
def inference():
net = caffe.Net(args.caffe_prototxt_path, args.caffe_model_path, caffe.TEST)
input_size = [int(v.strip()) for v in args.input_size.split(",")]
    width = input_size[0]
    height = input_size[1]
    priors = define_img_size(input_size)
    net.blobs['input'].reshape(1, 3, height, width)
result_path = args.results_path
imgs_path = args.imgs_path
if not os.path.exists(result_path):
os.makedirs(result_path)
listdir = os.listdir(imgs_path)
for file_path in listdir:
img_path = os.path.join(imgs_path, file_path)
img_ori = cv2.imread(img_path)
        tmp_batch = np.zeros([1, 3, height, width], dtype=np.float32)
        rect = cv2.resize(img_ori, (width, height))
rect = cv2.cvtColor(rect, cv2.COLOR_BGR2RGB)
image = (rect - image_mean) / image_std
tmp_batch[0, :, :, :] = image.transpose(2, 0, 1)
net.blobs['input'].data[...] = tmp_batch
time_time = time.time()
        out = net.forward()  # run inference once and read both outputs
        scores = out['scores'][0]
        boxes = out['boxes'][0]
print("inference time: {} s".format(round(time.time() - time_time, 4)))
boxes = np.expand_dims(np.reshape(boxes, (-1, 4)), axis=0)
scores = np.expand_dims(np.reshape(scores, (-1, 2)), axis=0)
boxes = convert_locations_to_boxes(boxes, priors, center_variance, size_variance)
boxes = center_form_to_corner_form(boxes)
boxes, labels, probs = predict(img_ori.shape[1], img_ori.shape[0], scores, boxes, args.threshold)
for i in range(boxes.shape[0]):
box = boxes[i, :]
cv2.rectangle(img_ori, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
cv2.imwrite(os.path.join(result_path, file_path), img_ori)
print("result_pic is written to {}".format(os.path.join(result_path, file_path)))
cv2.imshow("ultraFace_caffe_py", img_ori)
cv2.waitKey(-1)
cv2.destroyAllWindows()
if __name__ == '__main__':
inference()
|
awsume/awsumepy/lib/logger.py
|
ignatenkobrain/awsume
| 654 |
93780
|
<filename>awsume/awsumepy/lib/logger.py
import sys
import logging
import re
class LogFormatter(logging.Formatter):
@staticmethod
def _filter(s):
no_access_key_id = re.sub(r'(?<![A-Z0-9])[A-Z0-9]{20}(?![A-Z0-9])', 'SECRET', s)
no_secret_access_key = re.sub(r'(?<![A-Za-z0-9/+=])[A-Za-z0-9/+=]{40}(?![A-Za-z0-9/+=])', 'SECRET', no_access_key_id)
return no_secret_access_key
def format(self, record):
original = logging.Formatter.format(self, record)
return self._filter(original)
logger = logging.getLogger('awsume') # type: logging.Logger
LOG_HANDLER = logging.StreamHandler()
LOG_HANDLER.setFormatter(LogFormatter('[%(asctime)s] %(filename)s:%(funcName)s : [%(levelname)s] %(message)s'))
logger.addHandler(LOG_HANDLER)
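# Illustrative example (not part of the original module): a 20-character
# AWS-style access key id inside a log message is masked before emission.
#
#   LogFormatter._filter("creds: AKIAIOSFODNN7EXAMPLE")   # -> "creds: SECRET"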
|
common/request.py
|
ujlbu4/vas3k.club
| 496 |
93798
|
from django.http import JsonResponse, Http404
from django.shortcuts import redirect
def parse_ip_address(request):
ipaddress = request.META.get("HTTP_X_REAL_IP") \
or request.META.get("HTTP_X_FORWARDED_FOR") \
or request.environ.get("REMOTE_ADDR") or ""
if "," in ipaddress: # multiple ips in the header
ipaddress = ipaddress.split(",", 1)[0]
return ipaddress
def parse_useragent(request):
return (request.META.get("HTTP_USER_AGENT") or "")[:512]
def is_ajax(request):
return bool(request.GET.get("is_ajax"))
def ajax_request(view):
def wrapper(request, *args, **kwargs):
status_code = 200
try:
results = view(request, *args, **kwargs)
except Http404:
status_code = 404
results = {"error": "Not Found"}
if is_ajax(request):
return JsonResponse(data=results, status=status_code)
else:
return redirect(request.META.get("HTTP_REFERER") or "/")
return wrapper
|
sportsbetting/bookmakers/netbet.py
|
pretrehr/Sports-betting
| 169 |
93807
|
"""
NetBet odds scraper
"""
import datetime
import http.client
import re
import urllib
import urllib.error
import urllib.request
import fake_useragent
from bs4 import BeautifulSoup
import sportsbetting as sb
from sportsbetting.auxiliary_functions import truncate_datetime
def parse_netbet(url):
"""
    Returns the odds available on netbet.
"""
sport = None
if url in ["football", "tennis", "basketball", "hockey-glace", "rugby", "handball"]:
sport = url
url = "https://www.netbet.fr/top-paris"
    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) "
                             "AppleWebKit/537.36 (KHTML, like Gecko) "
                             "Chrome/83.0.4103.97 "
                             "Safari/537.36"}
for _ in range(3):
try:
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request, timeout=5)
soup = BeautifulSoup(response, features="lxml")
break
except http.client.IncompleteRead:
headers = {"User-Agent": fake_useragent.UserAgent().random}
print("User agent change")
except urllib.error.HTTPError:
headers = {"User-Agent": fake_useragent.UserAgent().random}
print("User agent change (403)")
except urllib.error.URLError:
headers = {"User-Agent": fake_useragent.UserAgent().random}
print("User agent change (Timeout)")
else:
raise sb.UnavailableSiteException
if soup.find(attrs={"class": "none"}):
raise sb.UnavailableCompetitionException
if response.geturl() == "https://www.netbet.fr/":
raise sb.UnavailableCompetitionException
match_odds_hash = {}
today = datetime.datetime.today()
today = datetime.datetime(today.year, today.month, today.day)
date = ""
year = " " + str(today.year)
match = ""
competition = ""
date_time = None
valid_match = True
for line in soup.find_all():
if "class" in line.attrs and "nb-link-event" in line["class"] and "href" in line.attrs:
if sport:
valid_match = sport+"/" in line["href"]
link = line["href"]
competition = " - ".join(map(lambda x : x.replace("-", " ").title(), link.split("/")[2:4]))
elif "class" in line.attrs and "nb-event_datestart" in line["class"]:
date = list(line.stripped_strings)[0] + year
if "Auj." in date:
date = datetime.datetime.today().strftime("%d/%m %Y")
elif "class" in line.attrs and "nb-event_timestart" in line["class"]:
hour = line.text
if " min" in hour:
date_time = datetime.datetime.today()+datetime.timedelta(minutes=int(hour.strip(" min")))
date_time = truncate_datetime(date_time)
continue
try:
date_time = datetime.datetime.strptime(
date + " " + hour, "%d/%m %Y %H:%M")
if date_time < today:
date_time = date_time.replace(year=date_time.year + 1)
except ValueError:
date_time = "undefined"
elif "class" in line.attrs and "nb-event_actors" in line["class"]:
match = " - ".join(list(map(lambda x: x.replace(" - ",
"-"), line.stripped_strings)))
reg_exp = r'\[[0-7]\/[0-7]\s?([0-7]\/[0-7]\s?)*\]|\[[0-7]\-[0-7]\s?([0-7]\-[0-7]\s?)*\]'
            if list(re.finditer(reg_exp, match)):  # live tennis match
match = match.split("[")[0].strip()
elif "class" in line.attrs and "nb-event_odds_wrapper" in line["class"]:
try:
odds = list(map(lambda x: float(x.replace(",", ".")),
list(line.stripped_strings)[1::2]))
if valid_match and match and match not in match_odds_hash and date_time:
match_odds_hash[match] = {}
match_odds_hash[match]['odds'] = {"netbet": odds}
match_odds_hash[match]['date'] = date_time
match_odds_hash[match]['id'] = {"netbet": link}
match_odds_hash[match]['competition'] = competition
            except ValueError:  # live match (odds not available)
pass
return match_odds_hash
|
scripts/storage/secondary/cloud-install-sys-tmplt.py
|
ycyun/ablestack-cloud
| 1,131 |
93826
|
<filename>scripts/storage/secondary/cloud-install-sys-tmplt.py<gh_stars>1000+
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import sys
import urllib
import uuid
import subprocess
import os
import shutil
import gzip
import zipfile
import bz2
class InstallSysTemplate(object):
parser = None
mountpoint = None
args = None
hypervisor = None
systemvmtemplatepath = None
systemvmtemplateurl = None
managementsecretkey = None
forcecleanup = None
databasehostname = None
databaseusername = None
databaseuserpassword = None
templatesuffix = None
template = None
fileextension = None
templateName = None
destDir = None
fileSize = None
dictionary = None
def __init__(self):
self.dictionary = dict(xenserver=('XenServer', 'vhd'), kvm=('KVM', 'qcow2'), vmware=('VMware', 'ova'), lxc=('LXC', 'qcow2'), hyperv=('Hyperv', 'vhd'))
def parseOptions(self):
self.parser = argparse.ArgumentParser(prog="System Template Installer")
self.parser.add_argument("-m", "--mount-point", action="store", dest="mountpoint", help="Secondary Storage Mount Point where to install the temlate.", required="true")
self.parser.add_argument("-H", "--hypervisor", action="store", dest="hypervisor", help="The Hypervisor name for which template need to be installed", required="true", choices=['kvm','xenserver','vmware','lxc','hyperv'])
group = self.parser.add_mutually_exclusive_group(required=True)
group.add_argument("-f", "--system-vm-template", action="store", dest="systemvmtemplatepath", help="The local system vm template file path")
group.add_argument("-u", "--system-vm-template-url", action="store", dest="systemvmtemplateurl", help="Url to download system vm template")
self.parser.add_argument("-s", "--management-secret-key", action="store", dest="managementsecretkey", help="mgmt server secret key, if you specified any when running cloudstack-setup-database, default is password", default="password")
self.parser.add_argument("-F", "--force-clean-up", action="store_true", dest="forcecleanup", help="clean up system templates of specified hypervisor", default="false")
self.parser.add_argument("-d", "--database-host-name", action="store", dest="databasehostname", help="Database server hostname or ip, e.g localhost", default="localhost", required="true")
self.parser.add_argument("-r", "--database-user-name", action="store", dest="databaseusername", help="Database user name, e.g root", default="root", required="true")
self.parser.add_argument("-p", "--database-user-password", nargs='?', action="store", dest="databaseuserpassword", help="Database password. Followed by nothing if the password is empty", default="", required="true")
self.parser.add_argument("-e", "--template-suffix", action="store", dest="templatesuffix", help="Template suffix, e.g vhd, ova, qcow2",default="vhd")
self.parser.add_argument("-t", "--file-extension", action="store", dest="fileextension", help="The template file extension", default="", required="true", choices=['bz2','gz','zip'])
self.args = self.parser.parse_args()
def populateOptions(self):
self.mountpoint = self.args.mountpoint
self.hypervisor = self.args.hypervisor
self.fileextension = self.args.fileextension
if self.args.systemvmtemplatepath:
self.systemvmtemplatepath = self.args.systemvmtemplatepath
if self.args.systemvmtemplateurl:
self.systemvmtemplateurl = self.args.systemvmtemplateurl
if self.args.managementsecretkey:
self.managementsecretkey = self.args.managementsecretkey
if self.args.forcecleanup:
self.forcecleanup = self.args.forcecleanup
if self.args.databasehostname:
self.databasehostname = self.args.databasehostname
if self.args.databaseusername:
self.databaseusername = self.args.databaseusername
if self.args.databaseuserpassword:
self.databaseuserpassword = self.args.databaseuserpassword
else:
self.databaseuserpassword = ""
if self.args.templatesuffix:
self.templatesuffix = self.args.templatesuffix
print 'Password for DB: %s'%self.databaseuserpassword
def errorAndExit(self, msg):
err = '''\n\nWe apologize for below error:
***************************************************************
%s
***************************************************************
Please run:
cloud-install-sys-tmplt -h
for full help
''' % msg
sys.stderr.write(err)
sys.stderr.flush()
sys.exit(1)
def runCmd(self, cmds):
process = subprocess.Popen(' '.join(cmds), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
print(stdout)
if process.returncode != 0:
raise Exception(stderr)
return stdout
def runMysql(self, query):
try:
print 'Running Query: %s' % query
mysqlCmds = ['mysql', '--user=%s'%self.databaseusername, '--host=%s'%self.databasehostname, '--password=%s'%self.databaseuserpassword, '--skip-column-names', '-U', 'cloud', '-e "%s"'%query]
templateId = self.runCmd(mysqlCmds)
print 'TemplateId is : %s' % templateId
except Exception, e:
err = '''Encountering an error when executing mysql script\n%s''' % str(e)
self.errorAndExit(err)
return templateId
def fetchTemplateDetails(self):
mysqlQuery = "select max(id) from cloud.vm_template where type = 'SYSTEM' and hypervisor_type = '%s' and removed is null"
ht = None
hypervisorInfo = self.dictionary[self.hypervisor]
ht = hypervisorInfo[0]
self.templatesuffix = hypervisorInfo[1]
self.template = int(self.runMysql(mysqlQuery%ht))
def downloadTemplate(self):
self.systemvmtemplatepath = self.templateName + "." + self.fileextension
print 'Downloading template from %s To %s' % (self.systemvmtemplateurl, self.systemvmtemplatepath)
try:
templateFileDownloadUrl = urllib.urlretrieve(self.systemvmtemplateurl, self.systemvmtemplatepath, reporthook=self.report)
except Exception:
self.errorAndExit("Failed to download template file from %s" % self.systemvmtemplateurl)
def report(tmp, blocknr, blocksize, size):
current = blocknr*blocksize
sys.stdout.write("\rDownloading completed: {0:.2f}%".format(100.0*current/size))
def installTemplate(self):
destDir = self.mountpoint + os.sep + "template" + os.sep + "tmpl" + os.sep + "1" + os.sep + str(self.template)
self.destDir = destDir
print 'The desination Directory is : %s' % destDir
try:
if self.forcecleanup:
if os.path.exists(destDir):
shutil.rmtree(destDir)
if not os.path.exists(destDir):
os.makedirs(destDir)
except Exception, e:
self.errorAndExit('Failed to create directories on the mounted path.. %s' % str (e))
print 'Installing Template to : %s' % destDir
tmpFile = self.templateName + "." + "tmp"
self.uncompressFile(tmpFile)
print 'Moving the decompressed file to destination directory %s... which could take a long time, please wait' % destDir
shutil.move(tmpFile, destDir + os.sep + self.templateName)
def uncompressFile(self, fileName):
print 'Uncompressing the file %s... which could take a long time, please wait' % self.systemvmtemplatepath
if self.fileextension == 'gz':
compressedFile = gzip.GzipFile(self.systemvmtemplatepath, 'rb')
decompressedData = compressedFile.read()
compressedFile.close()
decompressedFile = file(fileName, 'wb')
decompressedFile.write(decompressedData)
decompressedFile.close()
elif self.fileextension == 'bz2':
compressedFile = bz2.BZ2File(self.systemvmtemplatepath)
decompressedData = compressedFile.read()
compressedFile.close()
decompressedFile = file(fileName, 'wb')
decompressedFile.write(decompressedData)
decompressedFile.close()
print ''
elif self.fileextension == 'zip':
zippedFile = zipfile.ZipFile(self.systemvmtemplatepath, 'r')
zippedFiles = zippedFile.namelist()
compressedFile = zippedFiles[0]
decompressedData = zippedFile.read(compressedFile)
decompressedFile = file(fileName, 'wb')
decompressedFile.write(decompressedData)
decompressedFile.close()
zippedFile.close()
print ''
else:
self.errorAndExit('Not supported file type %s to decompress' % self.fileextension)
self.fileSize = os.path.getsize(fileName)
def writeProperties(self):
propertiesFile = file(self.destDir + os.sep + 'template.properties', 'wb')
propertiesFile.write('filename=%s\n'%self.templateName)
propertiesFile.write('description=SystemVM Template\n')
propertiesFile.write('checksum=\n')
propertiesFile.write('hvm=false\n')
propertiesFile.write('size=%s\n'%str(self.fileSize))
propertiesFile.write('%s=true\n'%self.templatesuffix)
propertiesFile.write('id=%s\n'%str(self.template))
propertiesFile.write('public=true\n')
propertiesFile.write('%s.filename=%s\n'%(self.templatesuffix, self.templateName))
propertiesFile.write('uniquename=routing-%s\n'%str(self.template))
propertiesFile.write('%s.virtualsize=%s\n'%(self.templatesuffix, str(self.fileSize)))
propertiesFile.write('virtualsize=%s\n'%str(self.fileSize))
propertiesFile.write('%s.size=%s'%(self.templatesuffix, str(self.fileSize)))
propertiesFile.close()
def run(self):
try:
self.parseOptions()
self.populateOptions()
self.fetchTemplateDetails()
randomUUID = uuid.uuid1()
self.templateName = str(randomUUID) + "." + self.templatesuffix
if self.args.systemvmtemplateurl:
self.downloadTemplate()
self.installTemplate()
self.writeProperties()
finally:
print ''
print ''
print "CloudStack has successfully installed system template"
print ''
if __name__ == "__main__":
o = InstallSysTemplate()
o.run()
|
examples/pkg1/test_mod2.py
|
altendky/pytest-monitor
| 136 |
93835
|
<gh_stars>100-1000
import time
def test_sleep_400ms():
time.sleep(0.4)
|
glumpy/gloo/framebuffer.py
|
Frekby/glumpy
| 1,074 |
93858
|
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
"""
A framebuffer is a collection of buffers that can be used as the destination
for rendering. OpenGL has two kinds of framebuffers: the default framebuffer,
which is provided by the OpenGL Context; and user-created framebuffers called
framebuffer objects (FBOs). The buffers for default framebuffers are part of
the context and usually represent a window or display device. The buffers for
FBOs reference images from either textures or render buffers; they are never
directly visible.
Read more on framebuffers on `OpenGL Wiki
<https://www.opengl.org/wiki/Framebuffer>`_
**Example usage**
.. code:: python
...
texture = np.zeros((512,512,4),np.float32).view(gloo.TextureFloat2D)
framebuffer = gloo.FrameBuffer(color=[texture])
...
@window.event
def on_draw(dt):
framebuffer.activate()
window.clear()
quad.draw(gl.GL_TRIANGLE_STRIP)
framebuffer.deactivate()
"""
import numpy as np
from glumpy import gl
from glumpy.log import log
from glumpy.gloo.globject import GLObject
from glumpy.gloo.texture import Texture2D
class RenderBuffer(GLObject):
""" Base class for render buffer object.
:param GLEnum format: Buffer format
:param int width: Buffer width (pixels)
    :param int height: Buffer height (pixels)
"""
def __init__(self, width, height, format):
GLObject.__init__(self)
self._width = width
self._height = height
self._target = gl.GL_RENDERBUFFER
self._format = format
self._need_resize = True
@property
def width(self):
""" Buffer width (read-only). """
return self._width
@property
def height(self):
""" Buffer height (read-only). """
return self._height
def resize(self, width, height):
""" Resize the buffer (deferred operation).
:param int width: New buffer width (pixels)
:param int height: New buffer height (pixels)
"""
if width != self._width or height != self._height:
self._need_resize = True
self._width = width
self._height = height
def _create(self):
""" Create buffer on GPU """
log.debug("GPU: Create render buffer")
self._handle = gl.glGenRenderbuffers(1)
def _delete(self):
""" Delete buffer from GPU """
log.debug("GPU: Deleting render buffer")
gl.glDeleteRenderbuffer(self._handle)
def _activate(self):
""" Activate buffer on GPU """
log.debug("GPU: Activate render buffer")
gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self._handle)
if self._need_resize:
self._resize()
self._need_resize = False
def _deactivate(self):
""" Deactivate buffer on GPU """
log.debug("GPU: Deactivate render buffer")
gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, 0)
def _resize(self):
""" Buffer resize on GPU """
# WARNING: width/height should be checked against maximum size
# maxsize = gl.glGetParameter(gl.GL_MAX_RENDERBUFFER_SIZE)
log.debug("GPU: Resize render buffer")
gl.glRenderbufferStorage(self._target, self._format,
self._width, self._height)
class ColorBuffer(RenderBuffer):
""" Color buffer object.
:param int width: Buffer width (pixels)
    :param int height: Buffer height (pixels)
:param GLEnum format: Buffer format (default is gl.GL_RGBA)
"""
def __init__(self, width, height, format=gl.GL_RGBA):
# if format not in (gl.GL_RGB565, gl.GL_RGBA4, gl.GL_RGB5_A1):
# raise ValueError("Format not allowed for color buffer")
RenderBuffer.__init__(self, width, height, format)
class DepthBuffer(RenderBuffer):
""" Depth buffer object.
:param int width: Buffer width (pixels)
    :param int height: Buffer height (pixels)
:param GLEnum format: Buffer format (default is gl.GL_DEPTH_COMPONENT)
"""
def __init__(self, width, height, format=gl.GL_DEPTH_COMPONENT):
#if format not in (gl.GL_DEPTH_COMPONENT16,):
# raise ValueError("Format not allowed for depth buffer")
RenderBuffer.__init__(self, width, height, format)
class StencilBuffer(RenderBuffer):
""" Stencil buffer object
:param int width: Buffer width (pixels)
    :param int height: Buffer height (pixels)
:param GLEnum format: Buffer format (default is gl.GL_STENCIL_INDEX8)
"""
def __init__(self, width, height, format=gl.GL_STENCIL_INDEX8):
# if format not in (gl.GL_STENCIL_INDEX,):
# raise ValueError("Format not allowed for color buffer")
RenderBuffer.__init__(self, width, height, format)
class FrameBuffer(GLObject):
""" Framebuffer object.
:param ColorBuffer color: One or several color buffers or None
:param DepthBuffer depth: A depth buffer or None
:param StencilBuffer stencil: A stencil buffer or None
"""
def __init__(self, color=None, depth=None, stencil=None):
"""
"""
GLObject.__init__(self)
self._width = None
self._height = None
self._color = None
self._depth = None
self._stencil = None
self._need_attach = True
self._pending_attachments = []
if color is not None:
self.color = color
if depth is not None:
self.depth = depth
if stencil is not None:
self.stencil = stencil
@property
def color(self):
""" Color buffer attachment(s) (read/write) """
return self._color
@color.setter
def color(self, buffers):
""" Color buffer attachment(s) (read/write) """
if not isinstance(buffers,list):
buffers = [buffers]
self._color = []
for i,buffer in enumerate(buffers):
if self.width is not None and self.width != buffer.width:
raise ValueError("Buffer width does not match")
elif self.height is not None and self.height != buffer.height:
raise ValueError("Buffer height does not match")
self._width = buffer.width
self._height = buffer.height
target = gl.GL_COLOR_ATTACHMENT0+i
self._color.append(buffer)
if isinstance(buffer, (ColorBuffer, Texture2D)) or buffer is None:
self._pending_attachments.append((target, buffer))
else:
raise ValueError(
"Buffer must be a ColorBuffer, Texture2D or None")
self._need_attach = True
@property
def depth(self):
""" Depth buffer attachment (read/write) """
return self._depth
@depth.setter
def depth(self, buffer):
""" Depth buffer attachment (read/write) """
if self.width is not None and self.width != buffer.width:
raise ValueError("Buffer width does not match")
elif self.height is not None and self.height != buffer.height:
raise ValueError("Buffer height does not match")
self._width = buffer.width
self._height = buffer.height
target = gl.GL_DEPTH_ATTACHMENT
self._depth = buffer
if isinstance(buffer, (DepthBuffer, Texture2D)) or buffer is None:
self._pending_attachments.append((target, buffer))
else:
raise ValueError(
"Buffer must be a DepthBuffer, Texture2D or None")
self._need_attach = True
@property
def stencil(self):
""" Stencil buffer attachment (read/write) """
return self._stencil
@stencil.setter
def stencil(self, buffer):
""" Stencil buffer attachment (read/write) """
if self.width is not None and self.width != buffer.width:
raise ValueError("Buffer width does not match")
elif self.height is not None and self.height != buffer.height:
raise ValueError("Buffer height does not match")
self._width = buffer.width
self._height = buffer.height
target = gl.GL_STENCIL_ATTACHMENT
self._stencil = buffer
if isinstance(buffer, StencilBuffer) or buffer is None:
self._pending_attachments.append((target, buffer))
else:
raise ValueError(
"Buffer must be a StencilBuffer, Texture2D or None")
self._need_attach = True
@property
def width(self):
""" Buffer width (read only, pixels) """
return self._width
@property
def height(self):
""" Buffer height (read only, pixels) """
return self._height
def resize(self, width, height):
""" Resize the buffer (deferred operation).
This method will also resize any attached buffers.
:param int width: New buffer width (pixels)
:param int height: New buffer height (pixels)
"""
self._width = width
self._height = height
for i, buffer in enumerate(self.color):
if isinstance(buffer, ColorBuffer):
buffer.resize(width, height)
elif isinstance(buffer, Texture2D):
newbuffer = np.resize(buffer, (height,width,buffer.shape[2]))
newbuffer = newbuffer.view(buffer.__class__)
self.color[i] = newbuffer
buffer.delete()
target = gl.GL_COLOR_ATTACHMENT0+i
self._pending_attachments.append((target, self.color[i]))
self._need_attach = True
if isinstance(self.depth, DepthBuffer):
self.depth.resize(width, height)
elif isinstance(self.depth, Texture2D):
depth = np.resize(self.depth, (height,width, self.depth.shape[2]))
depth = depth.view(self.depth.__class__)
self.depth.delete()
self.depth = depth
target = gl.GL_DEPTH_ATTACHMENT
self._pending_attachments.append((target, self.depth))
self._need_attach = True
if isinstance(self.stencil, StencilBuffer):
self.stencil.resize(width, height)
elif isinstance(self.stencil, Texture2D):
stencil = np.resize(self.stencil, (height,width, self.stencil.shape[2]))
stencil = stencil.view(self.stencil.__class__)
self.stencil.delete()
self.stencil = stencil
target = gl.GL_STENCIL_ATTACHMENT
self._pending_attachments.append((target, self.stencil))
self._need_attach = True
def _create(self):
""" Create framebuffer on GPU """
log.debug("GPU: Create framebuffer")
self._handle = gl.glGenFramebuffers(1)
def _delete(self):
""" Delete buffer from GPU """
log.debug("GPU: Delete framebuffer")
gl.glDeleteFramebuffers(1, np.array([self._handle]))
def _activate(self):
""" Activate framebuffer on GPU """
log.debug("GPU: Activate render framebuffer")
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self._handle)
if self._need_attach:
self._attach()
self._need_attach = False
attachments = [gl.GL_COLOR_ATTACHMENT0+i for i in range(len(self.color))]
gl.glDrawBuffers(np.array(attachments,dtype=np.uint32))
def _deactivate(self):
""" Deactivate framebuffer on GPU """
log.debug("GPU: Deactivate render framebuffer")
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
# gl.glDrawBuffers([gl.GL_COLOR_ATTACHMENT0])
def _attach(self):
""" Attach render buffers to framebuffer """
log.debug("GPU: Attach render buffers")
while self._pending_attachments:
attachment, buffer = self._pending_attachments.pop(0)
if buffer is None:
gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, attachment,
gl.GL_RENDERBUFFER, 0)
elif isinstance(buffer, RenderBuffer):
buffer.activate()
gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, attachment,
gl.GL_RENDERBUFFER, buffer.handle)
buffer.deactivate()
elif isinstance(buffer, Texture2D):
buffer.activate()
# INFO: 0 is for mipmap level 0 (default) of the texture
gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, attachment,
buffer.target, buffer.handle, 0)
buffer.deactivate()
else:
raise ValueError("Invalid attachment")
res = gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER)
if res == gl.GL_FRAMEBUFFER_COMPLETE:
pass
elif res == 0:
raise RuntimeError('Target not equal to GL_FRAMEBUFFER')
elif res == gl.GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
raise RuntimeError(
'FrameBuffer attachments are incomplete.')
elif res == gl.GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:
raise RuntimeError(
'No valid attachments in the FrameBuffer.')
elif res == gl.GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS:
raise RuntimeError(
'attachments do not have the same width and height.')
elif res == gl.GL_FRAMEBUFFER_INCOMPLETE_FORMATS:
raise RuntimeError('Internal format of attachment '
'is not renderable.')
elif res == gl.GL_FRAMEBUFFER_UNSUPPORTED:
raise RuntimeError('Combination of internal formats used '
'by attachments is not supported.')
|
math/josepheus_problem/python/josephus.py
|
CarbonDDR/al-go-rithms
| 1,253 |
93862
|
from __future__ import print_function
def josephus(list_of_players, step):
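    # Josephus problem: players stand in a circle and every step-th player is
    # eliminated in turn until a single survivor remains.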
    # advance by step-1: the eliminated player's slot is removed from the list,
    # so the vacated index already points at the next player in the circle
step -= 1
index = step
while len(list_of_players) > 1:
print("Player Died : " , list_of_players.pop(index))
index = (index + step) % len(list_of_players)
print('Player Survived : ', list_of_players[0])
def main():
print("[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15], 5")
josephus([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15], 5)
if __name__ == "__main__":
main()
|
tools/android/tracing/systrace-extract-startup.py
|
zealoussnow/chromium
| 14,668 |
93884
|
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script parses result of build/android/adb_profile_chrome_startup and
# prints various information.
from __future__ import print_function
import argparse
import collections
import glob
import itertools
import json
import os
import re
def HumanSortingKey(string):
# From https://goo.gl/65xrva
def _ToInt(string):
return int(string) if string.isdigit() else string
return [_ToInt(c) for c in re.split('(\d+)', string)]
class LogIndenter(object):
_indentation = 0
def __init__(self, message=None, *arguments):
self._indented = False
if message is not None:
log(message, *arguments)
def __enter__(self):
self.indent()
return self
def __exit__(self, type, value, traceback):
self.unindent()
def indent(self):
if not self._indented:
LogIndenter._indentation += 1
self._indented = True
def unindent(self):
if self._indented:
LogIndenter._indentation -= 1
self._indented = False
@classmethod
def indentation(self):
return LogIndenter._indentation
def log(message, *arguments):
if not message:
print()
return
if arguments:
message = message.format(*arguments)
if LogIndenter.indentation() > 0:
message = ' ' * LogIndenter.indentation() + message
print(message)
def ParseTraceDatas(trace_file_path):
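  # Extracts the raw payload of every <script class="trace-data"> block embedded
  # in an HTML trace file; each payload is returned as a single string.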
start_tag = re.compile(
'^\s*<script class="trace-data" type="application/text">$')
end_tag = re.compile('^(?P<line>.*?)\s*</script>$')
trace_datas = []
current_trace_lines = None
with open(trace_file_path) as trace_file:
for line in trace_file:
line = line.rstrip()
if current_trace_lines is None:
if start_tag.match(line):
current_trace_lines = []
else:
match = end_tag.match(line)
if match:
current_trace_lines.append(match.group('line'))
trace_datas.append('\n'.join(current_trace_lines))
current_trace_lines = None
else:
current_trace_lines.append(line)
return trace_datas
class Event(object):
PHASE_BEGIN = 'B'
PHASE_END = 'E'
PHASE_COMPLETE = 'X'
PHASE_ASYNC_BEGIN = 'S'
PHASE_ASYNC_END = 'F'
def __init__(self, node):
self._node = node
@property
def pid(self):
return int(self._node['pid'])
@property
def tid(self):
return int(self._node['tid'])
@property
def name(self):
return self._node.get('name')
@property
def phase(self):
return self._node['ph']
@property
def category(self):
return self._node['cat']
@property
def timestamp_us(self):
return long(self._node['ts'])
@property
def duration_us(self):
return long(self._node['dur'])
@property
def args(self):
return self._node['args']
class EventInterval(object):
def __init__(self, from_event=None, to_event=None):
self.from_event = from_event
self.to_event = to_event
def SetFromEventOnce(self, from_event):
if self.from_event is None:
self.from_event = from_event
def SetToEventOnce(self, to_event):
if self.to_event is None:
self.to_event = to_event
def FormatAsMilliseconds(self):
if not self.from_event:
time_string = "[missing the start of of the interval]"
elif not self.to_event:
time_string = "[missing the end of the interval]"
else:
interval_us = self.to_event.timestamp_us - self.from_event.timestamp_us
time_string = str(interval_us / 1000.0)
return time_string
  def UpTo(self, other_interval):
    return EventInterval(self.from_event, other_interval.to_event)
class Process(object):
BROWSER_NAME = 'Browser'
def __init__(self, pid):
self.pid = pid
self.name = None
self.events_by_name = collections.defaultdict(list)
self.time_ns_by_histogram = {}
self.malloc_counter_by_name = {}
# TODO: move these into Trace
self.startup_interval = EventInterval()
self.first_ui_interval = EventInterval()
class Trace(object):
def __init__(self, file_path):
self.file_path = file_path
self.process_by_pid = {}
self.startup_event = None
self.navigation_start_event = None
# TODO: convert these to properties over events
self.navigation_to_contentul_paint_interval = EventInterval()
self.navigation_to_meaningful_paint_interval = EventInterval()
self.navigation_to_commit_interval = None
@property
def startup_to_navigation_interval(self):
return EventInterval(self.startup_event, self.navigation_start_event)
def Finalize(self):
self.startup_event = self.FindFirstEvent(*Trace.STARTUP_EVENT_NAMES)
self.navigation_start_event = self.FindFirstEvent(
Trace.NAVIGATION_START_EVENT_NAME)
def _FindNavigationToCommitInterval():
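      # Pairs the earliest async-begin with the earliest async-end of the
      # 'Navigation StartToCommit' events to form a single interval.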
events = self.FindAllEvents(Trace.NAVIGATION_COMMIT_EVENT_NAME)
interval = EventInterval()
for event in events:
if event.phase == Event.PHASE_ASYNC_BEGIN:
interval.SetFromEventOnce(event)
elif event.phase == Event.PHASE_ASYNC_END:
interval.SetToEventOnce(event)
return interval
self.navigation_to_commit_interval = _FindNavigationToCommitInterval()
def FindAllEvents(self, *names):
events = []
for process in self.process_by_pid.itervalues():
for name in names:
process_events = process.events_by_name.get(name)
if process_events:
events.extend(process_events)
events.sort(key=lambda e: e.timestamp_us)
return events
def FindFirstEvent(self, *names):
events = self.FindAllEvents(*names)
return events[0] if events else None
NAVIGATION_START_EVENT_NAME = 'NavigationTiming navigationStart'
NAVIGATION_COMMIT_EVENT_NAME = 'Navigation StartToCommit'
STARTUP_EVENT_NAMES = [
'Startup.BrowserMainEntryPoint', 'ChromeApplication.onCreate',
'ContentShellApplication.onCreate'
]
def ParseTrace(file_path):
trace_datas = ParseTraceDatas(file_path)
if not trace_datas:
raise Exception("The file doesn't have any trace-data elements.")
trace_json = None
for trace_data in trace_datas:
try:
trace_json = json.loads(trace_data)
except ValueError:
continue
if not trace_json:
raise Exception("Couldn't parse trace-data json.")
trace = Trace(file_path)
for event_node in trace_json['traceEvents']:
event = Event(event_node)
pid = event.pid
process = trace.process_by_pid.get(event.pid)
if not process:
process = Process(pid)
trace.process_by_pid[pid] = process
name = event.name
if not name:
continue
process.events_by_name[name].append(event)
phase = event.phase
category = event.category
if name == 'process_name':
process.name = event.args['name']
if (category == 'disabled-by-default-uma-addtime' and
name not in process.time_ns_by_histogram):
process.time_ns_by_histogram[name] = int(event.args['value_ns'])
if name in Trace.STARTUP_EVENT_NAMES:
process.startup_interval.SetFromEventOnce(event)
elif name == 'BenchmarkInstrumentation::ImplThreadRenderingStats':
process.startup_interval.SetToEventOnce(event)
if name == Trace.NAVIGATION_START_EVENT_NAME:
trace.navigation_to_contentul_paint_interval.SetFromEventOnce(event)
trace.navigation_to_meaningful_paint_interval.SetFromEventOnce(event)
elif name == 'firstContentfulPaint':
trace.navigation_to_contentul_paint_interval.SetToEventOnce(event)
elif name == 'firstMeaningfulPaint':
trace.navigation_to_meaningful_paint_interval.SetToEventOnce(event)
if (name == 'AsyncInitializationActivity.onCreate()' and
phase == Event.PHASE_END):
process.first_ui_interval.SetFromEventOnce(event)
elif name == 'ChromeBrowserInitializer.startChromeBrowserProcessesAsync':
process.first_ui_interval.SetToEventOnce(event)
if category == 'malloc' and name == 'malloc_counter':
counter_name, counter_value = next(event.args.iteritems())
process.malloc_counter_by_name[counter_name] = long(counter_value)
trace.Finalize()
return trace
EventSummary = collections.namedtuple('EventSummary', [
'trace',
'event',
'startup_to_event_ms',
'navigation_to_event_ms',
'duration_ms'
])
def SummarizeEvents(event_name_regex, trace, process):
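  # Builds an EventSummary for every event in this process whose name matches
  # the regex, pairing begin/end phases and using complete events' own durations.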
summaries = []
def _AddSummary(event, start_us, duration_us):
startup_to_event_ms = (
None if trace.startup_event is None else
(start_us - trace.startup_event.timestamp_us) / 1000.0)
navigation_to_event_ms = (
None if trace.navigation_start_event is None else
(start_us - trace.navigation_start_event.timestamp_us) / 1000.0)
summaries.append(EventSummary(
trace, event, startup_to_event_ms, navigation_to_event_ms,
duration_us / 1000.0))
for name, events in process.events_by_name.iteritems():
if event_name_regex.search(name):
sorted_events = sorted(events,
key=lambda e: (e.tid, e.timestamp_us))
begin_event = None
for event in sorted_events:
if event.phase == Event.PHASE_COMPLETE:
_AddSummary(event, event.timestamp_us, event.duration_us)
elif (event.phase == Event.PHASE_BEGIN or
event.phase == Event.PHASE_ASYNC_BEGIN):
begin_event = event
elif (event.phase == Event.PHASE_END or
event.phase == Event.PHASE_ASYNC_END):
if begin_event is not None:
duration_us = event.timestamp_us - begin_event.timestamp_us
_AddSummary(event, begin_event.timestamp_us, duration_us)
begin_event = None
return summaries
def PrintReport(file_paths, options):
# TODO: don't accumulate traces, build report on the fly
traces = []
for file_path in file_paths:
log('Parsing {}...', file_path)
try:
traces.append(ParseTrace(file_path))
except Exception as e:
log('Oops: {}', e.message)
log('Parsed {} trace(s).', len(traces))
event_name_regex = None
event_summaries_by_name = collections.defaultdict(list)
if options.print_events:
event_name_regex = re.compile(options.print_events)
def _TraceSortingKey(trace):
return HumanSortingKey(os.path.basename(trace.file_path))
traces.sort(key=lambda t: _TraceSortingKey(t))
if options.csv:
separator = ','
gap = ''
else:
separator = '\t'
# Make it less likely for terminals to eat tabs when wrapping a line.
gap = ' '
table = [[
'File',
'Startup (ms)',
'StartupToNavigation (ms)',
'NavigationToCommit (ms)',
'NavigationToContentfulPaint (ms)',
'StartupToContentfulPaint (ms)',
'NavigationToMeaningfulPaint (ms)',
'StartupToMeaningfulPaint (ms)'
]]
for trace in traces:
browser_process = None
for process in trace.process_by_pid.itervalues():
if process.name == Process.BROWSER_NAME:
browser_process = process
break
if browser_process is None:
continue
table.append([
os.path.basename(trace.file_path),
browser_process.startup_interval.FormatAsMilliseconds(),
trace.startup_to_navigation_interval.FormatAsMilliseconds(),
trace.navigation_to_commit_interval.FormatAsMilliseconds(),
trace.navigation_to_contentul_paint_interval.FormatAsMilliseconds(),
browser_process.startup_interval.UpTo(trace.navigation_to_contentul_paint_interval).\
FormatAsMilliseconds(),
trace.navigation_to_meaningful_paint_interval.FormatAsMilliseconds(),
browser_process.startup_interval.UpTo(trace.navigation_to_meaningful_paint_interval).\
FormatAsMilliseconds()
])
if event_name_regex:
event_summaries = SummarizeEvents(
event_name_regex, trace, browser_process)
for summary in event_summaries:
event_summaries_by_name[summary.event.name].append(summary)
for name, event_summaries in event_summaries_by_name.iteritems():
table.append([])
summaries_by_trace = collections.defaultdict(list)
for summary in event_summaries:
summaries_by_trace[summary.trace].append(summary)
width = max(len(s) for s in summaries_by_trace.itervalues())
summary_headers = [
'StartupToEvent (ms)',
'NavigationToEvent (ms)',
'Duration (ms)'
]
table.append(
[name] +
([gap] * len(summary_headers)) +
list(itertools.chain.from_iterable(
['#{}'.format(i)] + [gap] * (len(summary_headers) - 1)
for i in range(1, width))))
table.append(
['File'] +
summary_headers * width)
trace_summaries = sorted(summaries_by_trace.iteritems(),
key=lambda t_s: _TraceSortingKey(t_s[0]))
for trace, summaries in trace_summaries:
row = [os.path.basename(trace.file_path)]
for summary in summaries:
row += [
(gap if summary.startup_to_event_ms is None
else summary.startup_to_event_ms),
(gap if summary.navigation_to_event_ms is None
else summary.navigation_to_event_ms),
summary.duration_ms
]
table.append(row)
print()
print('\n'.join(separator.join(str(v) for v in row) for row in table))
def PrintTrace(trace_file_path, options):
trace = ParseTrace(trace_file_path)
def _PrintInterval(name, interval):
log('{} (ms): {}', name, interval.FormatAsMilliseconds())
def _PrintHistogramTime(process, name):
time_ns = process.time_ns_by_histogram.get(name)
time_ms = None if time_ns is None else time_ns / 1e6
if time_ms is not None or options.print_none_histograms:
log('{} (ms): {}', name, time_ms)
histogram_names = [
'Startup.FirstCommitNavigationTime3.ZoomedIn',
'MobileStartup.ToolbarFirstDrawTime.ChromeTabbedActivity',
'ChromeGeneratedCustomTab.IntentToFirstCommitNavigationTime2.ZoomedIn',
'CustomTabs.IntentToFirstCommitNavigationTime2.ZoomedIn',
'PageLoad.PaintTiming.NavigationToFirstPaint',
'PageLoad.PaintTiming.NavigationToFirstContentfulPaint',
'PageLoad.Experimental.PaintTiming.NavigationToFirstMeaningfulPaint',
'SessionRestore.ForegroundTabFirstPaint3',
]
processes = sorted(trace.process_by_pid.itervalues(), key=lambda p: p.name)
events_regex = None
if options.print_events:
events_regex = re.compile(options.print_events)
for process in processes:
log('{} timings:', process.name)
indenter = LogIndenter()
indenter.indent()
if process.name == Process.BROWSER_NAME:
_PrintInterval('Startup', process.startup_interval)
_PrintInterval('StartupToNavigation',
trace.startup_to_navigation_interval)
_PrintInterval('NavigationToCommit', trace.navigation_to_commit_interval)
_PrintInterval('NavigationToContentfulPaint',
trace.navigation_to_contentul_paint_interval)
_PrintInterval('StartupToContentfulPaint', process.startup_interval.UpTo(
trace.navigation_to_contentul_paint_interval))
_PrintInterval('NavigationToMeaningfulPaint',
trace.navigation_to_meaningful_paint_interval)
_PrintInterval('StartupToMeaningfulPaint', process.startup_interval.UpTo(
trace.navigation_to_meaningful_paint_interval))
if options.experimental:
_PrintInterval('First UI interval', process.first_ui_interval)
if process.malloc_counter_by_name:
def _PrintMallocCounter(title, value_name, factor):
value = process.malloc_counter_by_name.get(value_name)
if value is not None:
value /= factor
log('{}: {}', title, value)
log('Malloc counters:')
with LogIndenter():
_PrintMallocCounter('Total time (ms)', 'malloc_time_ns', 1000000)
_PrintMallocCounter('Total allocated (KiB)', 'allocated_bytes', 1024)
_PrintMallocCounter('Allocations', 'allocation_count', 1)
_PrintMallocCounter('Frees', 'free_count', 1)
for histogram_name in histogram_names:
_PrintHistogramTime(process, histogram_name)
if events_regex:
event_summaries = SummarizeEvents(events_regex, trace, process)
if event_summaries:
with LogIndenter('Events matching "{}":', events_regex.pattern):
for event_summary in event_summaries:
with LogIndenter('{}:', event_summary.event.name):
if event_summary.startup_to_event_ms is not None:
log('StartupToEvent (ms): {}',
event_summary.startup_to_event_ms)
if event_summary.navigation_to_event_ms is not None:
log('NavigationToEvent (ms): {}',
event_summary.navigation_to_event_ms)
log('Duration (ms): {}', event_summary.duration_ms)
indenter.unindent()
log('')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file_or_glob')
parser.add_argument('--print-none-histograms',
help='Print histograms with None values.',
default=False, action='store_true')
# TODO: introduce a variant that takes a list of event names, as escaping
# event names can be tedious.
# TODO: match regex against '<index>|<event name>' to allow selecting
# events by index (complicated for begin/end pairs).
parser.add_argument('--print-events',
help='Print events matching the specified regex.')
parser.add_argument('--experimental',
default=False, action='store_true',
help='Enable experimental stuff.')
parser.add_argument('--report',
default=False, action='store_true',
help='Present information as a tab-separated table.')
parser.add_argument('--csv',
default=False, action='store_true',
help=('Separate report values by commas (not tabs).'))
options = parser.parse_args()
globbed = False
if os.path.isfile(options.file_or_glob):
trace_file_paths = [options.file_or_glob]
else:
globbed = True
file_pattern = options.file_or_glob
trace_file_paths = glob.glob(file_pattern)
if not trace_file_paths:
file_pattern += '*html'
trace_file_paths = glob.glob(file_pattern)
if not trace_file_paths:
log("'{}' didn't match anything.", file_pattern)
return
log("'{}' matched {} file(s).", file_pattern, len(trace_file_paths))
log('')
if options.report:
PrintReport(trace_file_paths, options)
else:
for file_path in trace_file_paths:
if globbed:
log('_' * len(file_path))
log(file_path)
log('')
PrintTrace(file_path, options)
if __name__ == '__main__':
main()
|
mmtbx/geometry/tests/tst_topology.py
|
dperl-sol/cctbx_project
| 155 |
93888
|
from __future__ import absolute_import, division, print_function
from mmtbx.geometry import topology
import unittest
class TestAtom(unittest.TestCase):
def test_1(self):
foo = object()
bar = object()
a = topology.Atom( foo = foo, bar = bar )
self.assertEqual( a.foo, foo )
self.assertEqual( a.bar, bar )
class TestMolecule(unittest.TestCase):
def test_0(self):
m = topology.Molecule()
self.assertEqual( m.size(), 0 )
self.assertEqual( m.atoms, [] )
self.assertEqual( m.atom_for, {} )
self.assertEqual( m.descriptor_for, {} )
self.assertEqual( list( m.graph.vertices() ), [] )
self.assertEqual( list( m.graph.edges() ), [] )
def test_1(self):
m = topology.Molecule()
a = topology.Atom()
m.add( atom = a, xyz = ( 0, 0, 0 ) )
self.assertEqual( m.size(), 1 )
self.assertEqual( m.atoms, [ a ] )
self.assertEqual( len( m.atom_for ), 1 )
self.assertTrue( a in m.atom_for.values() )
self.assertEqual( len( m.descriptor_for ), 1 )
self.assertTrue( a in m.descriptor_for )
self.assertEqual( len( list( m.graph.vertices() ) ), 1 )
self.assertEqual( list( m.graph.edges() ), [] )
def test_2(self):
m = topology.Molecule()
a1 = topology.Atom()
a2 = topology.Atom()
m.add( atom = a1, xyz = ( 0, 0, 0 ) )
m.add( atom = a2, xyz = ( 1, 1, 1 ) )
self.assertEqual( m.size(), 2 )
self.assertEqual( set( m.atoms ), set( [ a1, a2 ] ) )
self.assertEqual( len( m.atom_for ), 2 )
self.assertTrue( a1 in m.atom_for.values() )
self.assertTrue( a2 in m.atom_for.values() )
self.assertEqual( len( m.descriptor_for ), 2 )
self.assertTrue( a1 in m.descriptor_for )
self.assertTrue( a2 in m.descriptor_for )
self.assertEqual( len( list( m.graph.vertices() ) ), 2 )
self.assertEqual( len( list( m.graph.edges() ) ), 1 )
edge = next(m.graph.edges())
self.assertAlmostEqual( m.graph.edge_weight( edge = edge ), 1.73205, 5 )
class TestCompound(unittest.TestCase):
def test_0(self):
m = topology.Compound.create()
self.assertEqual( m.atoms, [] )
self.assertEqual( m.atom_for, {} )
self.assertEqual( m.descriptor_for, {} )
self.assertEqual( list( m.graph.vertices() ), [] )
self.assertEqual( list( m.graph.edges() ), [] )
def test_1(self):
m = topology.Compound.create()
a = topology.Atom()
m.add_atom( atom = a )
self.assertEqual( m.atoms, [ a ] )
self.assertEqual( len( m.atom_for ), 1 )
self.assertTrue( a in m.atom_for.values() )
self.assertEqual( len( m.descriptor_for ), 1 )
self.assertTrue( a in m.descriptor_for )
self.assertEqual( len( list( m.graph.vertices() ) ), 1 )
self.assertEqual( list( m.graph.edges() ), [] )
self.assertEqual( m.distances_from( atom = a ), { a: 0 } )
self.assertEqual( m.connected_segments(), [ [ a ] ] )
def test_2(self):
m = topology.Compound.create()
a1 = topology.Atom()
a2 = topology.Atom()
m.add_atom( atom = a1 )
m.add_atom( atom = a2 )
self.assertEqual( set( m.atoms ), set( [ a1, a2 ] ) )
self.assertEqual( len( m.atom_for ), 2 )
self.assertTrue( a1 in m.atom_for.values() )
self.assertTrue( a2 in m.atom_for.values() )
self.assertEqual( len( m.descriptor_for ), 2 )
self.assertTrue( a1 in m.descriptor_for )
self.assertTrue( a2 in m.descriptor_for )
self.assertEqual( len( list( m.graph.vertices() ) ), 2 )
self.assertEqual( len( list( m.graph.edges() ) ), 0 )
self.assertEqual( m.distances_from( atom = a1 ), { a1: 0, a2: None } )
self.assertEqual( m.distances_from( atom = a2 ), { a2: 0, a1: None } )
self.assertEqual(
set( frozenset( s ) for s in m.connected_segments() ),
set( [ frozenset( [ a1 ] ), frozenset( [ a2 ] ) ] ),
)
m.add_bond( left = a1, right = a2 )
self.assertEqual( len( list( m.graph.vertices() ) ), 2 )
self.assertEqual( len( list( m.graph.edges() ) ), 1 )
self.assertEqual( m.distances_from( atom = a1 ), { a1: 0, a2: 1 } )
self.assertEqual( m.distances_from( atom = a2 ), { a2: 0, a1: 1 } )
self.assertEqual(
set( frozenset( s ) for s in m.connected_segments() ),
set( [ frozenset( [ a1, a2 ] ) ] ),
)
ss1 = m.subset( atoms = [ a1 ] )
self.assertEqual( len( ss1.atom_for ), 1 )
self.assertTrue( a1 in ss1.atom_for.values() )
self.assertEqual( len( ss1.descriptor_for ), 1 )
self.assertTrue( a1 in ss1.descriptor_for )
self.assertEqual( len( list( ss1.graph.vertices() ) ), 1 )
self.assertEqual( len( list( ss1.graph.edges() ) ), 0 )
ss2 = m.subset( atoms = [ a2 ] )
self.assertEqual( len( ss2.atom_for ), 1 )
self.assertTrue( a2 in ss2.atom_for.values() )
self.assertEqual( len( ss2.descriptor_for ), 1 )
self.assertTrue( a2 in ss2.descriptor_for )
self.assertEqual( len( list( ss2.graph.vertices() ) ), 1 )
self.assertEqual( len( list( ss2.graph.edges() ) ), 0 )
def test_3(self):
atoms = [
topology.Atom( name = "N", element = "N", xyz = ( 11.498, 10.510, 10.231 ) ),
topology.Atom( name = "CA", element = "C", xyz = ( 12.730, 11.073, 10.769 ) ),
topology.Atom( name = "C", element = "C", xyz = ( 13.674, 9.966, 11.221 ) ),
topology.Atom( name = "O", element = "O", xyz = ( 13.739, 8.902, 10.605 ) ),
topology.Atom( name = "CB", element = "C", xyz = ( 12.421, 12.004, 11.944 ) ),
topology.Atom( name = "CG", element = "C", xyz = ( 11.478, 13.179, 11.661 ) ),
topology.Atom( name = "CD1", element = "C", xyz = ( 11.043, 13.834, 12.963 ) ),
topology.Atom( name = "CD2", element = "C", xyz = ( 12.126, 14.201, 10.736 ) ),
]
compound = topology.Compound.from_structure( atoms = atoms, tolerance = 0.1 )
self.assertEqual(
set( frozenset( [ l.name, r.name ] ) for ( l, r ) in compound.bonds ),
set(
[ frozenset( [ "N", "CA" ] ), frozenset( [ "CA", "C" ] ),
frozenset( [ "C", "O" ] ), frozenset( [ "CA", "CB" ] ),
frozenset( [ "CB", "CG" ] ), frozenset( [ "CG", "CD1" ] ),
frozenset( [ "CG", "CD2" ] ),
]
)
)
class TestMcGregorMatch(unittest.TestCase):
def test_asn_leu(self):
l_ca = topology.Atom( label = "CA" )
l_cb = topology.Atom( label = "C" )
l_cg = topology.Atom( label = "C" )
l_cd1 = topology.Atom( label = "C" )
l_cd2 = topology.Atom( label = "C" )
leu = topology.Molecule()
leu.add( atom = l_ca, xyz = ( -1.0085, -0.590773, 0.814318 ) )
leu.add( atom = l_cb, xyz = ( 0.0275, -0.557773, -0.314682 ) )
leu.add( atom = l_cg, xyz = ( 1.2335, 0.374227, -0.138682 ) )
leu.add( atom = l_cd1, xyz = ( 2.3065, 0.046227, -1.16768 ) )
leu.add( atom = l_cd2, xyz = ( 0.8395, 1.84323, -0.230682 ) )
a_ca = topology.Atom( label = "CA" )
a_cb = topology.Atom( label = "C" )
a_cg = topology.Atom( label = "C" )
a_od1 = topology.Atom( label = "C" )
a_nd2 = topology.Atom( label = "C" )
asn = topology.Molecule()
asn.add( atom = a_ca, xyz = ( -1.03327, -0.544348, 0.860946 ) )
asn.add( atom = a_cb, xyz = ( 0.10486, -0.548357, -0.164901 ) )
asn.add( atom = a_cg, xyz = ( 0.990984, 0.682823, -0.070521 ) )
asn.add( atom = a_od1, xyz = ( 1.39496, 1.24684, -1.08724 ) )
asn.add( atom = a_nd2, xyz = ( 1.29745, 1.10599, 1.15228 ) )
res = topology.McGregorMatch(
molecule1 = leu,
molecule2 = asn,
is_valid = lambda match: any( m.label == "CA" for m in match ),
vertex_equality = lambda l, r: l.label == r.label,
edge_equality = lambda l, r: abs( l - r ) < 0.1
)
self.assertEqual( res.length(), 3 )
mapping = res.remapped()
self.assertTrue( ( l_ca, a_ca ) in mapping )
self.assertTrue( ( l_cb, a_cb ) in mapping )
self.assertTrue( ( l_cg, a_cg ) in mapping )
self.assertTrue( ( l_cd1, a_od1 ) not in mapping )
class TestRascalMatch(unittest.TestCase):
def test_asn_leu(self):
l_ca = topology.Atom( label = "CA" )
l_cb = topology.Atom( label = "C" )
l_cg = topology.Atom( label = "C" )
l_cd1 = topology.Atom( label = "C" )
l_cd2 = topology.Atom( label = "C" )
leu = topology.Molecule()
leu.add( atom = l_ca, xyz = ( -1.0085, -0.590773, 0.814318 ) )
leu.add( atom = l_cb, xyz = ( 0.0275, -0.557773, -0.314682 ) )
leu.add( atom = l_cg, xyz = ( 1.2335, 0.374227, -0.138682 ) )
leu.add( atom = l_cd1, xyz = ( 2.3065, 0.046227, -1.16768 ) )
leu.add( atom = l_cd2, xyz = ( 0.8395, 1.84323, -0.230682 ) )
a_ca = topology.Atom( label = "CA" )
a_cb = topology.Atom( label = "C" )
a_cg = topology.Atom( label = "C" )
a_od1 = topology.Atom( label = "C" )
a_nd2 = topology.Atom( label = "C" )
asn = topology.Molecule()
asn.add( atom = a_ca, xyz = ( -1.03327, -0.544348, 0.860946 ) )
asn.add( atom = a_cb, xyz = ( 0.10486, -0.548357, -0.164901 ) )
asn.add( atom = a_cg, xyz = ( 0.990984, 0.682823, -0.070521 ) )
asn.add( atom = a_od1, xyz = ( 1.39496, 1.24684, -1.08724 ) )
asn.add( atom = a_nd2, xyz = ( 1.29745, 1.10599, 1.15228 ) )
m = topology.RascalMatch(
molecule1 = leu,
molecule2 = asn,
vertex_equality = lambda l, r: l.label == r.label,
edge_equality = lambda l, r: abs( l - r ) <= 0.1,
)
self.assertEqual( m.count(), 1 )
self.assertEqual( m.length(), 3 )
mapping = m.remapped()[0]
self.assertEqual( len( mapping ), 3 )
self.assertTrue( ( l_ca, a_ca ) in mapping )
self.assertTrue( ( l_cb, a_cb ) in mapping )
self.assertTrue( ( l_cg, a_cg ) in mapping )
self.assertTrue( ( l_cd1, a_od1 ) not in mapping )
class TestGreedyMatch(unittest.TestCase):
def test_asn_leu(self):
l_ca = topology.Atom( label = "CA" )
l_cb = topology.Atom( label = "C" )
l_cg = topology.Atom( label = "C" )
l_cd1 = topology.Atom( label = "C" )
l_cd2 = topology.Atom( label = "C" )
leu = topology.Molecule()
leu.add( atom = l_ca, xyz = ( -1.0085, -0.590773, 0.814318 ) )
leu.add( atom = l_cb, xyz = ( 0.0275, -0.557773, -0.314682 ) )
leu.add( atom = l_cg, xyz = ( 1.2335, 0.374227, -0.138682 ) )
leu.add( atom = l_cd1, xyz = ( 2.3065, 0.046227, -1.16768 ) )
leu.add( atom = l_cd2, xyz = ( 0.8395, 1.84323, -0.230682 ) )
a_ca = topology.Atom( label = "CA" )
a_cb = topology.Atom( label = "C" )
a_cg = topology.Atom( label = "C" )
a_od1 = topology.Atom( label = "C" )
a_nd2 = topology.Atom( label = "C" )
asn = topology.Molecule()
asn.add( atom = a_ca, xyz = ( -1.03327, -0.544348, 0.860946 ) )
asn.add( atom = a_cb, xyz = ( 0.10486, -0.548357, -0.164901 ) )
asn.add( atom = a_cg, xyz = ( 0.990984, 0.682823, -0.070521 ) )
asn.add( atom = a_od1, xyz = ( 1.39496, 1.24684, -1.08724 ) )
asn.add( atom = a_nd2, xyz = ( 1.29745, 1.10599, 1.15228 ) )
m = topology.GreedyMatch(
molecule1 = leu,
molecule2 = asn,
vertex_equality = lambda l, r: l.label == r.label,
edge_equality = lambda l, r: abs( l - r ) <= 0.1,
)
self.assertEqual( m.count(), 1 )
self.assertEqual( m.length(), 3 )
mapping = m.remapped()[0]
self.assertEqual( len( mapping ), 3 )
self.assertTrue( ( l_ca, a_ca ) in mapping )
self.assertTrue( ( l_cb, a_cb ) in mapping )
self.assertTrue( ( l_cg, a_cg ) in mapping )
self.assertTrue( ( l_cd1, a_od1 ) not in mapping )
suite_atom = unittest.TestLoader().loadTestsFromTestCase(
TestAtom
)
suite_molecule = unittest.TestLoader().loadTestsFromTestCase(
TestMolecule
)
suite_compound = unittest.TestLoader().loadTestsFromTestCase(
TestCompound
)
suite_mcgregor_match = unittest.TestLoader().loadTestsFromTestCase(
TestMcGregorMatch
)
suite_rascal_match= unittest.TestLoader().loadTestsFromTestCase(
TestRascalMatch
)
suite_greedy_match= unittest.TestLoader().loadTestsFromTestCase(
TestGreedyMatch
)
alltests = unittest.TestSuite(
[
suite_atom,
suite_molecule,
suite_compound,
suite_mcgregor_match,
suite_rascal_match,
suite_greedy_match,
]
)
def load_tests(loader, tests, pattern):
return alltests
if __name__ == "__main__":
unittest.TextTestRunner( verbosity = 2 ).run( alltests )
|
tests/schema/get_spec_for_prop_test.py
|
nickgaya/bravado-core
| 122 |
93890
|
<reponame>nickgaya/bravado-core<filename>tests/schema/get_spec_for_prop_test.py
# -*- coding: utf-8 -*-
import pytest
from bravado_core.exception import SwaggerMappingError
from bravado_core.schema import get_spec_for_prop
from bravado_core.spec import Spec
@pytest.fixture
def address_spec():
return {
'type': 'object',
'properties': {
'number': {
'type': 'number',
},
'street_name': {
'type': 'string',
},
'street_type': {
'type': 'string',
'enum': [
'Street',
'Avenue',
'Boulevard',
],
},
},
}
@pytest.fixture
def business_address_spec():
return {
'allOf': [
{
'$ref': '#/definitions/Address',
},
{
'type': 'object',
'properties': {
'company': {
'type': 'string',
},
},
},
],
}
@pytest.fixture
def address():
return {
'number': 1600,
'street_name': 'Pennsylvania',
'street_type': 'Avenue',
}
@pytest.fixture
def business_address():
return {
'company': 'White House',
'number': 1600,
'street_name': 'Pennsylvania',
'street_type': 'Avenue',
}
def test_declared_property(minimal_swagger_spec, address_spec, address):
expected_spec = address_spec['properties']['street_name']
result = get_spec_for_prop(
minimal_swagger_spec, address_spec, address, 'street_name',
)
assert expected_spec == result
def test_properties_and_additionalProperties_not_present(minimal_swagger_spec, address):
object_spec = {'type': 'object'}
result = get_spec_for_prop(
minimal_swagger_spec, object_spec, address, 'street_name',
)
assert result is None
def test_properties_not_present_and_additionalProperties_True(minimal_swagger_spec, address):
object_spec = {
'type': 'object',
'additionalProperties': True,
}
result = get_spec_for_prop(
minimal_swagger_spec, object_spec, address, 'street_name',
)
assert result is None
def test_properties_not_present_and_additionalProperties_False(minimal_swagger_spec, address):
object_spec = {
'type': 'object',
'additionalProperties': False,
}
result = get_spec_for_prop(
minimal_swagger_spec, object_spec, address, 'street_name',
)
assert result is None
def test_additionalProperties_with_spec(minimal_swagger_spec, address_spec, address):
address_spec['additionalProperties'] = {'type': 'string'}
expected_spec = {'type': 'string'}
# 'city' is not a declared property so it gets classified under
# additionalProperties
result = get_spec_for_prop(
minimal_swagger_spec, address_spec, address, 'city',
)
assert expected_spec == result
def test_additionalProperties_not_dict_like(minimal_swagger_spec, address_spec, address):
address_spec['additionalProperties'] = 'i am not a dict'
with pytest.raises(SwaggerMappingError) as excinfo:
get_spec_for_prop(minimal_swagger_spec, address_spec, address, 'city')
assert "Don't know what to do" in str(excinfo.value)
@pytest.mark.filterwarnings("ignore:.*with siblings that will be overwritten")
def test_get_spec_for_prop_with_x_nullable_and_reference(minimal_swagger_dict):
# TODO: remove is_nullable support once https://github.com/Yelp/bravado-core/issues/335 is addressed
minimal_swagger_dict['definitions'] = {
'referenced': {
'type': 'string',
},
'model': {
'type': 'object',
'properties': {
'property': {
'x-nullable': True,
'$ref': '#/definitions/referenced',
},
},
},
}
swagger_spec = Spec.from_dict(minimal_swagger_dict)
assert {'x-nullable': True, 'type': 'string'} == get_spec_for_prop(
swagger_spec,
minimal_swagger_dict['definitions']['model'],
None,
'property',
)
def test_composition(minimal_swagger_dict, address_spec, address, business_address_spec, business_address):
minimal_swagger_dict['definitions']['Address'] = address_spec
minimal_swagger_dict['definitions']['BusinessAddress'] = business_address_spec
swagger_spec = Spec.from_dict(minimal_swagger_dict)
expected_spec_1 = address_spec['properties']['street_name']
result_1 = get_spec_for_prop(
swagger_spec, address_spec, address, 'street_name',
)
assert expected_spec_1 == result_1
expected_spec_2 = business_address_spec['allOf'][1]['properties']['company']
result_2 = get_spec_for_prop(
swagger_spec, business_address_spec, business_address, 'company',
)
assert expected_spec_2 == result_2
def test_object_is_ref(minimal_swagger_dict, address_spec, address):
minimal_swagger_dict['definitions']['Address'] = address_spec
address_ref_spec = {'$ref': '#/definitions/Address'}
swagger_spec = Spec.from_dict(minimal_swagger_dict)
result = get_spec_for_prop(
swagger_spec, address_ref_spec, address, 'street_type',
)
assert address_spec['properties']['street_type'] == result
def test_property_is_ref(minimal_swagger_dict, address):
street_type_spec = {
'type': 'string',
'enum': ['Street', 'Avenue', 'Boulevard'],
}
address_spec = {
'type': 'object',
'properties': {
'street_type': {
'$ref': '#/definitions/StreetType',
},
},
}
minimal_swagger_dict['definitions']['StreetType'] = street_type_spec
swagger_spec = Spec.from_dict(minimal_swagger_dict)
result = get_spec_for_prop(
swagger_spec, address_spec, address, 'street_type',
)
assert street_type_spec == result
|
Lib/objc/CryptoTokenKit.py
|
snazari/Pyto
| 701 |
93909
|
<reponame>snazari/Pyto<gh_stars>100-1000
"""
Classes from the 'CryptoTokenKit' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
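    # rubicon.objc could not be imported; fall back to a stub so this module
    # still imports, with every class lookup resolving to None.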
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
TKTokenWatcher = _Class("TKTokenWatcher")
TKTokenWatcherTokenInfo = _Class("TKTokenWatcherTokenInfo")
TKTokenWatcherProxy = _Class("TKTokenWatcherProxy")
TKTokenKeyAlgorithm = _Class("TKTokenKeyAlgorithm")
TKTokenKeyExchangeParameters = _Class("TKTokenKeyExchangeParameters")
TKApplicationProxy = _Class("TKApplicationProxy")
TKTokenConnection = _Class("TKTokenConnection")
TKTokenSessionConnection = _Class("TKTokenSessionConnection")
TKTokenAccessUserPromptInfo = _Class("TKTokenAccessUserPromptInfo")
TKSharedResource = _Class("TKSharedResource")
TKSharedResourceSlot = _Class("TKSharedResourceSlot")
TKTokenAccessRegistry = _Class("TKTokenAccessRegistry")
TKSmartCardATR = _Class("TKSmartCardATR")
TKSmartCardATRInterfaceGroup = _Class("TKSmartCardATRInterfaceGroup")
TKTokenAccessUserPromptNoop = _Class("TKTokenAccessUserPromptNoop")
TKTokenAccessUserPromptRemoteAlert = _Class("TKTokenAccessUserPromptRemoteAlert")
TKTokenKeychainContents = _Class("TKTokenKeychainContents")
TKTokenKeychainItem = _Class("TKTokenKeychainItem")
TKTokenKeychainKey = _Class("TKTokenKeychainKey")
TKTokenKeychainCertificate = _Class("TKTokenKeychainCertificate")
TKTokenAccessRequest = _Class("TKTokenAccessRequest")
TKClientTokenAdvertisedItem = _Class("TKClientTokenAdvertisedItem")
TKClientTokenSession = _Class("TKClientTokenSession")
TKClientTokenObject = _Class("TKClientTokenObject")
TKClientToken = _Class("TKClientToken")
TKTokenSession = _Class("TKTokenSession")
TKSmartCardTokenSession = _Class("TKSmartCardTokenSession")
TKTokenAuthOperation = _Class("TKTokenAuthOperation")
TKTokenPasswordAuthOperation = _Class("TKTokenPasswordAuthOperation")
TKTokenSmartCardPINAuthOperation = _Class("TKTokenSmartCardPINAuthOperation")
TKSmartCardSessionEngine = _Class("TKSmartCardSessionEngine")
TKSmartCardSlotEngine = _Class("TKSmartCardSlotEngine")
_TKSmartCardSlotReservation = _Class("_TKSmartCardSlotReservation")
TKPowerMonitor = _Class("TKPowerMonitor")
TKSmartCardSessionRequest = _Class("TKSmartCardSessionRequest")
TKTokenDriverConfiguration = _Class("TKTokenDriverConfiguration")
TKTokenConfiguration = _Class("TKTokenConfiguration")
TKTokenConfigurationTransaction = _Class("TKTokenConfigurationTransaction")
TKTokenConfigurationConnection = _Class("TKTokenConfigurationConnection")
TKTokenID = _Class("TKTokenID")
TKSmartCardUserInteraction = _Class("TKSmartCardUserInteraction")
TKSmartCardUserInteractionForStringEntry = _Class(
"TKSmartCardUserInteractionForStringEntry"
)
TKSmartCardUserInteractionForConfirmation = _Class(
"TKSmartCardUserInteractionForConfirmation"
)
TKSmartCardUserInteractionForPINOperation = _Class(
"TKSmartCardUserInteractionForPINOperation"
)
TKSmartCardUserInteractionForSecurePINChange = _Class(
"TKSmartCardUserInteractionForSecurePINChange"
)
TKSmartCardUserInteractionForSecurePINVerification = _Class(
"TKSmartCardUserInteractionForSecurePINVerification"
)
TKSmartCardSlotScreen = _Class("TKSmartCardSlotScreen")
TKSmartCardPINFormat = _Class("TKSmartCardPINFormat")
TKSmartCard = _Class("TKSmartCard")
TKSmartCardWithError = _Class("TKSmartCardWithError")
TKSmartCardSlot = _Class("TKSmartCardSlot")
TKSmartCardSlotProxy = _Class("TKSmartCardSlotProxy")
TKSmartCardSlotManager = _Class("TKSmartCardSlotManager")
TKTokenAccessDBBackedByUserDefaults = _Class("TKTokenAccessDBBackedByUserDefaults")
TKTLVRecord = _Class("TKTLVRecord")
TKCompactTLVRecord = _Class("TKCompactTLVRecord")
TKSimpleTLVRecord = _Class("TKSimpleTLVRecord")
TKBERTLVRecord = _Class("TKBERTLVRecord")
TKDataSource = _Class("TKDataSource")
TKTokenDriverRequest = _Class("TKTokenDriverRequest")
TKTokenService_Subsystem = _Class("TKTokenService_Subsystem")
TKTokenDriver = _Class("TKTokenDriver")
TKSmartCardTokenDriver = _Class("TKSmartCardTokenDriver")
TKToken = _Class("TKToken")
TKSmartCardToken = _Class("TKSmartCardToken")
TKTokenBaseContext = _Class("TKTokenBaseContext")
TKTokenDriverContext = _Class("TKTokenDriverContext")
|
dist/cygwin-prebuilts/make-cygwin-prebuilts.py
|
gnaggnoyil/wslbridge
| 376 |
93931
|
<reponame>gnaggnoyil/wslbridge
#!python3
import os
import sys
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import util
import re
import shutil
import subprocess
import dllversion
from os.path import abspath
from subprocess import check_call
from util import glob_paths, rmpath, mkdirs, buildTimeStamp, projectDir, getGppVer
sys.platform == 'win32' or sys.exit('error: script only runs on Windows (no Cygwin/MSYS)')
shutil.which('7z') or sys.exit('error: 7z missing')
shutil.which('curl') or sys.exit('error: curl missing')
buildDir = os.path.join(projectDir, 'out\\build-cygwin')
artifactDir = os.path.join(projectDir, 'out\\artifact')
rmpath(buildDir)
mkdirs(buildDir)
mkdirs(artifactDir)
os.chdir(buildDir)
for setup, cygwin in (('setup-x86_64', 'cygwin64'), ('setup-x86', 'cygwin32')):
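    # Download the Cygwin installer, perform an unattended install of gcc-g++ and
    # make into a local directory, rebase the DLLs, then pack the needed subset
    # into a versioned 7z archive.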
check_call(['curl', '-fL', '-O', 'https://cygwin.com/{}.exe'.format(setup)])
check_call([
abspath('{}.exe'.format(setup)),
'-l', abspath('{}-packages'.format(cygwin)),
'-P', 'gcc-g++,make',
'-s', 'http://mirrors.kernel.org/sourceware/cygwin',
'-R', abspath(cygwin),
'--no-admin', '--no-desktop', '--no-shortcuts', '--no-startmenu', '--quiet-mode',
])
check_call(['{}/bin/ash.exe'.format(cygwin), '/bin/rebaseall', '-v'])
cygVer = dllversion.fileVersion('{}/bin/cygwin1.dll'.format(cygwin))
gppVer = getGppVer('{}/bin/g++.exe'.format(cygwin))
filename = '{}\\{}-{}-dll{}-gcc{}.7z'.format(artifactDir, cygwin, buildTimeStamp, cygVer, gppVer)
rmpath(filename)
open(cygwin + '/tmp/.keep', 'wb').close()
check_call(['7z', 'a', '-mx=9', filename] + glob_paths([
cygwin + '/dev',
cygwin + '/etc/setup',
cygwin + '/tmp/.keep',
cygwin + '/bin',
cygwin + '/lib',
cygwin + '/usr/include',
cygwin + '/usr/*-pc-cygwin',
]))
|
servefiles/sendurls.py
|
Jerry-Shaw/FBI
| 1,684 |
93953
|
<gh_stars>1000+
#!/usr/bin/env python
# coding: utf-8 -*-
import socket
import struct
import sys
import time
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
if len(sys.argv) < 3:
print('Usage: ' + sys.argv[0] + ' <target ip> <url>...')
sys.exit(1)
target_ip = sys.argv[1]
file_list_payload = ''
for url in sys.argv[2:]:
    parsed = urlparse(url)
    if parsed.scheme not in ('http', 'https') or parsed.netloc == '':
print(url + ': Invalid URL')
sys.exit(1)
file_list_payload += url + '\n'
file_list_payloadBytes = file_list_payload.encode('ascii')
print('URLs:')
print(file_list_payload)
try:
print('Sending URL(s) to '+ target_ip + ' on port 5000...')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((target_ip, 5000))
sock.sendall(struct.pack('!L', len(file_list_payloadBytes)) + file_list_payloadBytes)
while len(sock.recv(1)) < 1:
time.sleep(0.05)
sock.close()
except Exception as e:
print('An error occurred: ' + str(e))
sys.exit(1)
|
examples/libtest/DatetimeModuleTest.py
|
takipsizad/pyjs
| 739 |
93976
|
# Testing datetime module
import sys
import UnitTest
import datetime
class DatetimeModuleTest(UnitTest.UnitTest):
def testDate(self):
d = datetime.date(2010, 4, 9)
self.assertEqual(d.year, 2010)
self.assertEqual(d.month, 4)
self.assertEqual(d.day, 9)
self.assertEqual(d.weekday(), 4)
def testTime(self):
t = datetime.time(9, 45, 11, 95000)
self.assertEqual(t.hour, 9)
self.assertEqual(t.minute, 45)
self.assertEqual(t.second, 11)
self.assertEqual(t.microsecond, 95000)
def testTimestamp(self):
d = datetime.date.fromtimestamp(1270804609)
self.assertEqual(str(d), '2010-04-09')
dt = str(datetime.datetime.fromtimestamp(1270804609.95))
# CET: 2010-04-09 11:16:49.950000
self.assertEqual(
(dt[:11], dt[16:]),
("2010-04-09 ", ":49.950000"),
)
def testCtime(self):
d = datetime.date(2010, 4, 9)
self.assertEqual(d.ctime(), "Fri Apr 9 00:00:00 2010")
dt = datetime.datetime(2010, 4, 9, 10, 57, 32)
self.assertEqual(dt.ctime(), "Fri Apr 9 10:57:32 2010")
def testIsoCalendar(self):
d = datetime.date(2010, 4, 9)
self.assertEqual(d.isocalendar(), (2010, 14, 5))
d1 = datetime.date(2007, 12, 31)
self.assertEqual(d1.isocalendar(), (2008, 1, 1))
def testIsoFormat(self):
d = datetime.date(2010, 4, 9)
self.assertEqual(d.isoformat(), '2010-04-09')
dt = datetime.datetime(2010, 4, 9, 10, 57, 32)
self.assertEqual(dt.isoformat(), '2010-04-09T10:57:32')
dt2 = datetime.datetime(2010, 4, 9, 10, 57, 32, 95000)
self.assertEqual(dt2.isoformat(), '2010-04-09T10:57:32.095000')
def testOrdinal(self):
d = datetime.date.fromordinal(1)
self.assertEqual(str(d), '0001-01-01')
d1 = datetime.date.fromordinal(733871)
self.assertEqual(str(d1), '2010-04-09')
self.assertEqual(d1.toordinal(), 733871)
def testReplace(self):
d = datetime.date(2010, 4, 9).replace(month=6, day=13)
self.assertEqual(str(d), '2010-06-13')
t = datetime.time(23, 59, 59).replace(minute=45, microsecond=95000)
self.assertEqual(str(t), '23:45:59.095000')
dt = datetime.datetime(2010, 4, 9, 10, 57, 32).replace(month=6, day=13, hour=12, minute=0, second=0)
self.assertEqual(str(dt), '2010-06-13 12:00:00')
def testTimetuple(self):
tm = datetime.date(2010, 4, 9).timetuple()
self.assertEqual(tm.tm_year, 2010)
self.assertEqual(tm.tm_mon, 4)
self.assertEqual(tm.tm_mday, 9)
self.assertEqual(tm.tm_hour, 0)
self.assertEqual(tm.tm_min, 0)
self.assertEqual(tm.tm_sec, 0)
self.assertEqual(tm.tm_wday, 4)
self.assertEqual(tm.tm_yday, 99)
def testStrftime(self):
d = datetime.date(2010, 4, 9)
self.assertEqual(d.strftime("%d/%m/%y"), "09/04/10")
def testStrptime(self):
d = datetime.datetime.strptime("010100 1234", "%d%m%y %H%M")
self.assertEqual(str(d), '2000-01-01 12:34:00')
def testComparision(self):
d1 = datetime.date(2010, 6, 8)
d2 = datetime.date(2010, 6, 8)
d3 = datetime.date(2010, 4, 9)
self.assertTrue(d1 == d2, "d1 and d2 differ")
self.assertTrue(d1 > d3, "d1 is not later than d3")
self.assertTrue(d3 < d1, "d3 is not earlier than d1")
def testOperations(self):
d1 = datetime.date(2010, 4, 9)
d2 = datetime.date(2010, 6, 13)
diff = d2 - d1
self.assertEqual(diff.days, 65)
self.assertEqual(str(d1 + diff), "2010-06-13")
self.assertEqual(str(d1 - diff), "2010-02-03")
if __name__ == '__main__':
from RunTests import RunTests
t = RunTests()
t.add(DatetimeModuleTest)
t.start_test()
|
pandas_vs_datatable/code/mutate.py
|
korur/Blog
| 108 |
93985
|
import os
import re
import json
import time
import numpy as np
import pandas as pd
from plotnine import *
# Config
PATH = os.getcwd()
path_n = re.split(pattern=r"/|\\", string=PATH)[1:]
if os.name == "posix":
path_n = "/" + os.path.join(*path_n)
else:
drive = PATH[0:3]
path_n = drive + os.path.join(*path_n)
RUNS = 100
def infer_column_cats(dir: "Path to working directory.") -> tuple:
"""Helper function to identify dataset sizes based on file names."""
files = os.listdir(os.path.join(dir, "data"))
cats = set([re.match(pattern=".*_(.*).csv$", string=file).group(1) for file in files])
cols = set([re.match(pattern=".*_(.*)_.*.csv$", string=file).group(1) for file in files])
return cats, cols
def time_function(func: "Function call to be evaluted as str.") -> float:
"""Helper function to time data access."""
start = time.time()
exec(func)
return time.time() - start
def create_stats(measures: "List of function timings.",
col: "Current Column.", row: "Current Row",
scenario: "Current Scenario.") -> dict:
"""Helper function to create result dataset."""
return {"scenario": scenario,
"no_column": col,
"data_length": row,
"min": np.min(measures),
"max": np.max(measures),
"avg": np.mean(measures),
"q50": np.median(measures)}
scenarios = json.load(open(os.path.join(path_n, "output", "mutate.JSON")))
nrows, ncols = infer_column_cats(path_n)
timings, results = [], []
for col in ncols:
print(f"-Column: {col}--")
for row in nrows:
print(f"--Row: {row}")
data = pd.read_csv(os.path.join(path_n, "data", f"sim_data_{col}_{row}.csv"))
for i, scenario in enumerate(scenarios[col]["mutate"]):
print(f"---Scenario {i+1}: {scenario}---")
sel = re.search(pattern=r'([A-Z]{3})', string=scenario).group(1)
print(sel)
if sel == "INT":
func = f"temp['result'] = temp['{scenario}'] + 1"
elif sel == "DBL":
func = f"temp['result'] = temp['{scenario}'] * 2"
elif sel == "STR":
func = f"temp['result'] = temp['{scenario}'] + 'a'"
elif sel == "LGL":
func = f"temp['result'] = ~temp['{scenario}']"
for j in range(RUNS):
                temp = data.copy()  # work on a copy so repeated runs do not mutate the source frame
timings.append(time_function(func=func))
temp = None
results.append(create_stats(measures=timings, col=col, row=row, scenario=sel))
print(results[-1])
timings = []
results_df = pd.DataFrame(results)
results_df[["data_length", "no_column"]] = results_df[["data_length", "no_column"]].apply(pd.to_numeric,
axis=1,
downcast="integer")
results_df.sort_values(["data_length", "no_column"])
results_df[["min", "max", "q50", "avg"]] = round(results_df[["min", "max", "q50", "avg"]] * 1000, 2)
# results_df["sel_col"] = results_df["scenario"].apply(lambda x: re.search(pattern="([13])", string=x).group(1))
# results_df["pos_col"] = results_df["scenario"].apply(lambda x: re.search(pattern="[13](.*)$", string=x).group(1))
results_df.to_csv(os.path.join(path_n, "output", "mutate_results_pandas.csv"), index=False)
|
test/test_durable.py
|
timgates42/paxos
| 420 |
93993
|
<gh_stars>100-1000
import sys
import os
import os.path
import hashlib
import struct
import tempfile
import shutil
import pickle
#from twisted.trial import unittest
import unittest
this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append( os.path.dirname(this_dir) )
from paxos import durable
class DObj(object):
def __init__(self):
self.state = 'initial'
class DurableReadTester (unittest.TestCase):
def setUp(self):
tmpfs_dir = '/dev/shm' if os.path.exists('/dev/shm') else None
self.tdir = tempfile.mkdtemp(dir=tmpfs_dir)
self.fds = list()
def tearDown(self):
shutil.rmtree(self.tdir)
for fd in self.fds:
os.close(fd)
def newfd(self, data=None):
bin_flag = 0 if not hasattr(os, 'O_BINARY') else os.O_BINARY
fd = os.open( os.path.join(self.tdir, str(len(self.fds))), os.O_CREAT | os.O_RDWR | bin_flag)
self.fds.append( fd )
if data is not None:
os.write(fd, data)
return fd
def test_read_zero_length(self):
self.assertRaises(durable.FileTruncated, durable.read, self.newfd())
def test_read_header_too_small(self):
self.assertRaises(durable.FileTruncated, durable.read, self.newfd('\0'*31))
def test_read_no_pickle_data(self):
data = '\0'*24 + struct.pack('>Q', 5)
self.assertRaises(durable.FileTruncated, durable.read, self.newfd(data))
def test_read_bad_hash_mismatch(self):
data = '\0'*24 + struct.pack('>Q', 5) + 'x'*5
self.assertRaises(durable.HashMismatch, durable.read, self.newfd(data))
def test_read_ok(self):
pdata = 'x'*5
p = pickle.dumps(pdata, pickle.HIGHEST_PROTOCOL)
data = '\0'*8 + struct.pack('>Q', len(p)) + p
data = hashlib.md5(data).digest() + data
self.assertEqual( durable.read(self.newfd(data) ), (0, pdata) )
class DurableObjectHandlerTester (unittest.TestCase):
def setUp(self):
tmpfs_dir = '/dev/shm' if os.path.exists('/dev/shm') else None
self.o = DObj()
self.tdir = tempfile.mkdtemp(dir=tmpfs_dir)
self.doh = durable.DurableObjectHandler(self.tdir, 'id1')
self.dohs = [self.doh,]
def tearDown(self):
for doh in self.dohs:
doh.close()
shutil.rmtree(self.tdir)
def newdoh(self, obj_id=None):
if obj_id is None:
obj_id = 'id' + str(len(self.dohs))
doh = durable.DurableObjectHandler(self.tdir, obj_id)
self.dohs.append(doh)
return doh
def test_bad_directory(self):
self.assertRaises(Exception, durable.DurableObjectHandler, '/@#$!$^FOOBARBAZ', 'blah')
def test_no_save(self):
self.doh.close()
d = self.newdoh('id1')
self.assertEquals(d.recovered, None)
self.assertEquals(d.serial, 1)
def test_one_save(self):
self.doh.save(self.o)
self.doh.close()
d = self.newdoh('id1')
self.assertTrue( os.stat(self.doh.fn_a).st_size > 0 )
self.assertTrue( os.stat(self.doh.fn_b).st_size == 0 )
self.assertTrue( isinstance(d.recovered, DObj) )
self.assertEquals(d.recovered.state, 'initial')
def test_two_save(self):
self.doh.save(self.o)
self.o.state = 'second'
self.doh.save(self.o)
self.doh.close()
d = self.newdoh('id1')
self.assertTrue( os.stat(self.doh.fn_a).st_size > 0 )
self.assertTrue( os.stat(self.doh.fn_b).st_size > 0 )
self.assertTrue( isinstance(d.recovered, DObj) )
self.assertEquals(d.recovered.state, 'second')
def test_three_save(self):
self.doh.save(self.o)
self.o.state = 'second'
self.doh.save(self.o)
self.o.state = 'third'
self.doh.save(self.o)
self.doh.close()
d = self.newdoh('id1')
self.assertTrue( isinstance(d.recovered, DObj) )
self.assertEquals(d.recovered.state, 'third')
def test_new_object_corrupted(self):
self.test_two_save()
with open(self.doh.fn_b, 'wb') as f:
f.write('\0')
f.flush()
d = self.newdoh('id1')
self.assertTrue( isinstance(d.recovered, DObj) )
self.assertEquals(d.recovered.state, 'initial')
def test_old_object_corrupted(self):
self.test_two_save()
with open(self.doh.fn_a, 'wb') as f:
f.write('\0')
f.flush()
d = self.newdoh('id1')
self.assertTrue( isinstance(d.recovered, DObj) )
self.assertEquals(d.recovered.state, 'second')
def test_unrecoverable_corruption(self):
self.test_two_save()
with open(self.doh.fn_a, 'wb') as f:
f.write('\0')
f.flush()
with open(self.doh.fn_b, 'wb') as f:
f.write('\0')
f.flush()
def diehorribly():
self.newdoh('id1')
self.assertRaises(durable.UnrecoverableFailure, diehorribly)
|
Algo and DSA/LeetCode-Solutions-master/Python/time-based-key-value-store.py
|
Sourav692/FAANG-Interview-Preparation
| 3,269 |
93999
|
<filename>Algo and DSA/LeetCode-Solutions-master/Python/time-based-key-value-store.py<gh_stars>1000+
# Time: set: O(1)
# get: O(logn)
# Space: O(n)
import collections
import bisect
class TimeMap(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.lookup = collections.defaultdict(list)
def set(self, key, value, timestamp):
"""
:type key: str
:type value: str
:type timestamp: int
:rtype: None
"""
self.lookup[key].append((timestamp, value))
def get(self, key, timestamp):
"""
:type key: str
:type timestamp: int
:rtype: str
"""
A = self.lookup.get(key, None)
if A is None:
return ""
i = bisect.bisect_right(A, (timestamp+1, 0))
return A[i-1][1] if i else ""
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp)
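# A minimal usage sketch (keys, values and timestamps are illustrative):
# set() appends (timestamp, value) pairs in increasing timestamp order, and get()
# bisects on (timestamp + 1, 0) to find the latest value stored at or before the
# requested timestamp.
# tm = TimeMap()
# tm.set("foo", "bar", 1)   # store "bar" at t=1
# tm.get("foo", 1)          # -> "bar"
# tm.get("foo", 3)          # -> "bar" (latest value with timestamp <= 3)
# tm.get("foo", 0)          # -> ""    (nothing stored at or before t=0)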
|
model_compiler/src/model_compiler/compilers/keras_model_file_to_tflite_model.py
|
yuanliya/Adlik
| 548 |
94050
|
# Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import tensorflow as tf
from . import repository
from ..models.sources.keras_model_file import KerasModelFile
from ..models.targets.tflite_model import TfLiteModel
from .. import tflite_util
from .. import keras_util
@repository.REPOSITORY.register(source_type=KerasModelFile, target_type=TfLiteModel, config_type=tflite_util.Config)
def compile_source(source: KerasModelFile, config: tflite_util.Config) -> TfLiteModel:
if source.script_path:
custom_objects = keras_util.get_custom_objects(source.script_path)
else:
custom_objects = None
model = tf.keras.models.load_model(filepath=source.model_path, custom_objects=custom_objects, compile=False)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = tflite_util.get_tflite_model(converter, config)
return TfLiteModel(tflite_model, config.input_formats)
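# A minimal standalone sketch (file names are hypothetical) of the conversion this
# compiler wraps: load a Keras model and convert it with the stock TensorFlow Lite
# converter. The KerasModelFile/Config wrappers above add custom-object loading and
# converter configuration on top of this.
# model = tf.keras.models.load_model('model.h5', compile=False)
# converter = tf.lite.TFLiteConverter.from_keras_model(model)
# tflite_bytes = converter.convert()
# with open('model.tflite', 'wb') as fp:
#     fp.write(tflite_bytes)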
|
HelperTools/CAV_to_constraint_arr.py
|
kataya/arcade-expressions
| 119 |
94077
|
import arcpy
import pandas as pd
import os
fc = r"C:\\temp\\GasPipelineEnterpriseDataManagement\\Databases\\UPDM_UtilityNetwork.gdb\\UtilityNetwork\\PipelineLine"
field_group_name = 'Limit Material By Asset Type'
def view_cav(table, subtype_field):
index = ['fieldGroupName', 'subtype', 'isRetired', 'id']
data = {}
for cav in arcpy.da.ListContingentValues(table):
contingent_value = {k: getattr(cav, k, None) for k in index}
for field in cav.values:
contingent_value[field.name] = dict(CODED_VALUE=field.code,
RANGE=field.range,
ANY='|ANY|',
NULL='<NULL>')[field.type]
data.setdefault(cav.fieldGroupName, []).append(contingent_value)
return [pd.DataFrame(values).set_index(index).rename_axis(index={'subtype': subtype_field}).fillna('<NULL>') for
values in data.values()]
desc = arcpy.Describe(fc)
for df in view_cav(fc, desc.subtypeFieldName):
if field_group_name in df.index:
subtypes = set()
valid_combos = []
df = df.reset_index().drop(['fieldGroupName', 'id'], axis=1)
df = df[df['isRetired'] == False].drop(['isRetired'], axis=1)
for row in df.itertuples(index=False):
valid_combos.append("::".join(map(str, row)).replace('<NULL>', ''))
subtypes.add(str(row[0]))
subtypes = sorted(subtypes)
field_list = [f'$feature.{fld}' for fld in df.columns]
func = f'''
// Assigned To: {os.path.basename(fc)}
// Type: Constraint
// Name: {field_group_name}
// Description: Limit values combinations using the fields {', '.join(list(df.columns))}
// Subtypes: All
// Error: 5601
// Error Message: Incompatible types for {', '.join(list(df.columns))}
// Trigger: Insert, Update
// ************* User Variables *************
// This section has the functions and variables that need to be adjusted based on your implementation
var valid_asset_groups = [{', '.join(subtypes)}];
if (indexof(valid_asset_groups, $feature.{desc.subtypeFieldName}) == -1) {{
return true;
}}
var feature_fields = [{', '.join(field_list)}];
var valid_values = {valid_combos};
// ************* End User Variables Section *************
function splice(arr, start, end) {{
var new_arr = [];
var k = 0;
for (var i = start; i < end; i++) {{
new_arr[k++] = arr[i];
}}
return new_arr;
}}
function join_array(a, b) {{
var new_arr = [];
var k = 0;
for (var i in a) {{
new_arr[k++] = a[i];
}}
for (var i in b) {{
new_arr[k++] = b[i];
}}
return new_arr;
}}
var feature_values = [Concatenate(feature_fields, '::')];
var any_sent = '|ANY|';
var fld_count = Count(feature_fields);
for (var i = 0; i < fld_count; i++) {{
var start_arr = splice(feature_fields, 0, i)
start_arr[Count(start_arr)] = any_sent;
var end_arr = splice(feature_fields, i + 1, fld_count)
feature_values[i + 1] = Concatenate(join_array(start_arr, end_arr), '::')
}}
var match_found = false;
for (var i = 0; i < Count(feature_values); i++){{
if (IndexOf(valid_values, feature_values[i]) > -1) {{
match_found = true;
break;
}}
}}
if (match_found == false) {{
return {{"errorMessage": "The selected attributes for {', '.join(list(df.columns))} are not valid."}}
}}
return true;
'''
print(func)
break
|
mayan/apps/authentication/urls.py
|
eshbeata/open-paperless
| 2,743 |
94091
|
<filename>mayan/apps/authentication/urls.py<gh_stars>1000+
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth.views import logout
from .views import (
login_view, password_change_done, password_change_view,
password_reset_complete_view, password_reset_confirm_view,
password_reset_done_view, password_reset_view
)
urlpatterns = [
url(r'^login/$', login_view, name='login_view'),
url(
r'^password/change/done/$', password_change_done,
name='password_change_done'
),
url(
r'^password/change/$', password_change_view,
name='password_change_view'
),
url(
r'^logout/$', logout, {'next_page': settings.LOGIN_REDIRECT_URL},
name='logout_view'
),
url(
r'^password/reset/$', password_reset_view, name='password_reset_view'
),
url(
r'^password/reset/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
password_reset_confirm_view, name='password_reset_confirm_view'
),
url(
r'^password/reset/complete/$', password_reset_complete_view,
name='password_reset_complete_view'
),
url(
r'^password/reset/done/$', password_reset_done_view,
name='password_reset_done_view'
),
]
|
lte/gateway/python/integ_tests/s1aptests/test_ipv4v6_secondary_pdn_spgw_initiated_ded_bearer.py
|
Aitend/magma
| 849 |
94094
|
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import ipaddress
import time
import unittest
import s1ap_types
import s1ap_wrapper
from integ_tests.s1aptests.s1ap_utils import SpgwUtil
class TestIPv4v6SecondaryPdnSpgwInitiatedDedBearer(unittest.TestCase):
"""Test ipv4v6 secondary pdn with spgw initiated dedicated bearer"""
def setUp(self):
"""Initialize"""
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
self._spgw_util = SpgwUtil()
def tearDown(self):
"""Cleanup"""
self._s1ap_wrapper.cleanup()
def test_ipv4v6_secondary_pdn_spgw_initiated_ded_bearer(self):
"""Attach a single UE + add a secondary pdn with
IPv4v6 + trigger dedicated bearer from spgw + detach
"""
num_ue = 1
self._s1ap_wrapper.configUEDevice(num_ue)
req = self._s1ap_wrapper.ue_req
ue_id = req.ue_id
# APN of the secondary PDNs
ims_apn = {
"apn_name": "ims", # APN-name
"qci": 5, # qci
"priority": 15, # priority
"pre_cap": 0, # preemption-capability
"pre_vul": 0, # preemption-vulnerability
"mbr_ul": 200000000, # MBR UL
"mbr_dl": 100000000, # MBR DL
"pdn_type": 2, # PDN Type 0-IPv4,1-IPv6,2-IPv4v6
}
apn_list = [ims_apn]
self._s1ap_wrapper.configAPN(
"IMSI" + "".join([str(i) for i in req.imsi]), apn_list,
)
print(
"*********************** Running End to End attach for UE id ",
ue_id,
)
print("***** Sleeping for 5 seconds")
time.sleep(5)
# Attach
attach = self._s1ap_wrapper.s1_util.attach(
ue_id,
s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t,
)
addr = attach.esmInfo.pAddr.addrInfo
default_ip = ipaddress.ip_address(bytes(addr[:4]))
# Wait on EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
print("***** Sleeping for 5 seconds")
time.sleep(5)
apn = "ims"
# PDN Type 2 = IPv6, 3 = IPv4v6
pdn_type = 3
# Send PDN Connectivity Request
self._s1ap_wrapper.sendPdnConnectivityReq(
ue_id, apn, pdn_type=pdn_type,
)
# Receive PDN CONN RSP/Activate default EPS bearer context request
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_PDN_CONN_RSP_IND.value,
)
act_def_bearer_req = response.cast(s1ap_types.uePdnConRsp_t)
pdn_type = act_def_bearer_req.m.pdnInfo.pAddr.pdnType
addr = act_def_bearer_req.m.pdnInfo.pAddr.addrInfo
sec_ip_ipv4 = None
if pdn_type == 1:
sec_ip_ipv4 = ipaddress.ip_address(bytes(addr[:4]))
elif pdn_type == 3:
sec_ip_ipv4 = ipaddress.ip_address(bytes(addr[8:12]))
print(
"********************** Sending Activate default EPS bearer "
"context accept for APN-%s, UE id-%d" % (apn, ue_id),
)
print(
"********************** Added default bearer for apn-%s,"
" bearer id-%d, pdn type-%d"
% (apn, act_def_bearer_req.m.pdnInfo.epsBearerId, pdn_type),
)
# Receive Router Advertisement message
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_ROUTER_ADV_IND.value,
)
router_adv = response.cast(s1ap_types.ueRouterAdv_t)
print(
"******************* Received Router Advertisement for APN-%s"
" ,bearer id-%d" % (apn, router_adv.bearerId),
)
ipv6_addr = "".join([chr(i) for i in router_adv.ipv6Addr]).rstrip(
"\x00",
)
print("******* UE IPv6 address: ", ipv6_addr)
sec_ip_ipv6 = ipaddress.ip_address(ipv6_addr)
print("***** Sleeping for 5 seconds")
time.sleep(5)
# Add dedicated bearer
print("********************** Adding dedicated bearer to ims PDN")
# Create default ipv4v6 flow list
flow_list = self._spgw_util.create_default_ipv4v6_flows()
self._spgw_util.create_bearer(
"IMSI" + "".join([str(i) for i in req.imsi]),
act_def_bearer_req.m.pdnInfo.epsBearerId,
flow_list,
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value,
)
act_ded_ber_ctxt_req = response.cast(s1ap_types.UeActDedBearCtxtReq_t)
self._s1ap_wrapper.sendActDedicatedBearerAccept(
req.ue_id, act_ded_ber_ctxt_req.bearerId,
)
print(
"************* Added dedicated bearer",
act_ded_ber_ctxt_req.bearerId,
)
print("***** Sleeping for 10 seconds")
time.sleep(10)
# ipv4 default pdn + ipv4v6(ims) pdn + dedicated bearer for ims pdn
num_ul_flows = 3
# For ipv4v6 pdn, pass default_ip, sec_ip_ipv4 and sec_ip_ipv6
if pdn_type == 3:
dl_flow_rules = {
default_ip: [],
sec_ip_ipv4: [flow_list],
sec_ip_ipv6: [flow_list],
}
# For ipv6 pdn, pass default_ip, sec_ip_ipv6
if pdn_type == 2:
dl_flow_rules = {
default_ip: [],
sec_ip_ipv6: [flow_list],
}
# Verify if flow rules are created
self._s1ap_wrapper.s1_util.verify_flow_rules(
num_ul_flows, dl_flow_rules,
)
print(
"********************** Deleting dedicated bearer for IMSI",
"".join([str(i) for i in req.imsi]),
)
self._spgw_util.delete_bearer(
"IMSI" + "".join([str(i) for i in req.imsi]),
act_def_bearer_req.m.pdnInfo.epsBearerId,
act_ded_ber_ctxt_req.bearerId,
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_DEACTIVATE_BER_REQ.value,
)
print("******************* Received deactivate eps bearer context")
deactv_bearer_req = response.cast(s1ap_types.UeDeActvBearCtxtReq_t)
self._s1ap_wrapper.sendDeactDedicatedBearerAccept(
req.ue_id, deactv_bearer_req.bearerId,
)
print("***** Sleeping for 5 seconds")
time.sleep(5)
# ipv4 default pdn + ipv4v6(ims) pdn
num_ul_flows = 2
# For ipv4v6 pdn, pass default_ip, sec_ip_ipv4 and sec_ip_ipv6
if pdn_type == 3:
dl_flow_rules = {
default_ip: [],
sec_ip_ipv4: [],
sec_ip_ipv6: [],
}
# For ipv6 pdn, pass default_ip, sec_ip_ipv6
if pdn_type == 2:
dl_flow_rules = {
default_ip: [],
sec_ip_ipv6: [],
}
# Verify if flow rules are created
self._s1ap_wrapper.s1_util.verify_flow_rules(
num_ul_flows, dl_flow_rules,
)
print(
"******************* Running UE detach (switch-off) for ",
"UE id ",
ue_id,
)
# Now detach the UE
self._s1ap_wrapper.s1_util.detach(
ue_id, s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value, False,
)
if __name__ == "__main__":
unittest.main()
|
model/prd_evaluation.py
|
hengfei-wang/SCNeRF
| 272 |
94095
|
from reprojection import runSuperGlueSinglePair,image_pair_candidates, runSIFTSinglePair
from ray_dist_loss import preprocess_match, proj_ray_dist_loss_single
import torch
import numpy as np
import os
from random import random
import numpy as np
import torch
import torchvision.transforms as TF
import matplotlib.pyplot as plt
tol=1e-4
match_num = 4
run_unit_test = lambda args, kwargs, test_name: None if not args.debug else \
test_name(**kwargs)
def unit_test_matches(**kwargs):
msg = "Failed to pass the unit test named matches"
print("Starting Unit Test : matches")
dirname = "_unit_test_matches_result"
# Check whether argument is currently provided.
assert "args" in kwargs.keys(), msg
assert "result" in kwargs.keys(), msg
assert "img_i" in kwargs.keys(), msg
assert "img_j" in kwargs.keys(), msg
assert "img_i_idx" in kwargs.keys(), msg
assert "img_j_idx" in kwargs.keys(), msg
args= kwargs["args"]
result = kwargs["result"]
img_i, img_j = kwargs["img_i"], kwargs["img_j"]
img_i_idx, img_j_idx = kwargs["img_i_idx"], kwargs["img_j_idx"]
kps1, kps2 = result
W = img_i.shape[1]
# Draw matches and save them
assert hasattr(args, "datadir"), msg
scene_name = args.datadir.split("/")[-1]
scene_path = os.path.join(dirname, scene_name)
os.makedirs(scene_path, exist_ok=True)
img_name = "{}_{}.png".format(img_i_idx, img_j_idx)
img_path = os.path.join(scene_path, img_name)
img_cat = torch.cat([img_i, img_j], dim=1)
img_cat_pil = TF.ToPILImage()(img_cat.permute(2, 0, 1))
plt.imshow(img_cat_pil)
i_visualize = np.random.choice(range(len(kps1)), match_num)
for i in i_visualize:
kp1, kp2 = kps1[i].cpu().numpy(), kps2[i].cpu().numpy()
color = (random(), random(), random())
plt.plot([kp1[0], kp2[0]+W], [kp1[1], kp2[1]], c=color, lw=2)
plt.savefig(img_path)
plt.close()
def projected_ray_distance_evaluation(
images,
index_list,
args,
ray_fun,
ray_fun_gt,
H,
W,
mode,
matcher,
gt_intrinsic,
gt_extrinsic,
method,
device,
intrinsic=None,
extrinsic=None,
camera_model=None,
i_map=None,
):
prd_list = []
match_fun = runSuperGlueSinglePair if args.matcher == "superglue" else \
runSIFTSinglePair
extrinsic_gt_numpy = gt_extrinsic[index_list].cpu().numpy()
with torch.no_grad():
feasible_image_pairs = image_pair_candidates(
extrinsic_gt_numpy, args, index_list
)
for img_i in feasible_image_pairs.keys():
for img_j in feasible_image_pairs[img_i]:
if img_i >= img_j:
continue
result = match_fun(
matcher,
images[img_i],
images[img_j],
0,
args
)
kps0_list, kps1_list = preprocess_match(result)
if kps0_list is None and kps1_list is None:
continue
result = kps0_list, kps1_list
kwargs_unit_test = {
"args": args,
"result": result,
"img_i": images[img_i],
"img_j": images[img_j],
"img_i_idx": img_i,
"img_j_idx": img_j
}
run_unit_test(
args, kwargs_unit_test, unit_test_matches
)
if mode != "train":
# Acquiring correct matches using the ground truth camera info
# In the training mode, we don't use the ground truth information.
rays_i_gt = ray_fun_gt(
H=H, W=W,focal=gt_intrinsic[0][0],
extrinsic=gt_extrinsic[img_i], kps_list=kps0_list
)
rays_j_gt = ray_fun_gt(
H=H, W=W,focal=gt_intrinsic[0][0],
extrinsic=gt_extrinsic[img_j], kps_list=kps1_list
)
filter_idx = filter_matches_with_gt(
kps0_list=kps0_list,
kps1_list=kps1_list,
H=H,
W=W,
gt_intrinsic=gt_intrinsic,
gt_extrinsic=gt_extrinsic[[img_i, img_j]],
rays0=rays_i_gt,
rays1=rays_j_gt,
args=args,
device=device,
method=method
)
kps0_list = kps0_list[filter_idx]
kps1_list = kps1_list[filter_idx]
if camera_model is None:
# Evaluate with gt_extrinsic for val,test
# Evaluate with noisy_extrinsic for train
extrinsic_evaluate = gt_extrinsic if mode != "train" else \
extrinsic
rays_i = ray_fun(
H=H, W=W, focal=intrinsic[0][0],
extrinsic=extrinsic_evaluate[img_i], kps_list=kps0_list
)
rays_j = ray_fun(
H=H, W=W, focal=intrinsic[0][0],
extrinsic=extrinsic_evaluate[img_j], kps_list=kps1_list
)
projected_ray_dist, _ = proj_ray_dist_loss_single(
kps0_list=kps0_list, kps1_list=kps1_list, img_idx0=img_i,
img_idx1=img_j, rays0=rays_i, rays1=rays_j, mode=mode,
device=device, H=H, W=W, args=args,
intrinsic=gt_intrinsic, extrinsic=extrinsic_evaluate
)
else:
# In the train mode, we evaluate with the camera model (looked up via i_map) instead of the ground-truth extrinsics.
extrinsic_evaluate = gt_extrinsic if mode != "train" else \
None
extrinsic_evaluate_i = gt_extrinsic[img_i] if mode != "train" \
else None
extrinsic_evaluate_j = gt_extrinsic[img_j] if mode != "train" \
else None
camera_idx_i = np.where(i_map == img_i)[0][0] \
if mode == "train" else None
camera_idx_j = np.where(i_map == img_j)[0][0] \
if mode == "train" else None
rays_i = ray_fun(
H=H, W=W, camera_model=camera_model,
extrinsic=extrinsic_evaluate_i, kps_list=kps0_list,
idx_in_camera_param=camera_idx_i
)
rays_j = ray_fun(
H=H, W=W, camera_model=camera_model,
extrinsic=extrinsic_evaluate_j, kps_list=kps1_list,
idx_in_camera_param=camera_idx_j
)
projected_ray_dist, _ = proj_ray_dist_loss_single(
kps0_list=kps0_list, kps1_list=kps1_list, img_idx0=img_i,
img_idx1=img_j, rays0=rays_i, rays1=rays_j, mode=mode,
device=device, H=H, W=W, args=args, i_map=i_map,
camera_model=camera_model, extrinsic=extrinsic_evaluate
)
if not torch.isnan(projected_ray_dist):
prd_list.append(projected_ray_dist.item())
prd_list = torch.tensor(prd_list)
return prd_list.mean()
# Since SuperGlue sometimes fail to acquire reliable matches,
# we filter matches using the ground truth information only when
# evaluating PRD on val/test.
def filter_matches_with_gt(
kps0_list,
kps1_list,
W,
H,
gt_intrinsic,
gt_extrinsic,
rays0,
rays1,
args,
method,
device,
eps=1e-6
):
assert method in ["NeRF", "NeRF++"]
assert kps0_list.dim() == 2 and kps1_list.dim() == 2
gt_intrinsic=gt_intrinsic.clone().detach()
# NeRF uses an opposite camera coordinate convention, so flip the sign of the focal length.
if method == "NeRF":
gt_intrinsic[0][0] = -gt_intrinsic[0][0]
rays0_o, rays0_d = rays0
rays1_o, rays1_d = rays1
rays0_o, rays0_d = rays0_o.unsqueeze(0), rays0_d.unsqueeze(0)
rays1_o, rays1_d = rays1_o.unsqueeze(0), rays1_d.unsqueeze(0)
gt_extrinsic_inv = torch.inverse(gt_extrinsic.cpu())
gt_extrinsic_inv = gt_extrinsic_inv.to(device)
rays0_d = rays0_d / (rays0_d.norm(p=2, dim=-1)[:, :, None] + eps)
rays1_d = rays1_d / (rays1_d.norm(p=2, dim=-1)[:, :, None] + eps)
rays0_o_world = torch.cat(
[
rays0_o,
torch.ones((rays0_o.shape[:2]), device=device)[:, :, None]
],
dim=-1
)[:, :, :3]
rays1_o_world = torch.cat(
[
rays1_o,
torch.ones((rays1_o.shape[:2]), device=device)[:, :, None]
],
dim=-1
)[:, :, :3]
rays0_d_world = rays0_d[:, :, :3]
rays1_d_world = rays1_d[:, :, :3]
r0_r1 = torch.einsum("ijk, ijk -> ij", rays0_d_world, rays1_d_world)
t0 = (
torch.einsum(
"ijk, ijk -> ij", rays0_d_world, rays0_o_world - rays1_o_world
) - r0_r1
* torch.einsum(
"ijk, ijk -> ij", rays1_d_world, rays0_o_world - rays1_o_world
)
) / (r0_r1 ** 2 - 1 + eps)
t1 = (
torch.einsum(
"ijk, ijk -> ij", rays1_d_world, rays1_o_world - rays0_o_world
) - r0_r1
* torch.einsum(
"ijk, ijk -> ij", rays0_d_world, rays1_o_world - rays0_o_world
)
) / (r0_r1 ** 2 - 1 + eps)
p0 = t0[:, :, None] * rays0_d_world + rays0_o_world
p1 = t1[:, :, None] * rays1_d_world + rays1_o_world
p0_4d = torch.cat(
[p0, torch.ones((p0.shape[:2]), device=device)[:, :, None]], dim=-1
)
p1_4d = torch.cat(
[p1, torch.ones((p1.shape[:2]), device=device)[:, :, None]], dim=-1
)
p0_proj_to_im1 = torch.einsum(
"ijk, ipk -> ijp", p0_4d, gt_extrinsic_inv[1:]
)
p1_proj_to_im0 = torch.einsum(
"ijk, ipk -> ijp", p1_4d, gt_extrinsic_inv[:-1]
)
p0_norm_im1 = torch.einsum("ijk, pk -> ijp", p0_proj_to_im1, gt_intrinsic)
p1_norm_im0 = torch.einsum("ijk, pk -> ijp", p1_proj_to_im0, gt_intrinsic)
p0_norm_im1_2d = p0_norm_im1[:, :, :2] / (p0_norm_im1[:, :, 2, None] + eps)
p1_norm_im0_2d = p1_norm_im0[:, :, :2] / (p1_norm_im0[:, :, 2, None] + eps)
# Chirality check: remove rays behind cameras
# First, flatten the correspondences
# Find indices of valid rays
valid_t0 = (t0 > 0).flatten()
valid_t1 = (t1 > 0).flatten()
valid = torch.logical_and(valid_t0, valid_t1)
# Second, select losses that are valid
# When using NeRF++
loss0_list = ((p1_norm_im0_2d - kps0_list) ** 2).sum(-1).flatten()
loss1_list = ((p0_norm_im1_2d - kps1_list) ** 2).sum(-1).flatten()
# Remove cloned tensor after the computation
del gt_intrinsic
return torch.logical_and(
torch.logical_and(loss0_list < 1.0, loss1_list < 1.0), valid
)
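# A minimal standalone sketch (hypothetical helper, not part of SCNeRF) of the
# closest-point computation that the einsum block above implements: for rays
# o0 + t0*d0 and o1 + t1*d1 with unit directions, t0 and t1 locate the pair of
# points that minimises the ray-to-ray distance; those points are then
# reprojected into the opposite images and compared against the keypoints.
def _closest_points_on_rays(o0, d0, o1, d1, eps=1e-6):
    w = o0 - o1
    b = np.dot(d0, d1)                               # r0_r1 in the code above
    t0 = (np.dot(d0, w) - b * np.dot(d1, w)) / (b ** 2 - 1 + eps)
    t1 = (np.dot(d1, -w) - b * np.dot(d0, -w)) / (b ** 2 - 1 + eps)
    return o0 + t0 * d0, o1 + t1 * d1                # p0, p1 in the code above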
|
robomimic/scripts/get_dataset_info.py
|
akolobov/robomimic
| 107 |
94097
|
<filename>robomimic/scripts/get_dataset_info.py<gh_stars>100-1000
"""
Helper script to report dataset information. By default, will print trajectory length statistics,
the maximum and minimum action element in the dataset, filter keys present, environment
metadata, and the structure of the first demonstration. If --verbose is passed, it will
report the exact demo keys under each filter key, and the structure of all demonstrations
(not just the first one).
Args:
dataset (str): path to hdf5 dataset
filter_key (str): if provided, report statistics on the subset of trajectories
in the file that correspond to this filter key
verbose (bool): if flag is provided, print more details, like the structure of all
demonstrations (not just the first one)
Example usage:
# run script on example hdf5 packaged with repository
python get_dataset_info.py --dataset ../../tests/assets/test.hdf5
# run script only on validation data
python get_dataset_info.py --dataset ../../tests/assets/test.hdf5 --filter_key valid
"""
import h5py
import json
import argparse
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset",
type=str,
help="path to hdf5 dataset",
)
parser.add_argument(
"--filter_key",
type=str,
default=None,
help="(optional) if provided, report statistics on the subset of trajectories \
in the file that correspond to this filter key",
)
parser.add_argument(
"--verbose",
action='store_true',
help="verbose output",
)
args = parser.parse_args()
# extract demonstration list from file
filter_key = args.filter_key
all_filter_keys = None
f = h5py.File(args.dataset, "r")
if filter_key is not None:
# use the demonstrations from the filter key instead
print("NOTE: using filter key {}".format(filter_key))
demos = sorted([elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])])
else:
# use all demonstrations
demos = sorted(list(f["data"].keys()))
# extract filter key information
if "mask" in f:
all_filter_keys = {}
for fk in f["mask"]:
fk_demos = sorted([elem.decode("utf-8") for elem in np.array(f["mask/{}".format(fk)])])
all_filter_keys[fk] = fk_demos
# put demonstration list in increasing episode order
inds = np.argsort([int(elem[5:]) for elem in demos])
demos = [demos[i] for i in inds]
# extract length of each trajectory in the file
traj_lengths = []
action_min = np.inf
action_max = -np.inf
for ep in demos:
traj_lengths.append(f["data/{}/actions".format(ep)].shape[0])
action_min = min(action_min, np.min(f["data/{}/actions".format(ep)][()]))
action_max = max(action_max, np.max(f["data/{}/actions".format(ep)][()]))
traj_lengths = np.array(traj_lengths)
# report statistics on the data
print("")
print("total transitions: {}".format(np.sum(traj_lengths)))
print("total trajectories: {}".format(traj_lengths.shape[0]))
print("traj length mean: {}".format(np.mean(traj_lengths)))
print("traj length std: {}".format(np.std(traj_lengths)))
print("traj length min: {}".format(np.min(traj_lengths)))
print("traj length max: {}".format(np.max(traj_lengths)))
print("action min: {}".format(action_min))
print("action max: {}".format(action_max))
print("")
print("==== Filter Keys ====")
if all_filter_keys is not None:
for fk in all_filter_keys:
print("filter key {} with {} demos".format(fk, len(all_filter_keys[fk])))
else:
print("no filter keys")
print("")
if args.verbose:
if all_filter_keys is not None:
print("==== Filter Key Contents ====")
for fk in all_filter_keys:
print("filter_key {} with {} demos: {}".format(fk, len(all_filter_keys[fk]), all_filter_keys[fk]))
print("")
env_meta = json.loads(f["data"].attrs["env_args"])
print("==== Env Meta ====")
print(json.dumps(env_meta, indent=4))
print("")
print("==== Dataset Structure ====")
for ep in demos:
print("episode {} with {} transitions".format(ep, f["data/{}".format(ep)].attrs["num_samples"]))
for k in f["data/{}".format(ep)]:
if k in ["obs", "next_obs"]:
print(" key: {}".format(k))
for obs_k in f["data/{}/{}".format(ep, k)]:
shape = f["data/{}/{}/{}".format(ep, k, obs_k)].shape
print(" observation key {} with shape {}".format(obs_k, shape))
elif isinstance(f["data/{}/{}".format(ep, k)], h5py.Dataset):
key_shape = f["data/{}/{}".format(ep, k)].shape
print(" key: {} with shape {}".format(k, key_shape))
if not args.verbose:
break
f.close()
# maybe display error message
print("")
if (action_min < -1.) or (action_max > 1.):
raise Exception("Dataset should have actions in [-1., 1.] but got bounds [{}, {}]".format(action_min, action_max))
|
CPAC/nuisance/__init__.py
|
gkiar/C-PAC
| 125 |
94102
|
from .utils import (
find_offending_time_points,
temporal_variance_mask,
generate_summarize_tissue_mask,
NuisanceRegressor
)
from .nuisance import (
create_regressor_workflow,
create_nuisance_regression_workflow,
filtering_bold_and_regressors
)
from .bandpass import (
bandpass_voxels
)
from .utils.compcor import (
cosine_filter
)
__all__ = [
'create_regressor_workflow',
'create_nuisance_regression_workflow',
'filtering_bold_and_regressors',
'find_offending_time_points',
'temporal_variance_mask',
'generate_summarize_tissue_mask',
'bandpass_voxels',
'cosine_filter'
]
|
sqlalchemy_utils/types/encrypted/padding.py
|
susnux/sqlalchemy-utils
| 879 |
94113
|
import six
class InvalidPaddingError(Exception):
pass
class Padding(object):
"""Base class for padding and unpadding."""
def __init__(self, block_size):
self.block_size = block_size
def pad(self, value):
raise NotImplementedError('Subclasses must implement this!')
def unpad(self, value):
raise NotImplementedError('Subclasses must implement this!')
class PKCS5Padding(Padding):
"""Provide PKCS5 padding and unpadding."""
def pad(self, value):
if not isinstance(value, six.binary_type):
value = value.encode()
padding_length = (self.block_size - len(value) % self.block_size)
padding_sequence = padding_length * six.b(chr(padding_length))
value_with_padding = value + padding_sequence
return value_with_padding
def unpad(self, value):
# Perform some input validations.
# In case of error, we throw a generic InvalidPaddingError()
if not value or len(value) < self.block_size:
# PKCS5 padded output will always be at least 1 block size
raise InvalidPaddingError()
if len(value) % self.block_size != 0:
# PKCS5 padded output will be a multiple of the block size
raise InvalidPaddingError()
if isinstance(value, six.binary_type):
padding_length = value[-1]
if isinstance(value, six.string_types):
padding_length = ord(value[-1])
if padding_length == 0 or padding_length > self.block_size:
raise InvalidPaddingError()
def convert_byte_or_char_to_number(x):
return ord(x) if isinstance(x, six.string_types) else x
if any([padding_length != convert_byte_or_char_to_number(x)
for x in value[-padding_length:]]):
raise InvalidPaddingError()
value_without_padding = value[0:-padding_length]
return value_without_padding
class OneAndZeroesPadding(Padding):
"""Provide the one and zeroes padding and unpadding.
This mechanism pads with 0x80 followed by zero bytes.
For unpadding it strips off all trailing zero bytes and the 0x80 byte.
"""
BYTE_80 = 0x80
BYTE_00 = 0x00
def pad(self, value):
if not isinstance(value, six.binary_type):
value = value.encode()
padding_length = (self.block_size - len(value) % self.block_size)
one_part_bytes = six.b(chr(self.BYTE_80))
zeroes_part_bytes = (padding_length - 1) * six.b(chr(self.BYTE_00))
padding_sequence = one_part_bytes + zeroes_part_bytes
value_with_padding = value + padding_sequence
return value_with_padding
def unpad(self, value):
value_without_padding = value.rstrip(six.b(chr(self.BYTE_00)))
value_without_padding = value_without_padding.rstrip(
six.b(chr(self.BYTE_80)))
return value_without_padding
class ZeroesPadding(Padding):
"""Provide zeroes padding and unpadding.
This mechanism pads with 0x00 except the last byte equals
to the padding length. For unpadding it reads the last byte
and strips off that many bytes.
"""
BYTE_00 = 0x00
def pad(self, value):
if not isinstance(value, six.binary_type):
value = value.encode()
padding_length = (self.block_size - len(value) % self.block_size)
zeroes_part_bytes = (padding_length - 1) * six.b(chr(self.BYTE_00))
last_part_bytes = six.b(chr(padding_length))
padding_sequence = zeroes_part_bytes + last_part_bytes
value_with_padding = value + padding_sequence
return value_with_padding
def unpad(self, value):
if isinstance(value, six.binary_type):
padding_length = value[-1]
if isinstance(value, six.string_types):
padding_length = ord(value[-1])
value_without_padding = value[0:-padding_length]
return value_without_padding
class NaivePadding(Padding):
"""Naive padding and unpadding using '*'.
The class is provided only for backwards compatibility.
"""
CHARACTER = six.b('*')
def pad(self, value):
num_of_bytes = (self.block_size - len(value) % self.block_size)
value_with_padding = value + num_of_bytes * self.CHARACTER
return value_with_padding
def unpad(self, value):
value_without_padding = value.rstrip(self.CHARACTER)
return value_without_padding
PADDING_MECHANISM = {
'pkcs5': PKCS5Padding,
'oneandzeroes': OneAndZeroesPadding,
'zeroes': ZeroesPadding,
'naive': NaivePadding
}
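# A minimal usage sketch (block size and plaintext are illustrative): pick a
# mechanism from PADDING_MECHANISM, pad a value up to a multiple of the block
# size, then recover the original bytes with unpad.
if __name__ == '__main__':
    padder = PADDING_MECHANISM['pkcs5'](block_size=16)
    padded = padder.pad(b'secret')      # 16 bytes: b'secret' + ten 0x0a padding bytes
    print(padded)
    print(padder.unpad(padded))         # -> b'secret'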
|
PWGJE/EMCALJetTasks/Tracks/analysis/util/PtReachCalculation.py
|
maroozm/AliPhysics
| 114 |
94118
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
from PWGJE.EMCALJetTasks.Tracks.analysis.base.SpectrumFitter import MinBiasFitter, TriggeredSpectrumFitter
from scipy.optimize import fsolve
class PtReachCalculator(object):
"""
classdocs
"""
def __init__(self, name, data, isMinBias, limit):
'''
Constructor
'''
self.__fitter = None
if isMinBias:
self.__fitter = MinBiasFitter(name, data)
else:
self.__fitter = TriggeredSpectrumFitter(name, data)
self.__limit = limit
def GetPtReach(self, numberOfEvents):
"""
Get the Pt reach for a given number of events
"""
model = lambda p : numberOfEvents * self.__fitter.GetParameterisedValueAt(p) - self.__limit
initialGuess = 10.
result = fsolve(model, initialGuess)
return result
def GetPtReachForIntegral(self, numberOfEvents):
"""
Get the Pt reach for a given number of events, using the integrated yield above pt
"""
model = lambda p : numberOfEvents * self.__fitter.GetNormalisedIntegralAbove(p) - self.__limit
initialGuess = 10.
result = fsolve(model, initialGuess)
return result
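# A minimal standalone check (toy power-law spectrum, not an ALICE
# parameterisation) of the root finding used above: GetPtReach solves
# numberOfEvents * f(pt) = limit for pt, i.e. the pt above which fewer than
# `limit` counts are expected.
if __name__ == '__main__':
    toy_spectrum = lambda pt: 10. * pt ** -5          # falling power-law yield per event
    model = lambda pt: 1e6 * toy_spectrum(pt) - 1.    # numberOfEvents * f(pt) - limit
    print(fsolve(model, 10.))                         # pt reach for the toy input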
|
lib/gcn/sparse/torch_vertex.py
|
ChenFengYe/relightable-nr
| 105 |
94146
|
<reponame>ChenFengYe/relightable-nr
import torch
from torch import nn
import torch_geometric as tg
from .torch_nn import MLP
from .torch_edge import DilatedKnnGraph
class MRConv(nn.Module):
"""
Max-Relative Graph Convolution (Paper: https://arxiv.org/abs/1904.03751)
"""
def __init__(self, in_channels, out_channels, act_type='relu', norm_type=None, bias=True, aggr='max'):
super(MRConv, self).__init__()
self.nn = MLP([in_channels*2, out_channels], act_type, norm_type, bias)
self.aggr = aggr
def forward(self, x, edge_index):
""""""
x_j = tg.utils.scatter_(self.aggr, torch.index_select(x, 0, edge_index[0]) - torch.index_select(x, 0, edge_index[1]), edge_index[1])
return self.nn(torch.cat([x, x_j], dim=1))
class EdgConv(tg.nn.EdgeConv):
"""
Edge convolution layer (with activation, batch normalization)
"""
def __init__(self, in_channels, out_channels, act_type='relu', norm_type=None, bias=True, aggr='max'):
super(EdgConv, self).__init__(MLP([in_channels*2, out_channels], act_type, norm_type, bias), aggr)
def forward(self, x, edge_index):
return super(EdgConv, self).forward(x, edge_index)
class GraphConv(nn.Module):
"""
Static graph convolution layer
"""
def __init__(self, in_channels, out_channels, conv_type='edge',
act_type='relu', norm_type=None, bias=True):
super(GraphConv, self).__init__()
if conv_type == 'edge':
self.gconv = EdgConv(in_channels, out_channels, act_type, norm_type, bias)
elif conv_type == 'mr':
self.gconv = MRConv(in_channels, out_channels, act_type, norm_type, bias)
def forward(self, x, edge_index):
return self.gconv(x, edge_index)
class DynConv(GraphConv):
"""
Dynamic graph convolution layer
"""
def __init__(self, in_channels, out_channels, kernel_size=9, dilation=1, conv_type='edge', act_type='relu',
norm_type=None, bias=True, stochastic=False, epsilon=1.0, knn_type='matrix'):
super(DynConv, self).__init__(in_channels, out_channels, conv_type, act_type, norm_type, bias)
self.k = kernel_size
self.d = dilation
self.dilated_knn_graph = DilatedKnnGraph(kernel_size, dilation, stochastic, epsilon, knn_type)
def forward(self, x, batch=None):
edge_index = self.dilated_knn_graph(x, batch)
return super(DynConv, self).forward(x, edge_index)
class ResDynBlock(nn.Module):
"""
Residual Dynamic graph convolution block
:input: (x0, x1, x2, ... , xi), batch
:output:(x0, x1, x2, ... , xi ,xi+1) , batch
"""
def __init__(self, channels, kernel_size=9, dilation=1, conv_type='edge', act_type='relu', norm_type=None,
bias=True, stochastic=False, epsilon=1.0, knn_type='matrix'):
super(ResDynBlock, self).__init__()
self.body = DynConv(channels, channels, kernel_size, dilation, conv_type,
act_type, norm_type, bias, stochastic, epsilon, knn_type)
# residual connection: returns body(x, batch) + x, so chunk xi produces xi+1
def forward(self, x, batch):
return self.body(x, batch) + x, batch
class DenseDynBlock(nn.Module):
"""
Dense Dynamic graph convolution block
"""
def __init__(self, channels, kernel_size=9, dilation=1, conv_type='edge', act_type='relu', norm_type=None,
bias=True, stochastic=False, epsilon=1.0, knn_type='matrix'):
super(DenseDynBlock, self).__init__()
self.body = DynConv(channels*2, channels, kernel_size, dilation, conv_type,
act_type, norm_type, bias, stochastic, epsilon, knn_type)
def forward(self, x, batch):
dense = self.body(x, batch)
return torch.cat((x, dense), 1), batch
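# A minimal usage sketch (shapes are illustrative, not from the original module):
# a static GraphConv takes node features x of shape [num_nodes, in_channels] and a
# torch_geometric-style edge_index of shape [2, num_edges]; DynConv instead builds
# the edge_index itself from a dilated k-NN graph on the fly.
# conv = GraphConv(in_channels=3, out_channels=16, conv_type='edge')
# x = torch.rand(8, 3)
# edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]], dtype=torch.long)
# out = conv(x, edge_index)    # -> shape [8, 16]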
|
ipymd/formats/python.py
|
nathanfdunn/ipymd
| 521 |
94174
|
<reponame>nathanfdunn/ipymd
# -*- coding: utf-8 -*-
"""Python reader and writer."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import re
import ast
from collections import OrderedDict
from ..lib.base_lexer import BaseGrammar, BaseLexer
from ..lib.markdown import MarkdownFilter
from ..lib.python import _is_python
from ..ext.six import StringIO
from ..utils.utils import _ensure_string, _preprocess
#------------------------------------------------------------------------------
# Python reader and writer
#------------------------------------------------------------------------------
class PythonSplitGrammar(BaseGrammar):
"""Grammar used to split Python code into chunks while not cutting
long Python strings."""
_triple_quotes = "'''"
_triple_doublequotes = '"""'
_triple = _triple_quotes + '|' + _triple_doublequotes
# '''text''' or """text""".
text_var = re.compile(r"^({0})((?!{0}).|\n)*?\1".format(_triple))
# Two new lines followed by non-space
newline = re.compile(r'^[\n]{2,}(?=[^ ])')
linebreak = re.compile(r'^\n+')
other = re.compile(r'^(?!{0}).'.format(_triple))
class PythonSplitLexer(BaseLexer):
"""Lexer for splitting Python code into chunks."""
grammar_class = PythonSplitGrammar
default_rules = ['text_var', 'newline', 'linebreak', 'other']
def __init__(self):
super(PythonSplitLexer, self).__init__()
self._chunks = ['']
@property
def current(self):
if not self._chunks:
return None
else:
return self._chunks[-1]
@property
def chunks(self):
return [chunk for chunk in self._chunks if chunk]
@current.setter
def current(self, value):
self._chunks[-1] = value
def new_chunk(self):
self._chunks.append('')
def append(self, text):
self.current += text
def parse_newline(self, m):
self.new_chunk()
def parse_linebreak(self, m):
self.append(m.group(0))
def parse_text_var(self, m):
self.append(m.group(0))
def parse_other(self, m):
self.append(m.group(0))
def _split_python(python):
"""Split Python source into chunks.
Chunks are separated by at least two consecutive newlines. The break must not
be followed by a space. Also, long Python strings spanning several lines
are not split.
"""
python = _preprocess(python)
if not python:
return []
lexer = PythonSplitLexer()
lexer.read(python)
return lexer.chunks
def _is_chunk_markdown(source):
"""Return whether a chunk contains Markdown contents."""
lines = source.splitlines()
if all(line.startswith('# ') for line in lines):
# The chunk is Markdown *unless* it is commented Python code.
source = '\n'.join(line[2:] for line in lines
if not line[2:].startswith('#')) # skip headers
if not source:
return True
# Try to parse the chunk: if it fails, it is Markdown, otherwise,
# it is Python.
return not _is_python(source)
return False
def _remove_hash(source):
"""Remove the leading '#' of every line in the source."""
return '\n'.join(line[2:].rstrip() for line in source.splitlines())
def _add_hash(source):
"""Add a leading hash '#' at the beginning of every line in the source."""
source = '\n'.join('# ' + line.rstrip()
for line in source.splitlines())
return source
class PythonReader(object):
"""Python reader."""
def read(self, python):
chunks = _split_python(python)
for chunk in chunks:
if _is_chunk_markdown(chunk):
yield self._markdown_cell(_remove_hash(chunk))
else:
yield self._code_cell(chunk)
def _code_cell(self, source):
return {'cell_type': 'code',
'input': source,
'output': None}
def _markdown_cell(self, source):
return {'cell_type': 'markdown',
'source': source}
class PythonWriter(object):
"""Python writer."""
def __init__(self, keep_markdown=None):
self._output = StringIO()
self._markdown_filter = MarkdownFilter(keep_markdown)
def _new_paragraph(self):
self._output.write('\n\n')
def append_comments(self, source):
source = source.rstrip()
# Filter Markdown contents.
source = self._markdown_filter(source)
# Skip empty cells.
if not source:
return
comments = _add_hash(source)
self._output.write(comments)
self._new_paragraph()
def append_code(self, input):
self._output.write(input)
self._new_paragraph()
def write(self, cell):
if cell['cell_type'] == 'markdown':
self.append_comments(cell['source'])
elif cell['cell_type'] == 'code':
self.append_code(cell['input'])
@property
def contents(self):
return self._output.getvalue().rstrip() + '\n' # end of file \n
def close(self):
self._output.close()
def __del__(self):
self.close()
PYTHON_FORMAT = dict(
reader=PythonReader,
writer=PythonWriter,
file_extension='.py',
file_type='text',
)
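# A minimal usage sketch (the source string is illustrative): the reader splits a
# Python document into Markdown cells (commented prose) and code cells; the writer
# turns such cells back into a .py document.
# source = "# A Markdown comment\n\nprint('hello')\n"
# for cell in PythonReader().read(source):
#     print(cell['cell_type'])    # expected: 'markdown', then 'code'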
|
plyer/platforms/linux/battery.py
|
EdwardCoventry/plyer
| 1,184 |
94185
|
'''
Module of Linux API for plyer.battery.
'''
from math import floor
from os import environ
from os.path import exists, join
from subprocess import Popen, PIPE
from plyer.facades import Battery
from plyer.utils import whereis_exe
class LinuxBattery(Battery):
'''
Implementation of the Linux battery API, reading the kernel's
/sys/class/power_supply interface.
'''
def _get_state(self):
status = {"isCharging": None, "percentage": None}
kernel_bat_path = join('/sys', 'class', 'power_supply', 'BAT0')
uevent = join(kernel_bat_path, 'uevent')
with open(uevent) as fle:
lines = [
line.decode('utf-8').strip()
for line in fle.readlines()
]
output = {
line.split('=')[0]: line.split('=')[1]
for line in lines
}
is_charging = output['POWER_SUPPLY_STATUS'] == 'Charging'
total = float(output['POWER_SUPPLY_CHARGE_FULL'])
now = float(output['POWER_SUPPLY_CHARGE_NOW'])
capacity = floor(now / total * 100)
status['percentage'] = capacity
status['isCharging'] = is_charging
return status
class UPowerBattery(Battery):
'''
Implementation of UPower battery API.
'''
def _get_state(self):
# if no LANG specified, return empty string
old_lang = environ.get('LANG', '')
environ['LANG'] = 'C'
status = {"isCharging": None, "percentage": None}
# We are supporting only one battery now
# this will fail if there is no object with such path,
# however it's safer than 'upower -d' which provides
# multiple unrelated 'state' and 'percentage' keywords
dev = "/org/freedesktop/UPower/devices/battery_BAT0"
upower_process = Popen(
["upower", "--show-info", dev],
stdout=PIPE
)
output = upower_process.communicate()[0].decode()
environ['LANG'] = old_lang
if not output:
return status
state = percentage = None
for line in output.splitlines():
if 'state' in line:
state = line.rpartition(':')[-1].strip()
if 'percentage' in line:
percentage = line.rpartition(':')[-1].strip()[:-1]
# switching decimal comma to dot
# (different LC_NUMERIC locale)
percentage = float(
percentage.replace(',', '.')
)
if state:
status['isCharging'] = state == "charging"
status['percentage'] = percentage
return status
def instance():
'''
Instance for facade proxy.
'''
import sys
if whereis_exe('upower'):
return UPowerBattery()
sys.stderr.write("upower not found.")
if exists(join('/sys', 'class', 'power_supply', 'BAT0')):
return LinuxBattery()
return Battery()
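# A minimal usage sketch (illustrative only): instance() picks the UPower backend
# when the `upower` binary is available and otherwise falls back to reading
# /sys/class/power_supply/BAT0 directly; both backends fill the same status dict.
# battery = instance()
# battery._get_state()    # e.g. {'isCharging': False, 'percentage': 87}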
|
tapas/utils/constants.py
|
Martin36/tapas
| 816 |
94196
|
<reponame>Martin36/tapas
# coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Constants used by Dopa tables project."""
import enum
EMPTY_TEXT = 'EMPTY'
NUMBER_TYPE = 'number'
DATE_TYPE = 'date'
class Relation(enum.Enum):
HEADER_TO_CELL = 1 # Connects header to cell.
CELL_TO_HEADER = 2 # Connects cell to header.
QUERY_TO_HEADER = 3 # Connects query to headers.
QUERY_TO_CELL = 4 # Connects query to cells.
ROW_TO_CELL = 5 # Connects row to cells.
CELL_TO_ROW = 6 # Connects cells to row.
EQ = 7 # Annotation value is same as cell value
LT = 8 # Annotation value is less than cell value
GT = 9 # Annotation value is greater than cell value
|